[14/51] [partial] hbase-site git commit: Published site at .

2017-11-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1a616706/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
index d309d87..1bddf29 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
@@ -135,368 +135,369 @@
 127   * localhost if the invocation target is 'this' server; save on network and protobuf
 128   * invocations.
 129   */
-130  @VisibleForTesting // Class is visible so we can assert we are short-circuiting when expected.
-131  public static class ShortCircuitingClusterConnection extends ConnectionImplementation {
-132    private final ServerName serverName;
-133    private final AdminService.BlockingInterface localHostAdmin;
-134    private final ClientService.BlockingInterface localHostClient;
-135
-136    private ShortCircuitingClusterConnection(Configuration conf, ExecutorService pool, User user,
-137        ServerName serverName, AdminService.BlockingInterface admin,
-138        ClientService.BlockingInterface client)
-139        throws IOException {
-140      super(conf, pool, user);
-141      this.serverName = serverName;
-142      this.localHostAdmin = admin;
-143      this.localHostClient = client;
-144    }
-145
-146    @Override
-147    public AdminService.BlockingInterface getAdmin(ServerName sn) throws IOException {
-148      return serverName.equals(sn) ? this.localHostAdmin : super.getAdmin(sn);
-149    }
-150
-151    @Override
-152    public ClientService.BlockingInterface getClient(ServerName sn) throws IOException {
-153      return serverName.equals(sn) ? this.localHostClient : super.getClient(sn);
-154    }
-155
-156    @Override
-157    public MasterKeepAliveConnection getKeepAliveMasterService() throws MasterNotRunningException {
-158      if (this.localHostClient instanceof MasterService.BlockingInterface) {
-159        return new ShortCircuitMasterConnection((MasterService.BlockingInterface) this.localHostClient);
-160      }
-161      return super.getKeepAliveMasterService();
-162    }
-163  }
-164
-165  /**
-166   * Creates a short-circuit connection that can bypass the RPC layer (serialization,
-167   * deserialization, networking, etc.) when talking to a local server.
-168   * @param conf the current configuration
-169   * @param pool the thread pool to use for batch operations
-170   * @param user the user the connection is for
-171   * @param serverName the local server name
-172   * @param admin the admin interface of the local server
-173   * @param client the client interface of the local server
-174   * @return a short-circuit connection.
-175   * @throws IOException if an IO failure occurred
-176   */
-177  public static ClusterConnection createShortCircuitConnection(final Configuration conf,
-178      ExecutorService pool, User user, final ServerName serverName,
-179      final AdminService.BlockingInterface admin, final ClientService.BlockingInterface client)
-180      throws IOException {
-181    if (user == null) {
-182      user = UserProvider.instantiate(conf).getCurrent();
-183    }
-184    return new ShortCircuitingClusterConnection(conf, pool, user, serverName, admin, client);
-185  }
-186
-187  /**
-188   * Set up the connection class so that it will not depend on the master being online. Used for testing.
-189   * @param conf configuration to set
-190   */
-191  @VisibleForTesting
-192  public static void setupMasterlessConnection(Configuration conf) {
-193    conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, MasterlessConnection.class.getName());
-194  }
-195
-196  /**
-197   * Some tests shut down the master. But table availability is a master RPC which is performed on
-198   * region re-lookups.
-199   */
-200  static class MasterlessConnection extends ConnectionImplementation {
-201    MasterlessConnection(Configuration conf, ExecutorService pool, User user) throws IOException {
-202      super(conf, pool, user);
-203    }
-204
-205    @Override
-206    public boolean isTableDisabled(TableName tableName) throws IOException {
-207      // treat all tables as enabled
-208      return false;
-209    }
-210  }
-211
-212  /**
-213   * Return retries + 1. The returned value will be in range [1, Integer.MAX_VALUE].
-214   */
-215  static int retries2Attempts(int retries) {
-216    return Math.max(1, retries == Integer.MAX_VALUE ? Integer.MAX_VALUE : retries + 1);
-217  }
-218
-219  /**
-220   * Get a unique key for the rpc stub to the given server.
-221   */
-222  static String getStubKey(String serviceName, ServerName serverName, boolean
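
A minimal sketch of how the createShortCircuitConnection factory above would be called. This assumes code living inside org.apache.hadoop.hbase.client (ConnectionUtils is audience-private), and adminStub/clientStub are hypothetical placeholders for the server's in-process service implementations, which this diff does not provide:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;

    Configuration conf = HBaseConfiguration.create();
    ExecutorService pool = Executors.newFixedThreadPool(4);
    ServerName self = ServerName.valueOf("localhost", 16020, System.currentTimeMillis());
    // adminStub / clientStub: hypothetical in-process implementations of
    // AdminService.BlockingInterface and ClientService.BlockingInterface.
    ClusterConnection conn = ConnectionUtils.createShortCircuitConnection(
        conf, pool, null /* null resolves to the current user, per the hunk */,
        self, adminStub, clientStub);
    // Calls addressed to 'self' now use the local stubs and skip serialization and RPC.

As a worked check on retries2Attempts from the same hunk: retries2Attempts(0) returns 1 and retries2Attempts(Integer.MAX_VALUE) returns Integer.MAX_VALUE, matching the documented [1, Integer.MAX_VALUE] range.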

[14/51] [partial] hbase-site git commit: Published site at .

2017-11-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/48aaec11/devapidocs/org/apache/hadoop/hbase/zookeeper/ZkAclReset.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/zookeeper/ZkAclReset.html b/devapidocs/org/apache/hadoop/hbase/zookeeper/ZkAclReset.html
deleted file mode 100644
index 0678ed5..0000000
--- a/devapidocs/org/apache/hadoop/hbase/zookeeper/ZkAclReset.html
+++ /dev/null
@@ -1,421 +0,0 @@
-ZkAclReset (Apache HBase 3.0.0-SNAPSHOT API)
-[standard Javadoc page boilerplate elided: doctype, stylesheet/script blocks, and navigation links]
-org.apache.hadoop.hbase.zookeeper
-Class ZkAclReset
-
-java.lang.Object
-  org.apache.hadoop.conf.Configured
-    org.apache.hadoop.hbase.zookeeper.ZkAclReset
-
-All Implemented Interfaces:
-org.apache.hadoop.conf.Configurable, org.apache.hadoop.util.Tool
-
-@InterfaceAudience.Private
-public class ZkAclReset
-extends org.apache.hadoop.conf.Configured
-implements org.apache.hadoop.util.Tool
-
-You may add the jaas.conf option:
-  -Djava.security.auth.login.config=/PATH/jaas.conf
-
-You may also use -D to set options that should normally be in hbase-site.xml:
-  "hbase.zookeeper.quorum"
-  "zookeeper.znode.parent"
-
-Use -set-acls to set the ACLs; run with no option to erase the ACLs.
-
-Field Summary
-
-Modifier and Type                               Field and Description
-private static org.apache.commons.logging.Log   LOG
-
-Constructor Summary
-
-Constructor and Description
-ZkAclReset()
-
-Method Summary
-
-All Methods | Static Methods | Instance Methods | Concrete Methods
-
-Modifier and Type      Method and Description
-static void            main(String[] args)
-private void           printUsageAndExit()
-private static void    resetAcls(org.apache.hadoop.conf.Configuration conf, boolean eraseAcls)
-private static void    resetAcls(ZooKeeperWatcher zkw, String znode, boolean eraseAcls)
-int                    run(String[] args)
-
-Methods inherited from class org.apache.hadoop.conf.Configured
-getConf, setConf
-
-Methods inherited from class java.lang.Object
-clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString,

[14/51] [partial] hbase-site git commit: Published site at .

2017-11-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e60b829c/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
index b5293d0..02e4554 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
@@ -38,11 +38,11 @@
 030import java.util.Collection;
 031import java.util.EnumSet;
 032import java.util.HashMap;
-033import java.util.LinkedList;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Set;
-037import java.util.TreeMap;
+033import java.util.Iterator;
+034import java.util.LinkedList;
+035import java.util.List;
+036import java.util.Map;
+037import java.util.Set;
 038import java.util.concurrent.Callable;
 039import java.util.concurrent.ExecutionException;
 040import java.util.concurrent.Future;
@@ -233,3930 +233,3945 @@
 225public class HBaseAdmin implements Admin {
 226  private static final Log LOG = LogFactory.getLog(HBaseAdmin.class);
 227
-228  private static final String ZK_IDENTIFIER_PREFIX =  "hbase-admin-on-";
+228  private ClusterConnection connection;
 229
-230  private ClusterConnection connection;
-231
-232  private volatile Configuration conf;
-233  private final long pause;
-234  private final int numRetries;
-235  private final int syncWaitTimeout;
-236  private boolean aborted;
-237  private int operationTimeout;
-238  private int rpcTimeout;
-239
-240  private RpcRetryingCallerFactory rpcCallerFactory;
-241  private RpcControllerFactory rpcControllerFactory;
+230  private volatile Configuration conf;
+231  private final long pause;
+232  private final int numRetries;
+233  private final int syncWaitTimeout;
+234  private boolean aborted;
+235  private int operationTimeout;
+236  private int rpcTimeout;
+237
+238  private RpcRetryingCallerFactory rpcCallerFactory;
+239  private RpcControllerFactory rpcControllerFactory;
+240
+241  private NonceGenerator ng;
 242
-243  private NonceGenerator ng;
-244
-245  @Override
-246  public int getOperationTimeout() {
-247    return operationTimeout;
-248  }
-249
-250  HBaseAdmin(ClusterConnection connection) throws IOException {
-251    this.conf = connection.getConfiguration();
-252    this.connection = connection;
-253
-254    // TODO: receive ConnectionConfiguration here rather than re-parsing these configs every time.
-255    this.pause = this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
-256        HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
-257    this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-258        HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-259    this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
-260        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-261    this.rpcTimeout = this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-262        HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
-263    this.syncWaitTimeout = this.conf.getInt(
-264      "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
-265
-266    this.rpcCallerFactory = connection.getRpcRetryingCallerFactory();
-267    this.rpcControllerFactory = connection.getRpcControllerFactory();
-268
-269    this.ng = this.connection.getNonceGenerator();
-270  }
-271
-272  @Override
-273  public void abort(String why, Throwable e) {
-274    // Currently does nothing but throw the passed message and exception
-275    this.aborted = true;
-276    throw new RuntimeException(why, e);
-277  }
-278
-279  @Override
-280  public boolean isAborted() {
-281    return this.aborted;
-282  }
-283
-284  @Override
-285  public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
-286      throws IOException {
-287    return get(abortProcedureAsync(procId, mayInterruptIfRunning), this.syncWaitTimeout,
-288      TimeUnit.MILLISECONDS);
-289  }
-290
-291  @Override
-292  public Future<Boolean> abortProcedureAsync(final long procId, final boolean mayInterruptIfRunning)
-293      throws IOException {
-294    Boolean abortProcResponse =
-295        executeCallable(new MasterCallable<AbortProcedureResponse>(getConnection(),
-296            getRpcControllerFactory()) {
-297      @Override
-298      protected AbortProcedureResponse rpcCall() throws Exception {
-299        AbortProcedureRequest abortProcRequest =
-300            AbortProcedureRequest.newBuilder().setProcId(procId).build();
-301        return master.abortProcedure(getRpcController(), abortProcRequest);
-302      }
-303    }).getIsProcedureAborted();
-304    return new AbortProcedureFuture(this, procId, abortProcResponse);
-305  }
-306
-307  @Override
-308  public
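
The abortProcedure/abortProcedureAsync pair above illustrates the sync-wraps-async pattern used throughout HBaseAdmin: the blocking variant just waits on the Future returned by the async one, bounded by hbase.client.sync.wait.timeout.msec (10 minutes by default). A hedged caller-side sketch; procId and the timeout handling are placeholders:

    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    Configuration conf = HBaseConfiguration.create();
    long procId = 42L; // hypothetical procedure id
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      Future<Boolean> f = admin.abortProcedureAsync(procId, true);
      // Equivalent to the blocking admin.abortProcedure(procId, true), which
      // internally does get(future, syncWaitTimeout, MILLISECONDS):
      boolean aborted = f.get(10, TimeUnit.MINUTES);
    }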

[14/51] [partial] hbase-site git commit: Published site at .

2017-11-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67a6e2ec/devapidocs/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.html
index cd57a8d..a74ea7f 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.html
@@ -137,7 +137,7 @@ implements
 Field and Description
 
-private AsyncTableBuilder<? extends AsyncTableBase>
+private AsyncTableBuilder<?>
 tableBuilder
 
@@ -160,7 +160,7 @@ implements
 AsyncBufferedMutatorBuilderImpl(AsyncConnectionConfiguration connConf,
-    AsyncTableBuilder<? extends AsyncTableBase> tableBuilder)
+    AsyncTableBuilder<?> tableBuilder)
 
@@ -259,7 +259,7 @@ implements
 tableBuilder
-private final AsyncTableBuilder<? extends AsyncTableBase> tableBuilder
+private final AsyncTableBuilder<?> tableBuilder
 
@@ -286,7 +286,7 @@ implements
 AsyncBufferedMutatorBuilderImpl
 public AsyncBufferedMutatorBuilderImpl(AsyncConnectionConfiguration connConf,
-    AsyncTableBuilder<? extends AsyncTableBase> tableBuilder)
+    AsyncTableBuilder<?> tableBuilder)

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67a6e2ec/devapidocs/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.html
index 9dd1e62..fa2df14 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 class AsyncBufferedMutatorImpl
 extends java.lang.Object
 implements AsyncBufferedMutator
-The implementation of AsyncBufferedMutator. Simply wrap an AsyncTableBase.
+The implementation of AsyncBufferedMutator. Simply wrap an AsyncTable.
 
@@ -153,7 +153,7 @@ implements
 mutations
 
-private AsyncTableBase
+private AsyncTable<?>
 table
 
@@ -175,7 +175,7 @@ implements
 Constructor and Description
 
-AsyncBufferedMutatorImpl(AsyncTableBase table,
+AsyncBufferedMutatorImpl(AsyncTable<?> table,
     long writeBufferSize)
 
@@ -268,7 +268,7 @@ implements
 table
-private final AsyncTableBase table
+private final AsyncTable<?> table
 
@@ -324,13 +324,13 @@ implements
 
 AsyncBufferedMutatorImpl
-AsyncBufferedMutatorImpl(AsyncTableBase table,
+AsyncBufferedMutatorImpl(AsyncTable<?> table,
     long writeBufferSize)
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67a6e2ec/devapidocs/org/apache/hadoop/hbase/client/AsyncClientScanner.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncClientScanner.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncClientScanner.html
index 83439e4..325fa89 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncClientScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncClientScanner.html
@@ -160,7 +160,7 @@ extends java.lang.Object
 conn
 
-private RawScanResultConsumer
+private AdvancedScanResultConsumer
 consumer
 
@@ -218,8 +218,8 @@ extends java.lang.Object
 Constructor and Description
 
-AsyncClientScanner(Scan scan,
-    RawScanResultConsumer consumer,
+AsyncClientScanner(Scan scan,
+    AdvancedScanResultConsumer consumer,
     TableName tableName,
     AsyncConnectionImpl conn,
     long pauseNs,
@@ -307,7 +307,7 @@ extends java.lang.Object
 
 consumer
-private final RawScanResultConsumer consumer
+private final AdvancedScanResultConsumer consumer
 
@@ -399,14 +399,14 @@ extends java.lang.Object
 
 Constructor Detail
 
 AsyncClientScanner
 public AsyncClientScanner(Scan scan,
-    RawScanResultConsumer consumer,
+    AdvancedScanResultConsumer consumer,
     TableName tableName,
     AsyncConnectionImpl conn,
     long pauseNs,
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67a6e2ec/devapidocs/org/apache/hadoop/hbase/client/AsyncConnection.html
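
For context on the AsyncTableBase to AsyncTable<?> change in this mail: AsyncBufferedMutatorImpl simply wraps a table and buffers mutations until the write buffer fills. A hedged usage sketch against the public builder API; 'conn', the table, and the row values are invented:

    import java.util.concurrent.CompletableFuture;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncBufferedMutator;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    // 'conn' is an AsyncConnection obtained elsewhere, e.g. from
    // ConnectionFactory.createAsyncConnection(conf).get().
    AsyncBufferedMutator mutator = conn.getBufferedMutatorBuilder(TableName.valueOf("t1"))
        .setWriteBufferSize(4L * 1024 * 1024) // flush once ~4 MB of mutations are buffered
        .build();
    CompletableFuture<Void> done = mutator.mutate(
        new Put(Bytes.toBytes("row1"))
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    mutator.close(); // flushes anything still buffered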

[14/51] [partial] hbase-site git commit: Published site at .

2017-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cba900e4/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithResult.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithResult.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithResult.html
index 1430cbf..a2eafc9 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithResult.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithResult.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
-public abstract class CoprocessorHost.ObserverOperationWithResult<O,R>
+public abstract class CoprocessorHost.ObserverOperationWithResult<O,R>
 extends CoprocessorHost.ObserverOperation<O>
 
@@ -266,7 +266,7 @@ extends
 result
-private R result
+private R result
 
@@ -285,7 +285,7 @@ extends
 ObserverOperationWithResult
-public ObserverOperationWithResult(CoprocessorHost.ObserverGetter<C,O> observerGetter,
+public ObserverOperationWithResult(CoprocessorHost.ObserverGetter<C,O> observerGetter,
     R result)
 
@@ -297,7 +297,7 @@ extends
 ObserverOperationWithResult
-public ObserverOperationWithResult(CoprocessorHost.ObserverGetter<C,O> observerGetter,
+public ObserverOperationWithResult(CoprocessorHost.ObserverGetter<C,O> observerGetter,
     R result,
     boolean bypassable)
 
@@ -310,7 +310,7 @@ extends
 ObserverOperationWithResult
-public ObserverOperationWithResult(CoprocessorHost.ObserverGetter<C,O> observerGetter,
+public ObserverOperationWithResult(CoprocessorHost.ObserverGetter<C,O> observerGetter,
     R result,
     User user)
 
@@ -323,7 +323,7 @@ extends
 ObserverOperationWithResult
-private ObserverOperationWithResult(CoprocessorHost.ObserverGetter<C,O> observerGetter,
+private ObserverOperationWithResult(CoprocessorHost.ObserverGetter<C,O> observerGetter,
     R result,
     User user,
     boolean bypassable)
@@ -345,7 +345,7 @@ extends
 
 call
-protected abstract R call(O observer)
+protected abstract R call(O observer)
     throws java.io.IOException
 
 Throws:
@@ -359,7 +359,7 @@ extends
 
 getResult
-protected R getResult()
+protected R getResult()
 
@@ -368,7 +368,7 @@ extends
 
 callObserver
-void callObserver()
+void callObserver()
     throws java.io.IOException
 
 Specified by:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cba900e4/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html
index cc376cb..934349c 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html
@@ -131,7 +131,7 @@ var activeTableTab = "activeTableTab";
 
-public abstract class CoprocessorHost.ObserverOperationWithoutResult<O>
+public abstract class CoprocessorHost.ObserverOperationWithoutResult<O>
 extends CoprocessorHost.ObserverOperation<O>
 
@@ -246,7 +246,7 @@ extends
 ObserverOperationWithoutResult
-public ObserverOperationWithoutResult(CoprocessorHost.ObserverGetter<C,O> observerGetter)
+public ObserverOperationWithoutResult(CoprocessorHost.ObserverGetter<C,O> observerGetter)
 
@@ -255,7 +255,7 @@ extends
 ObserverOperationWithoutResult
-public ObserverOperationWithoutResult(CoprocessorHost.ObserverGetter<C,O> observerGetter,
+public ObserverOperationWithoutResult(CoprocessorHost.ObserverGetter<C,O> observerGetter,
     User user)
 
@@ -265,7 +265,7 @@ extends
 ObserverOperationWithoutResult
-public ObserverOperationWithoutResult(CoprocessorHost.ObserverGetter<C,O> observerGetter,
+public ObserverOperationWithoutResult(CoprocessorHost.ObserverGetter<C,O> observerGetter,
     User user,
     boolean bypassable)
 
@@ -286,7 +286,7 @@ extends
 call
-protected abstract void call(O observer)
+protected abstract void call(O observer)
     throws

[14/51] [partial] hbase-site git commit: Published site at .

2017-11-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6607d33c/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
index e75cd67..ec0de14 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
@@ -298,7 +298,7 @@ service.
 
 private static HRegionLocation
-AsyncMetaTableAccessor.getRegionLocation(Result r,
+MetaTableAccessor.getRegionLocation(Result r,
     RegionInfo regionInfo,
     int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result
@@ -307,7 +307,7 @@ service.
 
 private static HRegionLocation
-MetaTableAccessor.getRegionLocation(Result r,
+AsyncMetaTableAccessor.getRegionLocation(Result r,
     RegionInfo regionInfo,
     int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result
@@ -315,55 +315,55 @@ service.
 
-private static java.util.Optional<RegionLocations>
-AsyncMetaTableAccessor.getRegionLocations(Result r)
+static RegionLocations
+MetaTableAccessor.getRegionLocations(Result r)
 Returns an HRegionLocationList extracted from the result.
 
-static RegionLocations
-MetaTableAccessor.getRegionLocations(Result r)
+private static java.util.Optional<RegionLocations>
+AsyncMetaTableAccessor.getRegionLocations(Result r)
 Returns an HRegionLocationList extracted from the result.
 
 private static long
-AsyncMetaTableAccessor.getSeqNumDuringOpen(Result r,
+MetaTableAccessor.getSeqNumDuringOpen(Result r,
     int replicaId)
 The latest seqnum that the server writing to meta observed when opening the region.
 
 private static long
-MetaTableAccessor.getSeqNumDuringOpen(Result r,
+AsyncMetaTableAccessor.getSeqNumDuringOpen(Result r,
     int replicaId)
 The latest seqnum that the server writing to meta observed when opening the region.
 
-private static java.util.Optional<ServerName>
-AsyncMetaTableAccessor.getServerName(Result r,
+static ServerName
+MetaTableAccessor.getServerName(Result r,
     int replicaId)
 Returns a ServerName from catalog table Result.
 
-static ServerName
-MetaTableAccessor.getServerName(Result r,
+private static java.util.Optional<ServerName>
+AsyncMetaTableAccessor.getServerName(Result r,
     int replicaId)
 Returns a ServerName from catalog table Result.
 
-private static java.util.Optional<TableState>
-AsyncMetaTableAccessor.getTableState(Result r)
-
-
 static TableState
 MetaTableAccessor.getTableState(Result r)
 Decode table state from META Result.
 
+
+private static java.util.Optional<TableState>
+AsyncMetaTableAccessor.getTableState(Result r)
+
 
 void
 AsyncMetaTableAccessor.MetaTableRawScanResultConsumer.onNext(Result[] results,
@@ -459,13 +459,13 @@ service.
 ClientScanner.cache
 
-private java.util.List<Result>
-CompleteScanResultCache.partialResults
-
-
 private java.util.Deque<Result>
 BatchScanResultCache.partialResults
 
+
+private java.util.List<Result>
+CompleteScanResultCache.partialResults
+
 
 private java.util.Queue<Result>
 AsyncTableResultScanner.queue
@@ -488,7 +488,7 @@ service.
 
 Result[]
-AllowPartialScanResultCache.addAndGet(Result[] results,
+BatchScanResultCache.addAndGet(Result[] results,
     boolean isHeartbeatMessage)
 
@@ -498,26 +498,22 @@ service.
 
 Result[]
-BatchScanResultCache.addAndGet(Result[] results,
+AllowPartialScanResultCache.addAndGet(Result[] results,
     boolean isHeartbeatMessage)
 
 Result
-Table.append(Append append)
+HTable.append(Append append)
 Appends values to one or more columns within a single row.
 
 Result
-HTable.append(Append append)

[14/51] [partial] hbase-site git commit: Published site at .

2017-11-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/abb69192/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html
index a89df18..ea0bc8c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html
@@ -49,26 +49,26 @@
 041import org.apache.hadoop.hbase.ByteBufferKeyValue;
 042import org.apache.hadoop.hbase.SizeCachedKeyValue;
 043import org.apache.hadoop.hbase.SizeCachedNoTagsKeyValue;
-044import org.apache.yetus.audience.InterfaceAudience;
-045import org.apache.hadoop.hbase.fs.HFileSystem;
-046import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-047import org.apache.hadoop.hbase.io.compress.Compression;
-048import org.apache.hadoop.hbase.io.crypto.Cipher;
-049import org.apache.hadoop.hbase.io.crypto.Encryption;
-050import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
-051import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-052import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
-053import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
-054import org.apache.hadoop.hbase.nio.ByteBuff;
-055import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
-056import org.apache.hadoop.hbase.security.EncryptionUtil;
-057import org.apache.hadoop.hbase.util.ByteBufferUtils;
-058import org.apache.hadoop.hbase.util.Bytes;
-059import org.apache.hadoop.hbase.util.IdLock;
-060import org.apache.hadoop.hbase.util.ObjectIntPair;
-061import org.apache.hadoop.io.WritableUtils;
-062import org.apache.htrace.Trace;
-063import org.apache.htrace.TraceScope;
+044import org.apache.hadoop.hbase.trace.TraceUtil;
+045import org.apache.yetus.audience.InterfaceAudience;
+046import org.apache.hadoop.hbase.fs.HFileSystem;
+047import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
+048import org.apache.hadoop.hbase.io.compress.Compression;
+049import org.apache.hadoop.hbase.io.crypto.Cipher;
+050import org.apache.hadoop.hbase.io.crypto.Encryption;
+051import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
+052import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+053import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
+054import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
+055import org.apache.hadoop.hbase.nio.ByteBuff;
+056import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
+057import org.apache.hadoop.hbase.security.EncryptionUtil;
+058import org.apache.hadoop.hbase.util.ByteBufferUtils;
+059import org.apache.hadoop.hbase.util.Bytes;
+060import org.apache.hadoop.hbase.util.IdLock;
+061import org.apache.hadoop.hbase.util.ObjectIntPair;
+062import org.apache.hadoop.io.WritableUtils;
+063import org.apache.htrace.core.TraceScope;
 064
 065import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 066
@@ -263,1235 +263,1235 @@
 255    // Prefetch file blocks upon open if requested
 256    if (cacheConf.shouldPrefetchOnOpen()) {
 257      PrefetchExecutor.request(path, new Runnable() {
-258        public void run() {
-259          long offset = 0;
-260          long end = 0;
-261          try {
-262            end = getTrailer().getLoadOnOpenDataOffset();
-263            if (LOG.isTraceEnabled()) {
-264              LOG.trace("Prefetch start " + getPathOffsetEndStr(path, offset, end));
-265            }
-266            // TODO: Could we use block iterator in here? Would that get stuff into the cache?
-267            HFileBlock prevBlock = null;
-268            while (offset < end) {
-269              if (Thread.interrupted()) {
-270                break;
-271              }
-272              // Perhaps we got our block from cache? Unlikely as this may be, if it happens, then
-273              // the internal-to-hfileblock thread local which holds the overread that gets the
-274              // next header, will not have happened...so, pass in the onDiskSize gotten from the
-275              // cached block. This 'optimization' triggers extremely rarely I'd say.
-276              long onDiskSize = prevBlock != null ? prevBlock.getNextBlockOnDiskSize() : -1;
-277              HFileBlock block = readBlock(offset, onDiskSize, true, false, false, false,
-278                  null, null);
-279              // Need not update the current block. Ideally here the readBlock won't find the
-280              // block in cache. We call this readBlock so that block data is read from FS and
-281              // cached in BC. So there is no reference count increment that happens here.
-282              // The return will ideally be a noop because
[14/51] [partial] hbase-site git commit: Published site at .

2017-11-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/809180c4/devapidocs/org/apache/hadoop/hbase/zookeeper/ZNodePaths.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/zookeeper/ZNodePaths.html b/devapidocs/org/apache/hadoop/hbase/zookeeper/ZNodePaths.html
index 27a35c6..a23b44a 100644
--- a/devapidocs/org/apache/hadoop/hbase/zookeeper/ZNodePaths.html
+++ b/devapidocs/org/apache/hadoop/hbase/zookeeper/ZNodePaths.html
@@ -18,8 +18,8 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":9,"i5":10};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
@@ -221,6 +221,10 @@ extends java.lang.Object
 Deprecated.
 
+
+static char
+ZNODE_PATH_SEPARATOR
+
@@ -248,7 +252,7 @@ extends java.lang.Object
 
 Method Summary
 
-All Methods | Instance Methods | Concrete Methods
+All Methods | Static Methods | Instance Methods | Concrete Methods
 
 Modifier and Type
 Method and Description
@@ -278,6 +282,14 @@ extends java.lang.Object
 
+static java.lang.String
+joinZNode(java.lang.String prefix,
+    java.lang.String suffix)
+Join the prefix znode name with the suffix znode name to generate a proper
+ full znode name.
+
+
 java.lang.String
 toString()
 
@@ -303,13 +315,26 @@ extends java.lang.Object
 
 Field Detail
+
+ZNODE_PATH_SEPARATOR
+public static final char ZNODE_PATH_SEPARATOR
+
+See Also:
+Constant Field Values
+
 
 META_ZNODE_PREFIX
-public static final java.lang.String META_ZNODE_PREFIX
+public static final java.lang.String META_ZNODE_PREFIX
 
 See Also:
 Constant Field Values
@@ -322,7 +347,7 @@ extends java.lang.Object
 
 baseZNode
-public final java.lang.String baseZNode
+public final java.lang.String baseZNode
 
@@ -331,7 +356,7 @@ extends java.lang.Object
 
 metaZNodePrefix
-public final java.lang.String metaZNodePrefix
+public final java.lang.String metaZNodePrefix
 
@@ -340,7 +365,7 @@ extends java.lang.Object
 
 metaReplicaZNodes
-public final org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap<Integer,String> metaReplicaZNodes
+public final org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap<Integer,String> metaReplicaZNodes
 
@@ -349,7 +374,7 @@ extends java.lang.Object
 
 rsZNode
-public final java.lang.String rsZNode
+public final java.lang.String rsZNode
 
@@ -358,7 +383,7 @@ extends
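
The new static joinZNode(String, String) above concatenates znode names with ZNODE_PATH_SEPARATOR. A small hedged sketch; the paths are invented and the separator is the usual '/' of ZooKeeper paths:

    import org.apache.hadoop.hbase.zookeeper.ZNodePaths;

    String base = "/hbase";
    String rs = ZNodePaths.joinZNode(base, "rs");              // "/hbase/rs"
    String server = ZNodePaths.joinZNode(rs, "host1,16020,1"); // "/hbase/rs/host1,16020,1"
    char sep = ZNodePaths.ZNODE_PATH_SEPARATOR;                // '/'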

[14/51] [partial] hbase-site git commit: Published site at .

2017-11-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a108018f/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.html
index ffb1464..da77107 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.html
@@ -6,7 +6,7 @@
 
-001/**
+001/*
 002 * Licensed to the Apache Software Foundation (ASF) under one
 003 * or more contributor license agreements.  See the NOTICE file
 004 * distributed with this work for additional information
@@ -128,9 +128,9 @@
 120    // Get the replica count
 121    int regionReplicaCount = hTableDescriptor.getRegionReplication();
 122
-123    // Get the regions for the table from the memory
+123    // Get the regions for the table from memory; get both online and offline regions ('true').
 124    List<RegionInfo> regionsOfTable =
-125        env.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName);
+125        env.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName, true);
 126
 127    if (regionReplicaCount > 1) {
 128      int currentMaxReplica = 0;


[14/51] [partial] hbase-site git commit: Published site at .

2017-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b3f2bee/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.html
index 54e3200..8e60154 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.html
@@ -313,7 +313,7 @@ implements RegionObserver
-postAppend, postBatchMutate, postBatchMutateIndispensably, postBulkLoadHFile, postCheckAndDelete, postCheckAndPut, postClose, postCloseRegionOperation, postCommitStoreFile, postCompact, postCompactSelection, postDelete, postExists, postFlush, postFlush, postGetOp, postIncrement, postInstantiateDeleteTracker, postLogReplay, postMemStoreCompaction, postMutationBeforeWAL, postOpen, postPut, postReplayWALs, postScannerClose, postScannerFilterRow, postScannerNext, postScannerOpen, postStartRegionOperation, postStoreFileReaderOpen, postWALRestore, preAppend, preAppendAfterRowLock, preBatchMutate, preBulkLoadHFile, preCheckAndDelete, preCheckAndDeleteAfterRowLock, preCheckAndPut, preCheckAndPutAfterRowLock, preClose, preCommitStoreFile, preCompact, preCompactSelection, preDelete, preExists, preFlush, preFlush, preGetOp, preIncrement, preIncrementAfterRowLock, preMemStoreCompaction, preMemStoreCompactionCompact, preMemStoreCompactionCompactScannerOpen, preOpen, prePrepareTimeStampForDeleteVersion, prePut, preReplayWALs, preScannerClose, preScannerNext, preScannerOpen, preStoreFileReaderOpen, preStoreScannerOpen, preWALRestore
+postAppend, postBatchMutate, postBatchMutateIndispensably, postBulkLoadHFile, postCheckAndDelete, postCheckAndPut, postClose, postCloseRegionOperation, postCommitStoreFile, postCompact, postCompactSelection, postDelete, postExists, postFlush, postFlush, postGetOp, postIncrement, postInstantiateDeleteTracker, postMemStoreCompaction, postMutationBeforeWAL, postOpen, postPut, postReplayWALs, postScannerClose, postScannerFilterRow, postScannerNext, postScannerOpen, postStartRegionOperation, postStoreFileReaderOpen, postWALRestore, preAppend, preAppendAfterRowLock, preBatchMutate, preBulkLoadHFile, preCheckAndDelete, preCheckAndDeleteAfterRowLock, preCheckAndPut, preCheckAndPutAfterRowLock, preClose, preCommitStoreFile, preCompact, preCompactSelection, preDelete, preExists, preFlush, preFlush, preGetOp, preIncrement, preIncrementAfterRowLock, preMemStoreCompaction, preMemStoreCompactionCompact, preMemStoreCompactionCompactScannerOpen, preOpen, prePrepareTimeStampForDeleteVersion, prePut, preReplayWALs, preScannerClose, preScannerNext, preScannerOpen, preStoreFileReaderOpen, preStoreScannerOpen, preWALRestore
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b3f2bee/devapidocs/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.html b/devapidocs/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.html
index 860df24..f0c79d7 100644
--- a/devapidocs/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.html
+++ b/devapidocs/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.html
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
[two navigation-link hunks elided: only the Next Class link target changed]

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b3f2bee/devapidocs/org/apache/hadoop/hbase/exceptions/RegionInRecoveryException.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/exceptions/RegionInRecoveryException.html b/devapidocs/org/apache/hadoop/hbase/exceptions/RegionInRecoveryException.html
deleted file mode 100644
index ac45f37..0000000
--- a/devapidocs/org/apache/hadoop/hbase/exceptions/RegionInRecoveryException.html
+++ /dev/null
@@ -1,337 +0,0 @@
-RegionInRecoveryException (Apache HBase 3.0.0-SNAPSHOT API)
-[standard Javadoc page boilerplate elided: doctype, scripts, and navigation links]
[14/51] [partial] hbase-site git commit: Published site at .

2017-11-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2cef721c/devapidocs/src-html/org/apache/hadoop/hbase/client/Result.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Result.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/Result.html
index 396e574..c28f3c1 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Result.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Result.html
@@ -43,978 +43,977 @@
 035
 036import org.apache.hadoop.hbase.Cell;
 037import org.apache.hadoop.hbase.CellComparator;
-038import org.apache.hadoop.hbase.CellComparatorImpl;
-039import org.apache.hadoop.hbase.CellScannable;
-040import org.apache.hadoop.hbase.CellScanner;
-041import org.apache.hadoop.hbase.CellUtil;
-042import org.apache.hadoop.hbase.HConstants;
-043import org.apache.hadoop.hbase.PrivateCellUtil;
-044import org.apache.hadoop.hbase.KeyValue;
-045import org.apache.hadoop.hbase.KeyValueUtil;
-046import org.apache.yetus.audience.InterfaceAudience;
-047import org.apache.hadoop.hbase.util.Bytes;
-048
-049/**
-050 * Single row result of a {@link Get} or {@link Scan} query.<p>
-051 *
-052 * This class is <b>NOT THREAD SAFE</b>.<p>
-053 *
-054 * Convenience methods are available that return various {@link Map}
-055 * structures and values directly.<p>
-056 *
-057 * To get a complete mapping of all cells in the Result, which can include
-058 * multiple families and multiple versions, use {@link #getMap()}.<p>
-059 *
-060 * To get a mapping of each family to its columns (qualifiers and values),
-061 * including only the latest version of each, use {@link #getNoVersionMap()}.
-062 *
-063 * To get a mapping of qualifiers to latest values for an individual family use
-064 * {@link #getFamilyMap(byte[])}.<p>
-065 *
-066 * To get the latest value for a specific family and qualifier use
-067 * {@link #getValue(byte[], byte[])}.
-068 *
-069 * A Result is backed by an array of {@link Cell} objects, each representing
-070 * an HBase cell defined by the row, family, qualifier, timestamp, and value.<p>
-071 *
-072 * The underlying {@link Cell} objects can be accessed through the method {@link #listCells()}.
-073 * This will create a List from the internal Cell []. Better is to exploit the fact that
-074 * a new Result instance is a primed {@link CellScanner}; just call {@link #advance()} and
-075 * {@link #current()} to iterate over Cells as you would any {@link CellScanner}.
-076 * Call {@link #cellScanner()} to reset should you need to iterate the same Result over again
-077 * ({@link CellScanner}s are one-shot).
-078 *
-079 * If you need to overwrite a Result with another Result instance -- as in the old 'mapred'
-080 * RecordReader next invocations -- then create an empty Result with the null constructor and
-081 * then use {@link #copyFrom(Result)}
-082 */
-083@InterfaceAudience.Public
-084public class Result implements CellScannable, CellScanner {
-085  private Cell[] cells;
-086  private Boolean exists; // if the query was just to check existence.
-087  private boolean stale = false;
-088
-089  /**
-090   * See {@link #mayHaveMoreCellsInRow()}.
-091   */
-092  private boolean mayHaveMoreCellsInRow = false;
-093  // We're not using java serialization.  Transient here is just a marker to say
-094  // that this is where we cache row if we're ever asked for it.
-095  private transient byte [] row = null;
-096  // Ditto for familyMap.  It can be composed on fly from passed in kvs.
-097  private transient NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>>
-098      familyMap = null;
-099
-100  private static ThreadLocal<byte[]> localBuffer = new ThreadLocal<>();
-101  private static final int PAD_WIDTH = 128;
-102  public static final Result EMPTY_RESULT = new Result(true);
-103
-104  private final static int INITIAL_CELLSCANNER_INDEX = -1;
-105
-106  /**
-107   * Index for where we are when Result is acting as a {@link CellScanner}.
-108   */
-109  private int cellScannerIndex = INITIAL_CELLSCANNER_INDEX;
-110  private RegionLoadStats stats;
-111
-112  private final boolean readonly;
-113
-114  private Cursor cursor = null;
-115
-116  /**
-117   * Creates an empty Result w/ no KeyValue payload; returns null if you call {@link #rawCells()}.
-118   * Use this to represent no results if {@code null} won't do or in old 'mapred' as opposed
-119   * to 'mapreduce' package MapReduce where you need to overwrite a Result instance with a
-120   * {@link #copyFrom(Result)} call.
-121   */
-122  public Result() {
-123    this(false);
-124  }
-125
-126  /**
-127   * Allows to construct special purpose immutable Result objects,
-128   * such as EMPTY_RESULT.
-129   * @param readonly whether this Result instance is readonly
-130   */
-131  private Result(boolean readonly) {
-132    this.readonly = readonly;
-133  }
-134
-135  /**
-136   * Instantiate a Result with the
[14/51] [partial] hbase-site git commit: Published site at .

2017-11-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32453e2d/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
index 531081e..a22e5ce 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
@@ -34,2832 +34,3011 @@
 026import java.util.Collections;
 027import java.util.EnumSet;
 028import java.util.HashMap;
-029import java.util.LinkedList;
-030import java.util.List;
-031import java.util.Map;
-032import java.util.Optional;
-033import java.util.Set;
-034import java.util.concurrent.CompletableFuture;
-035import java.util.concurrent.TimeUnit;
-036import java.util.concurrent.atomic.AtomicReference;
-037import java.util.function.BiConsumer;
-038import java.util.function.Function;
-039import java.util.regex.Pattern;
-040import java.util.stream.Collectors;
-041import java.util.stream.Stream;
-042
-043import org.apache.commons.io.IOUtils;
-044import org.apache.commons.logging.Log;
-045import org.apache.commons.logging.LogFactory;
-046import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-047import org.apache.hadoop.hbase.ClusterStatus;
-048import org.apache.hadoop.hbase.ClusterStatus.Option;
-049import org.apache.hadoop.hbase.HConstants;
-050import org.apache.hadoop.hbase.HRegionLocation;
-051import org.apache.hadoop.hbase.MetaTableAccessor;
-052import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-053import org.apache.hadoop.hbase.NamespaceDescriptor;
-054import org.apache.hadoop.hbase.RegionLoad;
-055import org.apache.hadoop.hbase.RegionLocations;
-056import org.apache.hadoop.hbase.ServerName;
-057import org.apache.hadoop.hbase.TableExistsException;
-058import org.apache.hadoop.hbase.TableName;
-059import org.apache.hadoop.hbase.TableNotDisabledException;
-060import org.apache.hadoop.hbase.TableNotEnabledException;
-061import org.apache.hadoop.hbase.TableNotFoundException;
-062import org.apache.hadoop.hbase.UnknownRegionException;
-063import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-064import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-065import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder;
-066import org.apache.hadoop.hbase.client.RawAsyncTable.CoprocessorCallable;
-067import org.apache.hadoop.hbase.client.Scan.ReadType;
-068import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-069import org.apache.hadoop.hbase.client.replication.TableCFs;
-070import org.apache.hadoop.hbase.client.security.SecurityCapability;
-071import org.apache.hadoop.hbase.exceptions.DeserializationException;
-072import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-073import org.apache.hadoop.hbase.quotas.QuotaFilter;
-074import org.apache.hadoop.hbase.quotas.QuotaSettings;
-075import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-076import org.apache.hadoop.hbase.replication.ReplicationException;
-077import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-078import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-079import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-080import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-081import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-082import org.apache.hadoop.hbase.util.Bytes;
-083import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-084import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-085import org.apache.hadoop.hbase.util.Pair;
-086import org.apache.yetus.audience.InterfaceAudience;
-087
-088import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-089import org.apache.hadoop.hbase.shaded.io.netty.util.Timeout;
-090import org.apache.hadoop.hbase.shaded.io.netty.util.TimerTask;
-091import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-092import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-099import
[14/51] [partial] hbase-site git commit: Published site at .

2017-11-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/315ffef7/devapidocs/org/apache/hadoop/hbase/codec/prefixtree/decode/class-use/DecoderFactory.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/codec/prefixtree/decode/class-use/DecoderFactory.html b/devapidocs/org/apache/hadoop/hbase/codec/prefixtree/decode/class-use/DecoderFactory.html
deleted file mode 100644
index 19e3b63..0000000
--- a/devapidocs/org/apache/hadoop/hbase/codec/prefixtree/decode/class-use/DecoderFactory.html
+++ /dev/null
@@ -1,125 +0,0 @@
-Uses of Class org.apache.hadoop.hbase.codec.prefixtree.decode.DecoderFactory (Apache HBase 3.0.0-SNAPSHOT API)
-[standard Javadoc page boilerplate elided: doctype, scripts, and navigation links]
-
-No usage of org.apache.hadoop.hbase.codec.prefixtree.decode.DecoderFactory
-
-Copyright 2007-2017 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/315ffef7/devapidocs/org/apache/hadoop/hbase/codec/prefixtree/decode/class-use/PrefixTreeArrayReversibleScanner.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/codec/prefixtree/decode/class-use/PrefixTreeArrayReversibleScanner.html b/devapidocs/org/apache/hadoop/hbase/codec/prefixtree/decode/class-use/PrefixTreeArrayReversibleScanner.html
deleted file mode 100644
index 2545bca..0000000
--- a/devapidocs/org/apache/hadoop/hbase/codec/prefixtree/decode/class-use/PrefixTreeArrayReversibleScanner.html
+++ /dev/null
@@ -1,169 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-
-
-
-Uses of Class 
org.apache.hadoop.hbase.codec.prefixtree.decode.PrefixTreeArrayReversibleScanner
 (Apache HBase 3.0.0-SNAPSHOT API)
-
-
-
-
-
-
-
-JavaScript is disabled on your browser.
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-Prev
-Next
-
-
-Frames
-NoFrames
-
-
-AllClasses
-
-
-
-
-
-
-
-
-
-
-Uses of Class org.apache.hadoop.hbase.codec.prefixtree.decode.PrefixTreeArrayReversibleScanner
-
-Packages that use PrefixTreeArrayReversibleScanner:
-  org.apache.hadoop.hbase.codec.prefixtree.decode
-
-Uses of PrefixTreeArrayReversibleScanner in org.apache.hadoop.hbase.codec.prefixtree.decode
-Subclasses of PrefixTreeArrayReversibleScanner in org.apache.hadoop.hbase.codec.prefixtree.decode:
-  class PrefixTreeArraySearcher
-    Searcher extends the capabilities of the Scanner + ReversibleScanner to add the ability to
-    position itself on a requested Cell without scanning through cells before it.


[14/51] [partial] hbase-site git commit: Published site at .

2017-11-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7d38bdbb/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
index 0d33cae..19fa457 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
@@ -26,564 +26,607 @@
 018package org.apache.hadoop.hbase.client;
 019
 020import static 
java.util.stream.Collectors.toList;
-021import static 
org.apache.hadoop.hbase.HConstants.EMPTY_END_ROW;
-022import static 
org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW;
-023import static 
org.apache.hadoop.hbase.client.ConnectionUtils.checkHasFamilies;
-024import static 
org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
+021import static 
org.apache.hadoop.hbase.client.ConnectionUtils.checkHasFamilies;
+022import static 
org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
+023
+024import com.google.protobuf.RpcChannel;
 025
-026import com.google.protobuf.RpcChannel;
-027
-028import java.io.IOException;
-029import java.util.ArrayList;
-030import java.util.Arrays;
-031import java.util.List;
-032import java.util.Optional;
-033import 
java.util.concurrent.CompletableFuture;
-034import java.util.concurrent.TimeUnit;
-035import 
java.util.concurrent.atomic.AtomicBoolean;
-036import 
java.util.concurrent.atomic.AtomicInteger;
-037import java.util.function.Function;
-038
-039import 
org.apache.hadoop.conf.Configuration;
-040import 
org.apache.hadoop.hbase.CompareOperator;
-041import 
org.apache.hadoop.hbase.HRegionLocation;
-042import 
org.apache.hadoop.hbase.TableName;
-043import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder;
-044import 
org.apache.hadoop.hbase.filter.BinaryComparator;
-045import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-046import 
org.apache.hadoop.hbase.util.Bytes;
-047import 
org.apache.hadoop.hbase.util.ReflectionUtils;
-048import 
org.apache.yetus.audience.InterfaceAudience;
-049
-050import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-051import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-052import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-053import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-054import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-055import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-056import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest;
-057import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetResponse;
-058import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest;
-059import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiResponse;
-060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateResponse;
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction;
-063import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.CompareType;
-064
-065/**
-066 * The implementation of RawAsyncTable.
-067 */
-068@InterfaceAudience.Private
-069class RawAsyncTableImpl implements 
RawAsyncTable {
+026import java.io.IOException;
+027import java.util.ArrayList;
+028import java.util.Arrays;
+029import java.util.List;
+030import 
java.util.concurrent.CompletableFuture;
+031import java.util.concurrent.TimeUnit;
+032import 
java.util.concurrent.atomic.AtomicBoolean;
+033import 
java.util.concurrent.atomic.AtomicInteger;
+034import java.util.function.Function;
+035
+036import 
org.apache.hadoop.conf.Configuration;
+037import 
org.apache.hadoop.hbase.CompareOperator;
+038import 
org.apache.hadoop.hbase.HConstants;
+039import 
org.apache.hadoop.hbase.HRegionLocation;
+040import 
org.apache.hadoop.hbase.TableName;
+041import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder;
+042import 
org.apache.hadoop.hbase.filter.BinaryComparator;
+043import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
+044import 
org.apache.hadoop.hbase.util.Bytes;
+045import 
org.apache.hadoop.hbase.util.ReflectionUtils;
+046import 
org.apache.yetus.audience.InterfaceAudience;
+047
+048import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
+049import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
+050import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+051import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+052import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
+053import 

[14/51] [partial] hbase-site git commit: Published site at .

2017-11-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/93ae3fc9/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
index f634b6d..bc3123d 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
@@ -1260,17 +1260,23 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 
-MemStoreCompactor
-The ongoing MemStore Compaction manager, dispatches a solo 
running compaction and interrupts
- the compaction if requested.
+MemStoreCompactionStrategy
+MemStoreCompactionStrategy is the root of a class hierarchy 
which defines the strategy for
+ choosing the next action to apply in an (in-memory) memstore compaction.
 
 
 
-MemStoreCompactor.Action
+MemStoreCompactionStrategy.Action
 Types of actions to be done on the pipeline upon 
MemStoreCompaction invocation.
 
 
 
+MemStoreCompactor
+The ongoing MemStore Compaction manager, dispatches a solo 
running compaction and interrupts
+ the compaction if requested.
+
+
+
 MemStoreFlusher
 Thread that flushes cache on request
 
@@ -1279,23 +1285,23 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
  sleep time which is invariant.
 
 
-
+
 MemStoreFlusher.FlushHandler
 
-
+
 MemStoreFlusher.FlushQueueEntry
 
-
+
 MemStoreFlusher.FlushRegionEntry
 Datastructure used in the flush queue.
 
 
-
+
 MemStoreLAB
 A memstore-local allocation buffer.
 
 
-
+
 MemStoreSegmentsIterator
 The MemStoreSegmentsIterator is designed to perform one 
iteration over given list of segments
  For another iteration new instance of MemStoreSegmentsIterator needs to be 
created
@@ -1303,512 +1309,512 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
  in each period of time
 
 
-
+
 MemStoreSize
 Reports the data size part and total heap space occupied by 
the MemStore.
 
 
-
+
 MemStoreSizing
 Accounting of current heap and data sizes.
 
 
-
+
 MemStoreSnapshot
 Holds details of the snapshot taken on a MemStore.
 
 
-
+
 MetricsHeapMemoryManager
 This class is for maintaining the various regionserver's 
heap memory manager statistics and
  publishing them through the metrics interfaces.
 
 
-
+
 MetricsHeapMemoryManagerSource
 This interface will be implemented by a MetricsSource that 
will export metrics from
  HeapMemoryManager in RegionServer into the hadoop metrics system.
 
 
-
+
 MetricsHeapMemoryManagerSourceImpl
 Hadoop2 implementation of 
MetricsHeapMemoryManagerSource.
 
 
-
+
 MetricsRegion
 This is the glue between the HRegion and whatever hadoop 
shim layer
  is loaded (hbase-hadoop1-compat or hbase-hadoop2-compat).
 
 
-
+
 MetricsRegionAggregateSource
 This interface will be implemented by a MetricsSource that 
will export metrics from
  multiple regions into the hadoop metrics system.
 
 
-
+
 MetricsRegionAggregateSourceImpl
 
-
+
 MetricsRegionServer
 
  This class is for maintaining the various regionserver statistics
  and publishing them through the metrics interfaces.
 
 
-
+
 MetricsRegionServerSource
 Interface for classes that expose metrics about the 
regionserver.
 
 
-
+
 MetricsRegionServerSourceFactory
 Interface of a factory to create Metrics Sources used 
inside of regionservers.
 
 
-
+
 MetricsRegionServerSourceFactoryImpl.FactoryStorage
 
-
+
 MetricsRegionServerWrapper
 This is the interface that will expose RegionServer 
information to hadoop1/hadoop2
  implementations of the MetricsRegionServerSource.
 
 
-
+
 MetricsRegionSource
 This interface will be implemented to allow single regions 
to push metrics into
  MetricsRegionAggregateSource that will in turn push data to the Hadoop 
metrics system.
 
 
-
+
 MetricsRegionWrapper
 Interface of class that will wrap an HRegion and export 
numbers so they can be
  used in MetricsRegionSource
 
 
-
+
 MetricsRegionWrapperImpl
 
-
+
 MetricsTable
 
-
+
 MetricsTableAggregateSource
 This interface will be implemented by a MetricsSource that 
will export metrics from
  multiple regions of a table into the hadoop metrics system.
 
 
-
+
 MetricsTableAggregateSourceImpl
 
-
+
 MetricsTableSource
 This interface will be implemented to allow region server 
to push table metrics into
  MetricsRegionAggregateSource that will in turn push data to the Hadoop 
metrics system.
 
 
-
+
 MetricsTableWrapperAggregate
 Interface of class that will wrap a MetricsTableSource and 
export numbers so they can be
  used in MetricsTableSource
 
 
-
+
 MetricsTableWrapperAggregateImpl.MetricsTableValues
 
-
+
 MiniBatchOperationInProgress
 Wraps together the mutations which are applied as a batch 
to the region and their operation
  status and WALEdits.
 
 
-
+
 MultiVersionConcurrencyControl
 Manages the read/write consistency.
 
 
-
+
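The MemStoreCompactionStrategy entry earlier in this hunk describes a class hierarchy that chooses the next action to apply during an in-memory memstore compaction. A minimal sketch of that shape, with invented class names and a simplified Action set (these names are assumptions, not the real HBase API):

    // Illustrative only: the strategy decides what to do with a pipeline of
    // immutable in-memory segments; real strategies weigh more signals.
    enum Action { FLATTEN, MERGE }

    abstract class CompactionStrategySketch {
      abstract Action getAction(int numSegmentsInPipeline);
    }

    final class BasicStrategySketch extends CompactionStrategySketch {
      private final int mergeThreshold;

      BasicStrategySketch(int mergeThreshold) {
        this.mergeThreshold = mergeThreshold;
      }

      @Override
      Action getAction(int numSegmentsInPipeline) {
        // Flatten cheaply while the pipeline is short; merge once it grows.
        return numSegmentsInPipeline >= mergeThreshold ? Action.MERGE : Action.FLATTEN;
      }
    }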
 

[14/51] [partial] hbase-site git commit: Published site at .

2017-10-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/35decbe4/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html
index 87257da..add30e1 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html
@@ -116,1006 +116,1008 @@
 108  private static final String 
CONF_BUFFER_SIZE = "snapshot.export.buffer.size";
 109  private static final String 
CONF_MAP_GROUP = "snapshot.export.default.map.group";
 110  private static final String 
CONF_BANDWIDTH_MB = "snapshot.export.map.bandwidth.mb";
-111  protected static final String 
CONF_SKIP_TMP = "snapshot.export.skip.tmp";
-112
-113  static class Testing {
-114static final String CONF_TEST_FAILURE 
= "test.snapshot.export.failure";
-115static final String 
CONF_TEST_FAILURE_COUNT = "test.snapshot.export.failure.count";
-116int failuresCountToInject = 0;
-117int injectedFailureCount = 0;
-118  }
-119
-120  // Command line options and defaults.
-121  static final class Options {
-122static final Option SNAPSHOT = new 
Option(null, "snapshot", true, "Snapshot to restore.");
-123static final Option TARGET_NAME = new 
Option(null, "target", true,
-124"Target name for the 
snapshot.");
-125static final Option COPY_TO = new 
Option(null, "copy-to", true, "Remote "
-126+ "destination hdfs://");
-127static final Option COPY_FROM = new 
Option(null, "copy-from", true,
-128"Input folder hdfs:// (default 
hbase.rootdir)");
-129static final Option 
NO_CHECKSUM_VERIFY = new Option(null, "no-checksum-verify", false,
-130"Do not verify checksum, use 
name+length only.");
-131static final Option NO_TARGET_VERIFY 
= new Option(null, "no-target-verify", false,
-132"Do not verify the integrity of 
the exported snapshot.");
-133static final Option OVERWRITE = new 
Option(null, "overwrite", false,
-134"Rewrite the snapshot manifest if 
already exists.");
-135static final Option CHUSER = new 
Option(null, "chuser", true,
-136"Change the owner of the files to 
the specified one.");
-137static final Option CHGROUP = new 
Option(null, "chgroup", true,
-138"Change the group of the files to 
the specified one.");
-139static final Option CHMOD = new 
Option(null, "chmod", true,
-140"Change the permission of the 
files to the specified one.");
-141static final Option MAPPERS = new 
Option(null, "mappers", true,
-142"Number of mappers to use during 
the copy (mapreduce.job.maps).");
-143static final Option BANDWIDTH = new 
Option(null, "bandwidth", true,
-144"Limit bandwidth to this value in 
MB/second.");
-145  }
-146
-147  // Export Map-Reduce Counters, to keep 
track of the progress
-148  public enum Counter {
-149MISSING_FILES, FILES_COPIED, 
FILES_SKIPPED, COPY_FAILED,
-150BYTES_EXPECTED, BYTES_SKIPPED, 
BYTES_COPIED
-151  }
-152
-153  private static class ExportMapper extends Mapper&lt;BytesWritable, NullWritable,
-154      NullWritable, NullWritable&gt; {
-155private static final Log LOG = 
LogFactory.getLog(ExportMapper.class);
-156final static int REPORT_SIZE = 1 * 
1024 * 1024;
-157final static int BUFFER_SIZE = 64 * 
1024;
-158
-159private boolean verifyChecksum;
-160private String filesGroup;
-161private String filesUser;
-162private short filesMode;
-163private int bufferSize;
-164
-165private FileSystem outputFs;
-166private Path outputArchive;
-167private Path outputRoot;
-168
-169private FileSystem inputFs;
-170private Path inputArchive;
-171private Path inputRoot;
-172
-173private static Testing testing = new 
Testing();
-174
-175@Override
-176public void setup(Context context) 
throws IOException {
-177  Configuration conf = 
context.getConfiguration();
-178
-179  Configuration srcConf = 
HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX);
-180  Configuration destConf = 
HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX);
-181
-182  verifyChecksum = 
conf.getBoolean(CONF_CHECKSUM_VERIFY, true);
-183
-184  filesGroup = 
conf.get(CONF_FILES_GROUP);
-185  filesUser = 
conf.get(CONF_FILES_USER);
-186  filesMode = 
(short)conf.getInt(CONF_FILES_MODE, 0);
-187  outputRoot = new 
Path(conf.get(CONF_OUTPUT_ROOT));
-188  inputRoot = new 
Path(conf.get(CONF_INPUT_ROOT));
-189
-190  inputArchive = new Path(inputRoot, 
HConstants.HFILE_ARCHIVE_DIRECTORY);
-191  outputArchive = new 
Path(outputRoot, HConstants.HFILE_ARCHIVE_DIRECTORY);
-192
-193  try {
-194srcConf.setBoolean("fs." + 

[14/51] [partial] hbase-site git commit: Published site at .

2017-10-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1d9053bc/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html 
b/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
index a29e10c..e1232ef 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
@@ -1204,10 +1204,9 @@ implements 
-RegionScanner
-preScannerOpen(ObserverContext&lt;RegionCoprocessorEnvironment&gt; c,
-  Scan scan,
-  RegionScanner s)
+void
+preScannerOpen(ObserverContext&lt;RegionCoprocessorEnvironment&gt; c,
+  Scan scan)
 Called before the client opens a new scanner.
 
 
@@ -1464,7 +1463,7 @@ implements RegionObserver
-postAppend,
 postBatchMutate,
 postBatchMutateIndispensably,
 postBulkLoadHFile,
 postCheckAndDelete,
 postCheckAndPut,
 postClose,
 postCloseRegionOperation, postCommitStoreFile,
 postCompact,
 postCompactSelection,
 postExists,
 postFlush,
 postFlush,
 postGetOp, postIncrement,
 postInstantiateDeleteTracker,
 postReplayWALs,
 postScannerNext, postStartRegionOperation,
 postStoreFileReaderOpen,
 postWALRestore, preCommitStoreFile,
 preCompactSelection,
 preFlush,
 prePrepareTimeStampForDeleteVersion,
 preReplayWALs,
 preStoreFileReaderOpen,
 preWALRestore
+postAppend,
 postBatchMutate,
 postBatchMutateIndispensably,
 postBulkLoadHFile,
 postCheckAndDelete,
 postCheckAndPut,
 postClose,
 postCloseRegionOperation, postCommitStoreFile,
 postCompact,
 postCompactSelection,
 postExists,
 postFlush,
 postFlush,
 postGetOp, postIncrement,
 postInstantiateDeleteTracker,
 postReplayWALs,
 postScannerNext, postStartRegionOperation,
 postStoreFileReaderOpen,
 postWALRestore, preCommitStoreFile,
 preCompactScannerOpen,
 preComp
 actSelection, preFlush,
 preFlushScannerOpen,
 prePrepareTimeStampForDeleteVersion,
 preReplayWALs,
 preStoreFileReaderOpen,
 preStoreScannerOpen,
 preWALRestore
 
 
 
@@ -3869,21 +3868,18 @@ implements 
+
 
 
 
 
 preScannerOpen
-public RegionScanner preScannerOpen(ObserverContext&lt;RegionCoprocessorEnvironment&gt; c,
-    Scan scan,
-    RegionScanner s)
-  throws IOException
-Description copied from interface: RegionObserver
+public void preScannerOpen(ObserverContext&lt;RegionCoprocessorEnvironment&gt; c,
+    Scan scan)
+  throws IOException
+Description copied from interface: RegionObserver
 Called before the client opens a new scanner.
  
- Call CoprocessorEnvironment#bypass to skip default actions
- 
  Call CoprocessorEnvironment#complete to skip any subsequent chained
  coprocessors
  
@@ -3891,14 +3887,10 @@ implements Specified by:
-preScannerOpen in interface RegionObserver
+preScannerOpen in interface RegionObserver
 Parameters:
 c - the environment provided by the region server
 scan - the Scan specification
-s - if not null, the base scanner
-Returns:
-an RegionScanner instance to use instead of the base scanner if
- overriding default behavior, null otherwise
 Throws:
 http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
@@ -3910,7 +3902,7 @@ implements 
 
 postScannerOpen
-public RegionScanner postScannerOpen(ObserverContext&lt;RegionCoprocessorEnvironment&gt; c,
+public RegionScanner postScannerOpen(ObserverContext&lt;RegionCoprocessorEnvironment&gt; c,
  Scan scan,
  RegionScanner s)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
@@ -3942,7 +3934,7 @@ implements 
 
 preScannerNext
-public boolean preScannerNext(ObserverContext&lt;RegionCoprocessorEnvironment&gt; c,
+public boolean preScannerNext(ObserverContext&lt;RegionCoprocessorEnvironment&gt; c,
   InternalScanner s,
   List&lt;Result&gt; result,
   int limit,
@@ -3982,7 +3974,7 @@ implements 
 
 preScannerClose
-public void preScannerClose(ObserverContext&lt;RegionCoprocessorEnvironment&gt; c,
+public void preScannerClose(ObserverContext&lt;RegionCoprocessorEnvironment&gt; c,
  InternalScanner s)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in 

[14/51] [partial] hbase-site git commit: Published site at .

2017-10-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5018ccb3/devapidocs/org/apache/hadoop/hbase/ChoreService.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ChoreService.html 
b/devapidocs/org/apache/hadoop/hbase/ChoreService.html
index f851a4c..a99075c 100644
--- a/devapidocs/org/apache/hadoop/hbase/ChoreService.html
+++ b/devapidocs/org/apache/hadoop/hbase/ChoreService.html
@@ -49,7 +49,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-PrevClass
+PrevClass
 NextClass
 
 
@@ -790,7 +790,7 @@ publicvoid
 
-PrevClass
+PrevClass
 NextClass
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5018ccb3/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html 
b/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html
index e3e6898..cecadde 100644
--- a/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html
+++ b/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html
@@ -105,7 +105,7 @@ var activeTableTab = "activeTableTab";
 
 
 All Known Implementing Classes:
-BufferedDataBlockEncoder.OffheapDecodedCell,
 BufferedDataBlockEncoder.OnheapDecodedCell,
 ByteBufferChunkCell, 
ByteBufferKeyValue, CellUtil.TagRewriteByteBufferCell, CellUtil.TagRewriteCell, CellUtil.ValueAndTagRewriteByteBufferCell, CellUtil.ValueAndTagRewriteCell, 
IndividualBytesFieldCell, KeyValue, KeyValue.KeyOnlyKeyValue, MapReduceCell, NoTagByteBufferChunkCell, NoTagsByteBufferKeyValue, NoTagsKeyValue, SizeCachedKeyValue, SizeCachedNoTagsKeyValue
+BufferedDataBlockEncoder.OffheapDecodedCell,
 BufferedDataBlockEncoder.OnheapDecodedCell,
 ByteBufferChunkCell, 
ByteBufferKeyValue, IndividualBytesFieldCell, KeyValue, KeyValue.KeyOnlyKeyValue, MapReduceCell, NoTagByteBufferChunkCell, NoTagsByteBufferKeyValue, NoTagsKeyValue, PrivateCellUtil.TagRewriteByteBufferCell, PrivateCellUtil.TagRewriteCell, PrivateCellUtil.ValueAndTagRewriteByteBufferCell, 
PrivateCellUtil.ValueAndTagRewriteCell, SizeCachedKeyValue, SizeCachedNoTagsKeyValue
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5018ccb3/devapidocs/org/apache/hadoop/hbase/HealthChecker.HealthCheckerExitStatus.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/HealthChecker.HealthCheckerExitStatus.html 
b/devapidocs/org/apache/hadoop/hbase/HealthChecker.HealthCheckerExitStatus.html
index 5095197..521a727 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/HealthChecker.HealthCheckerExitStatus.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/HealthChecker.HealthCheckerExitStatus.html
@@ -272,7 +272,7 @@ the order they are declared.
 
 
 values
-public static HealthChecker.HealthCheckerExitStatus[] values()
+public static HealthChecker.HealthCheckerExitStatus[] values()
 Returns an array containing the constants of this enum 
type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -292,7 +292,7 @@ for (HealthChecker.HealthCheckerExitStatus c : 
HealthChecker.HealthCheckerExitSt
 
 
 valueOf
-public static HealthChecker.HealthCheckerExitStatus valueOf(String name)
+public static HealthChecker.HealthCheckerExitStatus valueOf(String name)
 Returns the enum constant of this type with the specified 
name.
 The string must match exactly an identifier used to declare an
 enum constant in this type.  (Extraneous whitespace characters are 
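For reference, the iteration snippet the wrapped javadoc above is quoting reads, in full:

    for (HealthChecker.HealthCheckerExitStatus c
        : HealthChecker.HealthCheckerExitStatus.values()) {
      System.out.println(c);
    }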

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5018ccb3/devapidocs/org/apache/hadoop/hbase/IndividualBytesFieldCell.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/IndividualBytesFieldCell.html 
b/devapidocs/org/apache/hadoop/hbase/IndividualBytesFieldCell.html
index 5c9c440..a4a63b5 100644
--- a/devapidocs/org/apache/hadoop/hbase/IndividualBytesFieldCell.html
+++ b/devapidocs/org/apache/hadoop/hbase/IndividualBytesFieldCell.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class IndividualBytesFieldCell
+public class IndividualBytesFieldCell
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements ExtendedCell
 
@@ -476,7 +476,7 @@ implements 
 
 FIXED_OVERHEAD
-private static final long FIXED_OVERHEAD
+private static final long FIXED_OVERHEAD
 
 
 
@@ -485,7 +485,7 @@ implements 
 
 row
-private final byte[] row
+private final byte[] row
 
 
 
@@ -494,7 +494,7 @@ implements 
 
 rOffset
-private final int rOffset
+private final int rOffset
 
 
 
@@ -503,7 +503,7 @@ implements 
 
 

[14/51] [partial] hbase-site git commit: Published site at .

2017-10-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/00c22388/devapidocs/org/apache/hadoop/hbase/client/AsyncTableBase.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncTableBase.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncTableBase.html
index 289e895..b45f132 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncTableBase.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncTableBase.html
@@ -380,10 +380,14 @@ public interface 
 
 getRpcTimeout
-long getRpcTimeout(TimeUnit unit)
+long getRpcTimeout(TimeUnit unit)
 Get timeout of each rpc request in this Table instance. It 
will be overridden by a more
  specific rpc timeout config such as readRpcTimeout or writeRpcTimeout.
 
+Parameters:
+unit - the unit of time the timeout to be represented in
+Returns:
+rpc timeout in the specified time unit
 See Also:
 getReadRpcTimeout(TimeUnit),
 
 getWriteRpcTimeout(TimeUnit)
@@ -396,8 +400,14 @@ public interface 
 
 getReadRpcTimeout
-long getReadRpcTimeout(TimeUnit unit)
+long getReadRpcTimeout(TimeUnit unit)
 Get timeout of each rpc read request in this Table 
instance.
+
+Parameters:
+unit - the unit of time the timeout to be represented in
+Returns:
+read rpc timeout in the specified time unit
+
 
 
 
@@ -406,8 +416,14 @@ public interface 
 
 getWriteRpcTimeout
-long getWriteRpcTimeout(TimeUnit unit)
+long getWriteRpcTimeout(TimeUnit unit)
 Get timeout of each rpc write request in this Table 
instance.
+
+Parameters:
+unit - the unit of time the timeout to be represented in
+Returns:
+write rpc timeout in the specified time unit
+
 
 
 
@@ -416,8 +432,14 @@ public interface 
 
 getOperationTimeout
-long getOperationTimeout(TimeUnit unit)
+long getOperationTimeout(TimeUnit unit)
 Get timeout of each operation in Table instance.
+
+Parameters:
+unit - the unit of time the timeout to be represented in
+Returns:
+operation rpc timeout in the specified time unit
+
 
 
 
@@ -426,9 +448,15 @@ public interface 
 
 getScanTimeout
-long getScanTimeout(TimeUnit unit)
+long getScanTimeout(TimeUnit unit)
 Get the timeout of a single operation in a scan. It works 
like operation timeout for other
  operations.
+
+Parameters:
+unit - the unit of time the timeout to be represented in
+Returns:
+scan rpc timeout in the specified time unit
+
 
 
 
@@ -437,7 +465,7 @@ public interface 
 
 exists
-default CompletableFuture&lt;Boolean&gt; exists(Get get)
+default CompletableFuture&lt;Boolean&gt; exists(Get get)
 Test for the existence of columns in the table, as 
specified by the Get.
  
  This will return true if the Get matches one or more keys, false if not.
@@ -456,7 +484,7 @@ public interface 
 
 get
-CompletableFuture&lt;Result&gt; get(Get get)

[14/51] [partial] hbase-site git commit: Published site at .

2017-10-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21726f5a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index f014424..32cc2f6 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -121,7 +121,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-class HRegion.RegionScannerImpl
+class HRegion.RegionScannerImpl
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements RegionScanner, RpcCallback
 RegionScannerImpl is used to combine scanners from multiple 
Stores (aka column families).
@@ -425,7 +425,7 @@ implements 
 
 storeHeap
-KeyValueHeap storeHeap
+KeyValueHeap storeHeap
 
 
 
@@ -434,7 +434,7 @@ implements 
 
 joinedHeap
-KeyValueHeap joinedHeap
+KeyValueHeap joinedHeap
 Heap of key-values that are not essential for the provided 
filters and are thus read
  on demand, if on-demand column family loading is enabled.
 
@@ -445,7 +445,7 @@ implements 
 
 joinedContinuationRow
-protected Cell joinedContinuationRow
+protected Cell joinedContinuationRow
 If the joined heap data gathering is interrupted due to 
scan limits, this will
  contain the row for which we are populating the values.
 
@@ -456,7 +456,7 @@ implements 
 
 filterClosed
-private boolean filterClosed
+private boolean filterClosed
 
 
 
@@ -465,7 +465,7 @@ implements 
 
 stopRow
-protected final byte[] stopRow
+protected final byte[] stopRow
 
 
 
@@ -474,7 +474,7 @@ implements 
 
 includeStopRow
-protected final boolean includeStopRow
+protected final boolean includeStopRow
 
 
 
@@ -483,7 +483,7 @@ implements 
 
 region
-protected final HRegion region
+protected final HRegion region
 
 
 
@@ -492,7 +492,7 @@ implements 
 
 comparator
-protected final CellComparator comparator
+protected final CellComparator comparator
 
 
 
@@ -501,7 +501,7 @@ implements 
 
 readPt
-private final long readPt
+private final long readPt
 
 
 
@@ -510,7 +510,7 @@ implements 
 
 maxResultSize
-private final long maxResultSize
+private final long maxResultSize
 
 
 
@@ -519,7 +519,7 @@ implements 
 
 defaultScannerContext
-private final ScannerContext defaultScannerContext
+private final ScannerContext defaultScannerContext
 
 
 
@@ -528,7 +528,7 @@ implements 
 
 filter
-private final FilterWrapper filter
+private final FilterWrapper filter
 
 
 
@@ -545,7 +545,7 @@ implements 
 
 RegionScannerImpl
-RegionScannerImpl(Scanscan,
+RegionScannerImpl(Scanscan,
   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanneradditionalScanners,
   HRegionregion)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
@@ -561,7 +561,7 @@ implements 
 
 RegionScannerImpl
-RegionScannerImpl(Scanscan,
+RegionScannerImpl(Scanscan,
   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanneradditionalScanners,
   HRegionregion,
   longnonceGroup,
@@ -587,7 +587,7 @@ implements 
 
 getRegionInfo
-publicRegionInfogetRegionInfo()
+publicRegionInfogetRegionInfo()
 
 Specified by:
 getRegionInfoin
 interfaceRegionScanner
@@ -602,7 +602,7 @@ implements 
 
 initializeScanners
-protectedvoidinitializeScanners(Scanscan,
+protectedvoidinitializeScanners(Scanscan,
   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanneradditionalScanners)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
@@ -617,7 +617,7 @@ implements 
 
 initializeKVHeap
-protectedvoidinitializeKVHeap(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScannerscanners,
+protectedvoidinitializeKVHeap(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScannerscanners,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScannerjoinedScanners,
 HRegionregion)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 

[14/51] [partial] hbase-site git commit: Published site at .

2017-10-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8847591c/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
index 1dd4126..c9aa50a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class HRegionServer.CompactionChecker
+private static class HRegionServer.CompactionChecker
 extends ScheduledChore
 
 
@@ -233,7 +233,7 @@ extends 
 
 instance
-private final HRegionServer instance
+private final HRegionServer instance
 
 
 
@@ -242,7 +242,7 @@ extends 
 
 majorCompactPriority
-private final int majorCompactPriority
+private final int majorCompactPriority
 
 
 
@@ -251,7 +251,7 @@ extends 
 
 DEFAULT_PRIORITY
-private static final int DEFAULT_PRIORITY
+private static final int DEFAULT_PRIORITY
 
 See Also:
 Constant
 Field Values
@@ -264,7 +264,7 @@ extends 
 
 iteration
-private long iteration
+private long iteration
 
 
 
@@ -281,7 +281,7 @@ extends 
 
 CompactionChecker
-CompactionChecker(HRegionServer h,
+CompactionChecker(HRegionServer h,
   int sleepTime,
   Stoppable stopper)
 
@@ -300,7 +300,7 @@ extends 
 
 chore
-protected void chore()
+protected void chore()
 Description copied from 
class:ScheduledChore
 The task to execute on each scheduled execution of the 
Chore
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8847591c/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
index 3d6dfd7..baa9abf 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class HRegionServer.MovedRegionInfo
+private static class HRegionServer.MovedRegionInfo
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 
 
@@ -218,7 +218,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 serverName
-private final ServerName serverName
+private final ServerName serverName
 
 
 
@@ -227,7 +227,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 seqNum
-private final long seqNum
+private final long seqNum
 
 
 
@@ -236,7 +236,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ts
-private final long ts
+private final long ts
 
 
 
@@ -253,7 +253,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 MovedRegionInfo
-public MovedRegionInfo(ServerName serverName,
+public MovedRegionInfo(ServerName serverName,
    long closeSeqNum)
 
 
@@ -271,7 +271,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getServerName
-public ServerName getServerName()
+public ServerName getServerName()
 
 
 
@@ -280,7 +280,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getSeqNum
-public long getSeqNum()
+public long getSeqNum()
 
 
 
@@ -289,7 +289,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getMoveTime
-public long getMoveTime()
+public long getMoveTime()
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8847591c/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
index ba33386..f1e7550 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-protected static final class HRegionServer.MovedRegionsCleaner
+protected static final class HRegionServer.MovedRegionsCleaner
 extends ScheduledChore
 implements Stoppable
 Creates a Chore thread to clean the moved region 
cache.
@@ -242,7 +242,7 @@ implements 
 
 regionServer
-private HRegionServer regionServer
+private HRegionServer regionServer
 
 
 
@@ -251,7 +251,7 @@ implements 
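CompactionChecker and MovedRegionsCleaner above are both ScheduledChore subclasses. A minimal sketch of that shape, assuming the ScheduledChore(String, Stoppable, int) constructor; the chore body is illustrative:

    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    class PeriodicCheckChore extends ScheduledChore {
      private long iteration = 0;

      PeriodicCheckChore(Stoppable stopper, int periodMs) {
        super("PeriodicCheckChore", stopper, periodMs);
      }

      @Override
      protected void chore() {
        // The task to execute on each scheduled execution of the chore.
        iteration++;
      }
    }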
 
 

[14/51] [partial] hbase-site git commit: Published site at .

2017-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/41a7fcc5/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 12fe16f..b1e0997 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -1960,6279 +1960,6285 @@
 1952  protected void 
doRegionCompactionPrep() throws IOException {
 1953  }
 1954
-1955  @Override
-1956  public void triggerMajorCompaction() 
throws IOException {
-1957
stores.values().forEach(HStore::triggerMajorCompaction);
-1958  }
-1959
-1960  /**
-1961   * Synchronously compact all stores in 
the region.
-1962   * pThis operation could block 
for a long time, so don't call it from a
-1963   * time-sensitive thread.
-1964   * pNote that no locks are 
taken to prevent possible conflicts between
-1965   * compaction and splitting 
activities. The regionserver does not normally compact
-1966   * and split in parallel. However by 
calling this method you may introduce
-1967   * unexpected and unhandled 
concurrency. Don't do this unless you know what
-1968   * you are doing.
-1969   *
-1970   * @param majorCompaction True to 
force a major compaction regardless of thresholds
-1971   * @throws IOException
-1972   */
-1973  public void compact(boolean 
majorCompaction) throws IOException {
-1974if (majorCompaction) {
-1975  triggerMajorCompaction();
-1976}
-1977for (HStore s : stores.values()) {
-1978  Optional&lt;CompactionContext&gt; compaction = s.requestCompaction();
-1979  if (compaction.isPresent()) {
-1980ThroughputController controller 
= null;
-1981if (rsServices != null) {
-1982  controller = 
CompactionThroughputControllerFactory.create(rsServices, conf);
-1983}
-1984if (controller == null) {
-1985  controller = 
NoLimitThroughputController.INSTANCE;
-1986}
-1987compact(compaction.get(), s, 
controller, null);
-1988  }
-1989}
-1990  }
-1991
-1992  /**
-1993   * This is a helper function that 
compact all the stores synchronously.
-1994   * p
-1995   * It is used by utilities and 
testing
-1996   */
-1997  @VisibleForTesting
-1998  public void compactStores() throws 
IOException {
-1999for (HStore s : stores.values()) {
-2000  Optional&lt;CompactionContext&gt; compaction = s.requestCompaction();
-2001  if (compaction.isPresent()) {
-2002compact(compaction.get(), s, 
NoLimitThroughputController.INSTANCE, null);
-2003  }
-2004}
-2005  }
-2006
-2007  /**
-2008   * This is a helper function that 
compact the given store.
-2009   * p
-2010   * It is used by utilities and 
testing
-2011   */
-2012  @VisibleForTesting
-2013  void compactStore(byte[] family, 
ThroughputController throughputController) throws IOException {
-2014HStore s = getStore(family);
-2015    Optional&lt;CompactionContext&gt; compaction = s.requestCompaction();
-2016if (compaction.isPresent()) {
-2017  compact(compaction.get(), s, 
throughputController, null);
-2018}
-2019  }
-2020
-2021  /**
-2022   * Called by compaction thread and 
after region is opened to compact the
-2023   * HStores if necessary.
-2024   *
-2025   * pThis operation could block 
for a long time, so don't call it from a
-2026   * time-sensitive thread.
-2027   *
-2028   * Note that no locking is necessary 
at this level because compaction only
-2029   * conflicts with a region split, and 
that cannot happen because the region
-2030   * server does them sequentially and 
not in parallel.
-2031   *
-2032   * @param compaction Compaction 
details, obtained by requestCompaction()
-2033   * @param throughputController
-2034   * @return whether the compaction 
completed
-2035   */
+1955  /**
+1956   * Synchronously compact all stores in 
the region.
+1957   * pThis operation could block 
for a long time, so don't call it from a
+1958   * time-sensitive thread.
+1959   * pNote that no locks are 
taken to prevent possible conflicts between
+1960   * compaction and splitting 
activities. The regionserver does not normally compact
+1961   * and split in parallel. However by 
calling this method you may introduce
+1962   * unexpected and unhandled 
concurrency. Don't do this unless you know what
+1963   * you are doing.
+1964   *
+1965   * @param majorCompaction True to 
force a major compaction regardless of thresholds
+1966   * @throws IOException
+1967   */
+1968  public void compact(boolean 
majorCompaction) throws IOException {
+1969if (majorCompaction) {
+1970  
stores.values().forEach(HStore::triggerMajorCompaction);
+1971}
+1972for (HStore s : stores.values()) {
+1973  
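Per the javadoc retained above, a caller that wants a forced major compaction now simply passes true. A hedged usage sketch:

    import java.io.IOException;
    import org.apache.hadoop.hbase.regionserver.HRegion;

    final class CompactionHelper {
      // Synchronous; keep it off time-sensitive threads, as the javadoc warns.
      static void forceMajorCompaction(HRegion region) throws IOException {
        region.compact(true); // true => major compaction regardless of thresholds
      }
    }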

[14/51] [partial] hbase-site git commit: Published site at .

2017-10-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f94a4c5/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
index 768d30b..0f7de2a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
@@ -6,7 +6,7 @@
 
 
 
-001/**
+001/*
 002 * Licensed to the Apache Software 
Foundation (ASF) under one
 003 * or more contributor license 
agreements.  See the NOTICE file
 004 * distributed with this work for 
additional information
@@ -132,352 +132,372 @@
 124  }
 125
 126  /**
-127   * Creates a short-circuit connection 
that can bypass the RPC layer (serialization,
-128   * deserialization, networking, etc..) 
when talking to a local server.
-129   * @param conf the current 
configuration
-130   * @param pool the thread pool to use 
for batch operations
-131   * @param user the user the connection 
is for
-132   * @param serverName the local server 
name
-133   * @param admin the admin interface of 
the local server
-134   * @param client the client interface 
of the local server
-135   * @return a short-circuit connection.
-136   * @throws IOException if IO failure 
occurred
-137   */
-138  public static ClusterConnection 
createShortCircuitConnection(final Configuration conf,
-139  ExecutorService pool, User user, 
final ServerName serverName,
-140  final 
AdminService.BlockingInterface admin, final ClientService.BlockingInterface 
client)
-141  throws IOException {
-142if (user == null) {
-143  user = 
UserProvider.instantiate(conf).getCurrent();
-144}
-145return new 
ConnectionImplementation(conf, pool, user) {
-146  @Override
-147  public 
AdminService.BlockingInterface getAdmin(ServerName sn) throws IOException {
-148return serverName.equals(sn) ? 
admin : super.getAdmin(sn);
-149  }
-150
-151  @Override
-152  public 
ClientService.BlockingInterface getClient(ServerName sn) throws IOException {
-153return serverName.equals(sn) ? 
client : super.getClient(sn);
-154  }
-155
-156  @Override
-157  public MasterKeepAliveConnection 
getKeepAliveMasterService()
-158  throws 
MasterNotRunningException {
-159if (!(client instanceof 
MasterService.BlockingInterface)) {
-160  return 
super.getKeepAliveMasterService();
-161} else {
-162  return new 
ShortCircuitMasterConnection((MasterService.BlockingInterface) client);
-163}
-164  }
-165};
-166  }
-167
-168  /**
-169   * Setup the connection class, so that 
it will not depend on master being online. Used for testing
-170   * @param conf configuration to set
-171   */
-172  @VisibleForTesting
-173  public static void 
setupMasterlessConnection(Configuration conf) {
-174
conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, 
MasterlessConnection.class.getName());
-175  }
-176
-177  /**
-178   * Some tests shut down the master. But 
table availability is a master RPC which is performed on
-179   * region re-lookups.
-180   */
-181  static class MasterlessConnection 
extends ConnectionImplementation {
-182MasterlessConnection(Configuration 
conf, ExecutorService pool, User user) throws IOException {
-183  super(conf, pool, user);
+127   * A ClusterConnection that will 
short-circuit RPC making direct invocations against the
+128   * localhost if the invocation target 
is 'this' server; save on network and protobuf
+129   * invocations.
+130   */
+131  @VisibleForTesting // Class is visible 
so can assert we are short-circuiting when expected.
+132  public static class 
ShortCircuitingClusterConnection extends ConnectionImplementation {
+133private final ServerName 
serverName;
+134private final 
AdminService.BlockingInterface localHostAdmin;
+135private final 
ClientService.BlockingInterface localHostClient;
+136
+137private 
ShortCircuitingClusterConnection(Configuration conf, ExecutorService pool, User 
user,
+138ServerName serverName, 
AdminService.BlockingInterface admin,
+139ClientService.BlockingInterface 
client)
+140throws IOException {
+141  super(conf, pool, user);
+142  this.serverName = serverName;
+143  this.localHostAdmin = admin;
+144  this.localHostClient = client;
+145}
+146
+147@Override
+148public AdminService.BlockingInterface 
getAdmin(ServerName sn) throws IOException {
+149  return serverName.equals(sn) ? 
this.localHostAdmin : super.getAdmin(sn);
+150}
+151
+152@Override
+153public 
ClientService.BlockingInterface getClient(ServerName sn) throws IOException {
+154  return serverName.equals(sn) ? 
this.localHostClient : super.getClient(sn);
+155}
+156
+157

[14/51] [partial] hbase-site git commit: Published site at .

2017-10-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c0c4a947/devapidocs/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html 
b/devapidocs/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index ceaf546..6fd592d 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class HMaster.InitializationMonitor
+private static class HMaster.InitializationMonitor
 extends HasThread
 Protection against zombie master. Started once Master 
accepts active responsibility and
  starts taking over responsibilities. Allows a finite time window before 
giving up ownership.
@@ -250,7 +250,7 @@ extends 
 
 TIMEOUT_KEY
-public static final String TIMEOUT_KEY
+public static final String TIMEOUT_KEY
 The amount of time in milliseconds to sleep before checking 
initialization status.
 
 See Also:
@@ -264,7 +264,7 @@ extends 
 
 TIMEOUT_DEFAULT
-public static final long TIMEOUT_DEFAULT
+public static final long TIMEOUT_DEFAULT
 
 
 
@@ -273,7 +273,7 @@ extends 
 
 HALT_KEY
-public static final String HALT_KEY
+public static final String HALT_KEY
 When timeout expired and initialization has not complete, 
call http://docs.oracle.com/javase/8/docs/api/java/lang/System.html?is-external=true#exit-int-;
 title="class or interface in java.lang">System.exit(int) when
  true, do nothing otherwise.
 
@@ -288,7 +288,7 @@ extends 
 
 HALT_DEFAULT
-public static final boolean HALT_DEFAULT
+public static final boolean HALT_DEFAULT
 
 See Also:
 Constant
 Field Values
@@ -301,7 +301,7 @@ extends 
 
 master
-private final HMaster master
+private final HMaster master
 
 
 
@@ -310,7 +310,7 @@ extends 
 
 timeout
-private final long timeout
+private final long timeout
 
 
 
@@ -319,7 +319,7 @@ extends 
 
 haltOnTimeout
-private final boolean haltOnTimeout
+private final boolean haltOnTimeout
 
 
 
@@ -336,7 +336,7 @@ extends 
 
 InitializationMonitor
-InitializationMonitor(HMaster master)
+InitializationMonitor(HMaster master)
 Creates a Thread that monitors the HMaster.isInitialized()
 state.
 
 
@@ -354,7 +354,7 @@ extends 
 
 run
-public void run()
+public void run()
 
 Specified by:
 http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true#run--;
 title="class or interface in java.lang">runin 
interfacehttp://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true;
 title="class or interface in java.lang">Runnable

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c0c4a947/devapidocs/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html 
b/devapidocs/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index eb053bb..26c398d 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class HMaster.RedirectServlet
+public static class HMaster.RedirectServlet
 extends javax.servlet.http.HttpServlet
 
 See Also:
@@ -243,7 +243,7 @@ extends javax.servlet.http.HttpServlet
 
 
 serialVersionUID
-private static final long serialVersionUID
+private static final long serialVersionUID
 
 See Also:
 Constant
 Field Values
@@ -256,7 +256,7 @@ extends javax.servlet.http.HttpServlet
 
 
 regionServerInfoPort
-private final int regionServerInfoPort
+private final int regionServerInfoPort
 
 
 
@@ -265,7 +265,7 @@ extends javax.servlet.http.HttpServlet
 
 
 regionServerHostname
-private final String regionServerHostname
+private final String regionServerHostname
 
 
 
@@ -282,7 +282,7 @@ extends javax.servlet.http.HttpServlet
 
 
 RedirectServlet
-public RedirectServlet(InfoServer infoServer,
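The InitializationMonitor contract above (sleep for the TIMEOUT_KEY window, then call System.exit if the master never initialized and HALT_KEY is set) reduces to a small watchdog thread. The sketch below is a simplified stand-in, not the HBase class itself:

    import java.util.function.BooleanSupplier;

    final class InitWatchdogSketch extends Thread {
      private final long timeoutMs;        // TIMEOUT_KEY analogue
      private final boolean haltOnTimeout; // HALT_KEY analogue
      private final BooleanSupplier initialized;

      InitWatchdogSketch(long timeoutMs, boolean haltOnTimeout,
          BooleanSupplier initialized) {
        super("MasterInitializationMonitor");
        this.timeoutMs = timeoutMs;
        this.haltOnTimeout = haltOnTimeout;
        this.initialized = initialized;
        setDaemon(true);
      }

      @Override
      public void run() {
        try {
          Thread.sleep(timeoutMs);
          if (!initialized.getAsBoolean() && haltOnTimeout) {
            // Zombie master: give up ownership instead of lingering half-started.
            System.exit(-1);
          }
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
        }
      }
    }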
+publicRedirectServlet(InfoServerinfoServer,

[14/51] [partial] hbase-site git commit: Published site at .

2017-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ed0004f8/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
index b3f63f3..f4c5925 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
@@ -3168,71 +3168,75 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 void
+AssignmentManager.move(RegionInfo regionInfo)
+
+void
 AssignmentManager.offlineRegion(RegionInfo regionInfo)

 void
 AssignmentManager.onlineRegion(RegionInfo regionInfo, ServerName serverName)

 void
 RegionStates.removeFromFailedOpen(RegionInfo regionInfo)

 void
 RegionStates.removeFromOfflineRegions(RegionInfo regionInfo)

 private void
 AssignmentManager.sendRegionClosedNotification(RegionInfo regionInfo)

 private void
 AssignmentManager.sendRegionOpenedNotification(RegionInfo regionInfo, ServerName serverName)

 private void
 AssignmentManager.setMetaInitialized(RegionInfo metaRegionInfo, boolean isInitialized)

 protected void
 RegionTransitionProcedure.setRegionInfo(RegionInfo regionInfo)

 private boolean
 AssignmentManager.shouldAssignFavoredNodes(RegionInfo region)

 void
 RegionStateStore.splitRegion(RegionInfo parent, RegionInfo hriA, RegionInfo hriB, ServerName serverName)

 void
 AssignmentManager.unassign(RegionInfo regionInfo)

 void
 AssignmentManager.unassign(RegionInfo regionInfo, boolean forceNewPlan)

 protected void
 RegionStateStore.updateMetaLocation(RegionInfo regionInfo, ServerName serverName)

 protected void
 RegionStateStore.updateRegionLocation(RegionInfo regionInfo, RegionState.State state, Put... put)

@@ -3241,7 +3245,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 void
 RegionStateStore.updateRegionLocation(RegionInfo regionInfo, RegionState.State state, long openSeqNum, long pid)

 private void
 AssignmentManager.updateRegionMergeTransition(ServerName serverName,
     org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode state,
@@ -3249,7 +3253,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
     RegionInfo hriA, RegionInfo hriB)

 private void
 AssignmentManager.updateRegionSplitTransition(ServerName serverName,
     org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode state,
@@ -3257,19 +3261,19 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
     RegionInfo hriA, RegionInfo hriB)

 void
 RegionStates.updateRegionState(RegionInfo regionInfo, RegionState.State state)

 private void
 AssignmentManager.updateRegionTransition(ServerName serverName,
     org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode state,
     RegionInfo regionInfo, long seqId)

 protected void
 RegionStateStore.updateUserRegionLocation(RegionInfo regionInfo, RegionState.State state,
@@ -3278,7 +3282,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
     long openSeqNum, long pid)

 void
 RegionStateStore.RegionStateVisitor.visitRegionState(RegionInfo regionInfo, RegionState.State state,
@@ -3286,16 +3290,16 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
     ServerName lastHost, long openSeqNum)

 boolean
 AssignmentManager.waitForAssignment(RegionInfo regionInfo)

 boolean
 AssignmentManager.waitForAssignment(RegionInfo regionInfo, long timeout)

 boolean
 AssignmentManager.waitMetaInitialized(Procedure proc, RegionInfo regionInfo)

@@ -5586,21 +5590,26 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 RegionInfo hri)

+private ServerName
+RSGroupBasedLoadBalancer.findServerForRegion(Map

[14/51] [partial] hbase-site git commit: Published site at .

2017-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/47abd8e6/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
index 6150454..b712da3 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
@@ -391,2559 +391,2494 @@
 383    CompletableFuture<Void> operate(TableName table);
 384  }
 385
-386  private CompletableFuture<List<TableDescriptor>> batchTableOperations(Pattern pattern,
-387      TableOperator operator, String operationType) {
-388    CompletableFuture<List<TableDescriptor>> future = new CompletableFuture<>();
-389    List<TableDescriptor> failed = new LinkedList<>();
-390    listTables(Optional.ofNullable(pattern), false).whenComplete(
-391      (tables, error) -> {
-392        if (error != null) {
-393          future.completeExceptionally(error);
-394          return;
-395        }
-396        CompletableFuture[] futures =
-397            tables.stream()
-398                .map((table) -> operator.operate(table.getTableName()).whenComplete((v, ex) -> {
-399                  if (ex != null) {
-400                    LOG.info("Failed to " + operationType + " table " + table.getTableName(), ex);
-401                    failed.add(table);
-402                  }
-403                })).<CompletableFuture> toArray(size -> new CompletableFuture[size]);
-404        CompletableFuture.allOf(futures).thenAccept((v) -> {
-405          future.complete(failed);
-406        });
-407      });
-408    return future;
-409  }
-410
-411  @Override
-412  public CompletableFuture<Boolean> tableExists(TableName tableName) {
-413    return AsyncMetaTableAccessor.tableExists(metaTable, tableName);
+386  @Override
+387  public CompletableFuture<Boolean> tableExists(TableName tableName) {
+388    return AsyncMetaTableAccessor.tableExists(metaTable, tableName);
+389  }
+390
+391  @Override
+392  public CompletableFuture<List<TableDescriptor>> listTables(Optional<Pattern> pattern,
+393      boolean includeSysTables) {
+394    return this.<List<TableDescriptor>> newMasterCaller()
+395        .action((controller, stub) -> this
+396            .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, List<TableDescriptor>> call(
+397              controller, stub,
+398              RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables),
+399              (s, c, req, done) -> s.getTableDescriptors(c, req, done),
+400              (resp) -> ProtobufUtil.toTableDescriptorList(resp)))
+401        .call();
+402  }
+403
+404  @Override
+405  public CompletableFuture<List<TableName>> listTableNames(Optional<Pattern> pattern,
+406      boolean includeSysTables) {
+407    return this.<List<TableName>> newMasterCaller()
+408        .action((controller, stub) -> this
+409            .<GetTableNamesRequest, GetTableNamesResponse, List<TableName>> call(controller, stub,
+410              RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables),
+411              (s, c, req, done) -> s.getTableNames(c, req, done),
+412              (resp) -> ProtobufUtil.toTableNameList(resp.getTableNamesList())))
+413        .call();
 414  }
 415
 416  @Override
-417  public CompletableFuture<List<TableDescriptor>> listTables(Optional<Pattern> pattern,
-418      boolean includeSysTables) {
-419    return this.<List<TableDescriptor>> newMasterCaller()
-420        .action((controller, stub) -> this
-421            .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, List<TableDescriptor>> call(
-422              controller, stub,
-423              RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables),
-424              (s, c, req, done) -> s.getTableDescriptors(c, req, done),
-425              (resp) -> ProtobufUtil.toTableDescriptorList(resp)))
-426        .call();
-427  }
-428
-429  @Override
-430  public CompletableFuture<List<TableName>> listTableNames(Optional<Pattern> pattern,
-431      boolean includeSysTables) {
-432    return this.<List<TableName>> newMasterCaller()
-433        .action((controller, stub) -> this
-434            .<GetTableNamesRequest, GetTableNamesResponse, List<TableName>> call(controller, stub,
-435              RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables),
-436              (s, c, req, done) -> s.getTableNames(c, req, done),
-437              (resp) -> ProtobufUtil.toTableNameList(resp.getTableNamesList())))
-438        .call();
-439  }
-440
-441  @Override
-442  public CompletableFuture<TableDescriptor> getTableDescriptor(TableName tableName) {
-443

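A minimal client-side sketch of driving the listTables() path shown above, assuming the 2.0-era async client (ConnectionFactory.createAsyncConnection() and an AsyncAdmin whose listTables takes Optional<Pattern>, as in this diff; later releases renamed the method listTableDescriptors). The table-name pattern is illustrative only.

import java.util.Optional;
import java.util.regex.Pattern;

import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListUserTables {
  public static void main(String[] args) throws Exception {
    // createAsyncConnection() reads hbase-site.xml from the classpath.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection().get()) {
      conn.getAdmin()
          .listTables(Optional.of(Pattern.compile("test.*")), false /* skip system tables */)
          .thenAccept(tds -> tds.forEach(td -> System.out.println(td.getTableName())))
          .join();  // block only so the demo JVM does not exit early; real callers stay async
    }
  }
}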
[14/51] [partial] hbase-site git commit: Published site at .

2017-10-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
index 5d138ea..81d256e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
@@ -2387,7 +2387,7 @@
 2379      return true;
 2380    }
 2381    long modifiedFlushCheckInterval = flushCheckInterval;
-2382    if (getRegionInfo().isSystemTable() &&
+2382    if (getRegionInfo().getTable().isSystemTable() &&
 2383        getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
 2384      modifiedFlushCheckInterval = SYSTEM_CACHE_FLUSH_INTERVAL;
 2385    }
@@ -7869,7 +7869,7 @@
 7861   */
 7862  public byte[] checkSplit() {
 7863    // Can't split META
-7864    if (this.getRegionInfo().isMetaTable() ||
+7864    if (this.getRegionInfo().isMetaRegion() ||
 7865        TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTable())) {
 7866      if (shouldForceSplit()) {
 7867        LOG.warn("Cannot split meta region in HBase 0.20 and above");
@@ -7914,374 +7914,372 @@
 7906  }
 7907
 7908  /** @return the coprocessor host */
-7909  @Override
-7910  public RegionCoprocessorHost getCoprocessorHost() {
-7911    return coprocessorHost;
-7912  }
-7913
-7914  /** @param coprocessorHost the new coprocessor host */
-7915  public void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost) {
-7916    this.coprocessorHost = coprocessorHost;
-7917  }
-7918
-7919  @Override
-7920  public void startRegionOperation() throws IOException {
-7921    startRegionOperation(Operation.ANY);
-7922  }
-7923
-7924  @Override
-7925  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH",
-7926    justification="Intentional")
-7927  public void startRegionOperation(Operation op) throws IOException {
-7928    switch (op) {
-7929      case GET:  // read operations
-7930      case SCAN:
-7931        checkReadsEnabled();
-7932      case INCREMENT: // write operations
-7933      case APPEND:
-7934      case SPLIT_REGION:
-7935      case MERGE_REGION:
-7936      case PUT:
-7937      case DELETE:
-7938      case BATCH_MUTATE:
-7939      case COMPACT_REGION:
-7940      case SNAPSHOT:
-7941        // when a region is in recovering state, no read, split, merge or snapshot is allowed
-7942        if (isRecovering() && (this.disallowWritesInRecovering ||
-7943              (op != Operation.PUT && op != Operation.DELETE && op != Operation.BATCH_MUTATE))) {
-7944          throw new RegionInRecoveryException(getRegionInfo().getRegionNameAsString() +
-7945            " is recovering; cannot take reads");
-7946        }
-7947        break;
-7948      default:
-7949        break;
-7950    }
-7951    if (op == Operation.MERGE_REGION || op == Operation.SPLIT_REGION
-7952        || op == Operation.COMPACT_REGION) {
-7953      // split, merge or compact region doesn't need to check the closing/closed state or lock the
-7954      // region
-7955      return;
-7956    }
-7957    if (this.closing.get()) {
-7958      throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing");
-7959    }
-7960    lock(lock.readLock());
-7961    if (this.closed.get()) {
-7962      lock.readLock().unlock();
-7963      throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed");
-7964    }
-7965    // The unit for snapshot is a region. So, all stores for this region must be
-7966    // prepared for snapshot operation before proceeding.
-7967    if (op == Operation.SNAPSHOT) {
-7968      stores.values().forEach(HStore::preSnapshotOperation);
-7969    }
-7970    try {
-7971      if (coprocessorHost != null) {
-7972        coprocessorHost.postStartRegionOperation(op);
-7973      }
-7974    } catch (Exception e) {
-7975      lock.readLock().unlock();
-7976      throw new IOException(e);
-7977    }
-7978  }
-7979
-7980  @Override
-7981  public void closeRegionOperation() throws IOException {
-7982    closeRegionOperation(Operation.ANY);
-7983  }
-7984
-7985  @Override
-7986  public void closeRegionOperation(Operation operation) throws IOException {
-7987    if (operation == Operation.SNAPSHOT) {
-7988      stores.values().forEach(HStore::postSnapshotOperation);
-7989    }
-7990    lock.readLock().unlock();
-7991    if (coprocessorHost != null) {
-7992      coprocessorHost.postCloseRegionOperation(operation);
-7993    }
-7994  }
-7995
-7996  /**
-7997   * This method needs to be called before any public call that reads or
-7998   * modifies stores in bulk. 

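A hedged sketch of the API move visible in the hunks above: table-level checks now go through getTable() (a TableName), while the meta check stays on RegionInfo under its new name. Names follow the 2.0-era client API; "regionInfo" stands for any region's descriptor.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;

final class RegionChecks {
  // Old: regionInfo.isSystemTable()  ->  New: regionInfo.getTable().isSystemTable()
  static boolean isSystemRegion(RegionInfo regionInfo) {
    return regionInfo.getTable().isSystemTable();
  }

  // Old: regionInfo.isMetaTable()  ->  New: regionInfo.isMetaRegion()
  // Mirrors the checkSplit() guard above: meta and namespace regions are unsplittable.
  static boolean isUnsplittable(RegionInfo regionInfo) {
    return regionInfo.isMetaRegion()
        || TableName.NAMESPACE_TABLE_NAME.equals(regionInfo.getTable());
  }
}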
[14/51] [partial] hbase-site git commit: Published site at .

2017-10-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7816cbde/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
index 55adc09..62e99e5 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
 
 Apache HBase - Exemplar for hbase-client archetype  Reactor 
Dependency Convergence
 
@@ -123,10 +123,10 @@
 305
 
 Number of unique artifacts (NOA):
-331
+329
 
 Number of version-conflicting artifacts (NOC):
-17
+16
 
 Number of SNAPSHOT artifacts (NOS):
 0
@@ -191,75 +191,20 @@
 11.0.2

-[Removed: 13 flattened dependency trees showing com.google.guava:guava:jar:11.0.2
-reaching each module either directly or via org.apache.hadoop:hadoop-common:jar:2.7.1
-(duplicate paths omitted), for hbase-assembly, hbase-backup, hbase-client-project,
-hbase-client, hbase-common, hbase-endpoint, hbase-examples, hbase-hadoop-compat,
-hbase-hadoop2-compat, hbase-it, hbase-mapreduce, hbase-metrics-api and hbase-metrics,
-all at 3.0.0-SNAPSHOT.]

[14/51] [partial] hbase-site git commit: Published site at .

2017-10-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c0571676/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
index 41e0c24..eac35d3 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
@@ -110,8004 +110,8178 @@
 102import org.apache.hadoop.hbase.UnknownScannerException;
 103import org.apache.hadoop.hbase.client.Append;
 104import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-105import org.apache.hadoop.hbase.client.Delete;
-106import org.apache.hadoop.hbase.client.Durability;
-107import org.apache.hadoop.hbase.client.Get;
-108import org.apache.hadoop.hbase.client.Increment;
-109import org.apache.hadoop.hbase.client.IsolationLevel;
-110import org.apache.hadoop.hbase.client.Mutation;
-111import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-112import org.apache.hadoop.hbase.client.Put;
-113import org.apache.hadoop.hbase.client.RegionInfo;
-114import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-115import org.apache.hadoop.hbase.client.Result;
-116import org.apache.hadoop.hbase.client.RowMutations;
-117import org.apache.hadoop.hbase.client.Scan;
-118import org.apache.hadoop.hbase.client.TableDescriptor;
-119import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-120import org.apache.hadoop.hbase.conf.ConfigurationManager;
-121import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-122import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-123import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-124import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-125import org.apache.hadoop.hbase.exceptions.RegionInRecoveryException;
-126import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-127import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-128import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-129import org.apache.hadoop.hbase.filter.FilterWrapper;
-130import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-131import org.apache.hadoop.hbase.io.HFileLink;
-132import org.apache.hadoop.hbase.io.HeapSize;
-133import org.apache.hadoop.hbase.io.TimeRange;
-134import org.apache.hadoop.hbase.io.hfile.HFile;
-135import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-136import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-137import org.apache.hadoop.hbase.ipc.RpcCall;
-138import org.apache.hadoop.hbase.ipc.RpcServer;
-139import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-140import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-141import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry;
-142import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
-143import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
-144import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
-145import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-146import org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory;
-147import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
-148import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-149import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
-150import org.apache.hadoop.hbase.security.User;
-151import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-152import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-153import org.apache.hadoop.hbase.util.Bytes;
-154import org.apache.hadoop.hbase.util.CancelableProgressable;
-155import org.apache.hadoop.hbase.util.ClassSize;
-156import org.apache.hadoop.hbase.util.CollectionUtils;
-157import org.apache.hadoop.hbase.util.CompressionTest;
-158import org.apache.hadoop.hbase.util.EncryptionTest;
-159import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-160import org.apache.hadoop.hbase.util.FSUtils;
-161import org.apache.hadoop.hbase.util.HashedBytes;
-162import org.apache.hadoop.hbase.util.Pair;
-163import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
-164import org.apache.hadoop.hbase.util.Threads;
-165import org.apache.hadoop.hbase.wal.WAL;
-166import org.apache.hadoop.hbase.wal.WALEdit;
-167import org.apache.hadoop.hbase.wal.WALFactory;
-168import org.apache.hadoop.hbase.wal.WALKey;
-169import org.apache.hadoop.hbase.wal.WALSplitter;
-170import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay;
-171import org.apache.hadoop.io.MultipleIOException;
-172import 

[14/51] [partial] hbase-site git commit: Published site at .

2017-10-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b838bdf0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index 4a7f4ae..41e0c24 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -143,7970 +143,7971 @@
 135import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
 136import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 137import org.apache.hadoop.hbase.ipc.RpcCall;
-138import org.apache.hadoop.hbase.ipc.RpcCallContext;
-139import org.apache.hadoop.hbase.ipc.RpcServer;
-140import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry;
-143import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
-144import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
-145import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
-146import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-147import org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory;
-148import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
-149import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-150import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
-151import org.apache.hadoop.hbase.security.User;
-152import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-153import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-154import org.apache.hadoop.hbase.util.Bytes;
-155import org.apache.hadoop.hbase.util.CancelableProgressable;
-156import org.apache.hadoop.hbase.util.ClassSize;
-157import org.apache.hadoop.hbase.util.CollectionUtils;
-158import org.apache.hadoop.hbase.util.CompressionTest;
-159import org.apache.hadoop.hbase.util.EncryptionTest;
-160import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-161import org.apache.hadoop.hbase.util.FSUtils;
-162import org.apache.hadoop.hbase.util.HashedBytes;
-163import org.apache.hadoop.hbase.util.Pair;
-164import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
-165import org.apache.hadoop.hbase.util.Threads;
-166import org.apache.hadoop.hbase.wal.WAL;
-167import org.apache.hadoop.hbase.wal.WALEdit;
-168import org.apache.hadoop.hbase.wal.WALFactory;
-169import org.apache.hadoop.hbase.wal.WALKey;
-170import org.apache.hadoop.hbase.wal.WALSplitter;
-171import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay;
-172import org.apache.hadoop.io.MultipleIOException;
-173import org.apache.hadoop.util.StringUtils;
-174import org.apache.htrace.Trace;
-175import org.apache.htrace.TraceScope;
-176import org.apache.yetus.audience.InterfaceAudience;
-177
-178import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-179import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-180import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-181import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-182import org.apache.hadoop.hbase.shaded.com.google.common.io.Closeables;
-183import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
-184import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-185import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-186import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-187import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-188import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall;
-189import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
-190import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
-191import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-192import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-193import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
-194import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
-195import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
-196import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor;
-197import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
-198import 

[14/51] [partial] hbase-site git commit: Published site at .

2017-10-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/387c1112/devapidocs/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.html 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.html
index 9ebfbdb..4793e17 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.html
@@ -18,8 +18,8 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":9,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10};
-var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";

 @InterfaceAudience.Public
-public abstract class TableInputFormatBase
+public abstract class TableInputFormatBase
 extends org.apache.hadoop.mapreduce.InputFormat<ImmutableBytesWritable, Result>
 A base for TableInputFormats. Receives a Connection, a TableName,
  an Scan instance that defines the input columns etc. Subclasses may use
@@ -158,7 +158,18 @@ extends org.apache.hadoop.mapreduce.InputFormat

@@ -194,20 +205,19 @@ extends org.apache.hadoop.mapreduce.InputFormat
 INITIALIZATION_ERROR

-static String
-INPUT_AUTOBALANCE_MAXSKEWRATIO
-Specify if ratio for data skew in M/R jobs, it goes well with the enabling
- hbase.mapreduce.input.autobalance property.
-
 private static org.apache.commons.logging.Log
 LOG

 static String
 MAPREDUCE_INPUT_AUTOBALANCE
-Specify if we enable auto-balance for input in M/R jobs.
+Specify if we enable auto-balance to set number of mappers in M/R jobs.

+static String
+MAX_AVERAGE_REGION_SIZE
+In auto-balance, we split input by ave region size, if calculated region size is too big, we can set it.

@@ -215,36 +225,35 @@ extends org.apache.hadoop.mapreduce.InputFormat
 NOT_INITIALIZED

+static String
+NUM_MAPPERS_PER_REGION
+Set the number of Mappers for each region, all regions have same number of Mappers

 private RegionLocator
 regionLocator
 The RegionLocator of the table.

 private HashMap<InetAddress, String>
 reverseDNSCacheMap
 The reverse DNS lookup cache mapping: IPAddress => HostName

 private Scan
 scan
 Holds the details for the internal scanner.

 private Table
 table
 The Table to scan.

-static String
-TABLE_ROW_TEXTKEY
-Specify if the row key in table is text (ASCII between 32~126),
- default is true.
-
 private TableRecordReader
 tableRecordReader
@@ -278,16 +287,15 @@ extends org.apache.hadoop.mapreduce.InputFormat
-All Methods  Static Methods  Instance Methods  Concrete Methods
+All Methods  Instance Methods  Concrete Methods

 Modifier and Type
 Method and Description

-private List<org.apache.hadoop.mapreduce.InputSplit>
-calculateRebalancedSplits(List<org.apache.hadoop.mapreduce.InputSplit> list,
-     org.apache.hadoop.mapreduce.JobContext context,
-     long average)
+List<org.apache.hadoop.mapreduce.InputSplit>

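A hedged sketch of wiring the new auto-balance knobs above into a job configuration. Only the constant names are taken from this page; the value types and magnitudes here are assumptions, and the literal config keys behind the constants are intentionally not spelled out — reference the constants instead.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableInputFormatBase;

public class AutoBalanceConf {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Turn on auto-balance so split sizes, not raw region count, drive mapper count.
    conf.setBoolean(TableInputFormatBase.MAPREDUCE_INPUT_AUTOBALANCE, true);
    // Cap the average region size used when computing splits; 8 GB is illustrative.
    conf.setLong(TableInputFormatBase.MAX_AVERAGE_REGION_SIZE, 8L * 1024 * 1024 * 1024);
    // Ask for two mappers per region (all regions get the same number).
    conf.setInt(TableInputFormatBase.NUM_MAPPERS_PER_REGION, 2);
    return conf;
  }
}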
[14/51] [partial] hbase-site git commit: Published site at .

2017-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3332caca/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
deleted file mode 100644
index 95e2ca9..000
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
+++ /dev/null
@@ -1,3841 +0,0 @@
-Source code
-
-001/**
-002 *
-003 * Licensed to the Apache Software Foundation (ASF) under one
-004 * or more contributor license agreements.  See the NOTICE file
-005 * distributed with this work for additional information
-006 * regarding copyright ownership.  The ASF licenses this file
-007 * to you under the Apache License, Version 2.0 (the
-008 * "License"); you may not use this file except in compliance
-009 * with the License.  You may obtain a copy of the License at
-010 *
-011 *     http://www.apache.org/licenses/LICENSE-2.0
-012 *
-013 * Unless required by applicable law or agreed to in writing, software
-014 * distributed under the License is distributed on an "AS IS" BASIS,
-015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-016 * See the License for the specific language governing permissions and
-017 * limitations under the License.
-018 */
-019package org.apache.hadoop.hbase.regionserver;
-020
-021import javax.management.MalformedObjectNameException;
-022import javax.management.ObjectName;
-023import javax.servlet.http.HttpServlet;
-024import java.io.IOException;
-025import java.io.InterruptedIOException;
-026import java.lang.Thread.UncaughtExceptionHandler;
-027import java.lang.management.MemoryType;
-028import java.lang.management.MemoryUsage;
-029import java.lang.reflect.Constructor;
-030import java.net.BindException;
-031import java.net.InetAddress;
-032import java.net.InetSocketAddress;
-033import java.util.ArrayList;
-034import java.util.Collection;
-035import java.util.Collections;
-036import java.util.Comparator;
-037import java.util.HashMap;
-038import java.util.HashSet;
-039import java.util.Iterator;
-040import java.util.List;
-041import java.util.Map;
-042import java.util.Map.Entry;
-043import java.util.Objects;
-044import java.util.Set;
-045import java.util.SortedMap;
-046import java.util.TreeMap;
-047import java.util.TreeSet;
-048import java.util.concurrent.ConcurrentHashMap;
-049import java.util.concurrent.ConcurrentMap;
-050import java.util.concurrent.ConcurrentSkipListMap;
-051import java.util.concurrent.CountDownLatch;
-052import java.util.concurrent.TimeUnit;
-053import java.util.concurrent.atomic.AtomicBoolean;
-054import java.util.concurrent.locks.ReentrantReadWriteLock;
-055import java.util.function.Function;
-056
-057import org.apache.commons.lang3.RandomUtils;
-058import org.apache.commons.lang3.SystemUtils;
-059import org.apache.commons.logging.Log;
-060import org.apache.commons.logging.LogFactory;
-061import org.apache.hadoop.conf.Configuration;
-062import org.apache.hadoop.fs.FileSystem;
-063import org.apache.hadoop.fs.Path;
-064import org.apache.hadoop.hbase.Abortable;
-065import org.apache.hadoop.hbase.ChoreService;
-066import org.apache.hadoop.hbase.ClockOutOfSyncException;
-067import org.apache.hadoop.hbase.CoordinatedStateManager;
-068import org.apache.hadoop.hbase.CoordinatedStateManagerFactory;
-069import org.apache.hadoop.hbase.DoNotRetryIOException;
-070import org.apache.hadoop.hbase.HBaseConfiguration;
-071import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-072import org.apache.hadoop.hbase.HConstants;
-073import org.apache.hadoop.hbase.HealthCheckChore;
-074import org.apache.hadoop.hbase.MetaTableAccessor;
-075import org.apache.hadoop.hbase.NotServingRegionException;
-076import org.apache.hadoop.hbase.PleaseHoldException;
-077import org.apache.hadoop.hbase.ScheduledChore;
-078import org.apache.hadoop.hbase.ServerName;
-079import org.apache.hadoop.hbase.Stoppable;
-080import org.apache.hadoop.hbase.TableDescriptors;
-081import org.apache.hadoop.hbase.TableName;
-082import org.apache.hadoop.hbase.YouAreDeadException;
-083import org.apache.hadoop.hbase.ZNodeClearer;
-084import org.apache.hadoop.hbase.client.ClusterConnection;
-085import org.apache.hadoop.hbase.client.Connection;
-086import org.apache.hadoop.hbase.client.ConnectionUtils;
-087import org.apache.hadoop.hbase.client.Put;
-088import org.apache.hadoop.hbase.client.RegionInfo;
-089import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-090import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
-091import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-092import org.apache.hadoop.hbase.client.locking.EntityLock;

[14/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.html
index a0b0122..5473602 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.html
@@ -53,369 +53,458 @@
 045import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 046import org.apache.hadoop.hbase.util.Bytes;
 047import org.apache.hadoop.hbase.util.FSUtils;
-048import org.apache.hadoop.io.Writable;
-049
-050import java.io.ByteArrayOutputStream;
-051import java.io.DataInput;
-052import java.io.DataOutput;
-053import java.io.IOException;
-054import java.util.ArrayList;
-055import java.util.List;
-056import java.util.UUID;
-057
-058/**
-059 * Hadoop MR API-agnostic implementation for mapreduce over table snapshots.
-060 */
-061@InterfaceAudience.Private
-062public class TableSnapshotInputFormatImpl {
-063  // TODO: Snapshots files are owned in fs by the hbase user. There is no
-064  // easy way to delegate access.
-065
-066  public static final Log LOG = LogFactory.getLog(TableSnapshotInputFormatImpl.class);
-067
-068  private static final String SNAPSHOT_NAME_KEY = "hbase.TableSnapshotInputFormat.snapshot.name";
-069  // key for specifying the root dir of the restored snapshot
-070  protected static final String RESTORE_DIR_KEY = "hbase.TableSnapshotInputFormat.restore.dir";
-071
-072  /** See {@link #getBestLocations(Configuration, HDFSBlocksDistribution)} */
-073  private static final String LOCALITY_CUTOFF_MULTIPLIER =
-074    "hbase.tablesnapshotinputformat.locality.cutoff.multiplier";
-075  private static final float DEFAULT_LOCALITY_CUTOFF_MULTIPLIER = 0.8f;
-076
-077  /**
-078   * Implementation class for InputSplit logic common between mapred and mapreduce.
-079   */
-080  public static class InputSplit implements Writable {
-081
-082    private TableDescriptor htd;
-083    private HRegionInfo regionInfo;
-084    private String[] locations;
-085    private String scan;
-086    private String restoreDir;
-087
-088    // constructor for mapreduce framework / Writable
-089    public InputSplit() {}
-090
-091    public InputSplit(TableDescriptor htd, HRegionInfo regionInfo, List<String> locations,
-092        Scan scan, Path restoreDir) {
-093      this.htd = htd;
-094      this.regionInfo = regionInfo;
-095      if (locations == null || locations.isEmpty()) {
-096        this.locations = new String[0];
-097      } else {
-098        this.locations = locations.toArray(new String[locations.size()]);
-099      }
-100      try {
-101        this.scan = scan != null ? TableMapReduceUtil.convertScanToString(scan) : "";
-102      } catch (IOException e) {
-103        LOG.warn("Failed to convert Scan to String", e);
-104      }
-105
-106      this.restoreDir = restoreDir.toString();
-107    }
-108
-109    public TableDescriptor getHtd() {
-110      return htd;
-111    }
-112
-113    public String getScan() {
-114      return scan;
-115    }
-116
-117    public String getRestoreDir() {
-118      return restoreDir;
+048import org.apache.hadoop.hbase.util.RegionSplitter;
+049import org.apache.hadoop.io.Writable;
+050
+051import java.io.ByteArrayOutputStream;
+052import java.io.DataInput;
+053import java.io.DataOutput;
+054import java.io.IOException;
+055import java.util.ArrayList;
+056import java.util.List;
+057import java.util.UUID;
+058
+059/**
+060 * Hadoop MR API-agnostic implementation for mapreduce over table snapshots.
+061 */
+062@InterfaceAudience.Private
+063public class TableSnapshotInputFormatImpl {
+064  // TODO: Snapshots files are owned in fs by the hbase user. There is no
+065  // easy way to delegate access.
+066
+067  public static final Log LOG = LogFactory.getLog(TableSnapshotInputFormatImpl.class);
+068
+069  private static final String SNAPSHOT_NAME_KEY = "hbase.TableSnapshotInputFormat.snapshot.name";
+070  // key for specifying the root dir of the restored snapshot
+071  protected static final String RESTORE_DIR_KEY = "hbase.TableSnapshotInputFormat.restore.dir";
+072
+073  /** See {@link #getBestLocations(Configuration, HDFSBlocksDistribution)} */
+074  private static final String LOCALITY_CUTOFF_MULTIPLIER =
+075    "hbase.tablesnapshotinputformat.locality.cutoff.multiplier";
+076  private static final float DEFAULT_LOCALITY_CUTOFF_MULTIPLIER = 0.8f;
+077
+078  /**
+079   * For MapReduce jobs running multiple mappers per region, determines
+080   * what split algorithm we should be using to find split points for scanners.
+081   */
+082  public static final String SPLIT_ALGO = 

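A hedged sketch of the feature this hunk is adding: running a MapReduce job over a table snapshot with several splits per region. The ten-argument initTableSnapshotMapperJob overload (with a RegionSplitter.SplitAlgorithm and a splits-per-region count) is assumed from this change set; earlier releases only offer the shorter form, and the snapshot name, restore dir and mapper are placeholders.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.mapreduce.Job;

public class SnapshotScanJob {
  // Placeholder mapper; a real job would emit something useful per row.
  static class NoopMapper extends TableMapper<ImmutableBytesWritable, Result> { }

  public static void configure(Job job) throws Exception {
    TableMapReduceUtil.initTableSnapshotMapperJob(
        "my_snapshot", new Scan(), NoopMapper.class,
        ImmutableBytesWritable.class, Result.class, job,
        true /* addDependencyJars */, new Path("/tmp/snapshot-restore"),
        new RegionSplitter.UniformSplit(), 2 /* splits per region */);
  }
}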
[14/51] [partial] hbase-site git commit: Published site at .

2017-09-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aea328be/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.FirstOnRowColCell.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.FirstOnRowColCell.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.FirstOnRowColCell.html
index 26125e3..eb6a00d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.FirstOnRowColCell.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.FirstOnRowColCell.html
@@ -28,3193 +28,3241 @@
 020
 021import static org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY;
 022import static org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
-023
-024import java.io.DataOutputStream;
-025import java.io.IOException;
-026import java.io.OutputStream;
-027import java.math.BigDecimal;
-028import java.nio.ByteBuffer;
-029import java.util.ArrayList;
-030import java.util.Arrays;
-031import java.util.Iterator;
-032import java.util.List;
-033import java.util.Map.Entry;
-034import java.util.NavigableMap;
-035
-036import org.apache.hadoop.hbase.KeyValue.Type;
-037import org.apache.yetus.audience.InterfaceAudience;
-038import org.apache.yetus.audience.InterfaceAudience.Private;
-039import org.apache.hadoop.hbase.io.HeapSize;
-040import org.apache.hadoop.hbase.io.TagCompressionContext;
-041import org.apache.hadoop.hbase.io.util.Dictionary;
-042import org.apache.hadoop.hbase.io.util.StreamUtils;
-043import org.apache.hadoop.hbase.util.ByteBufferUtils;
-044import org.apache.hadoop.hbase.util.ByteRange;
-045import org.apache.hadoop.hbase.util.Bytes;
-046import org.apache.hadoop.hbase.util.ClassSize;
-047
-048/**
-049 * Utility methods helpful slinging {@link Cell} instances.
-050 * Some methods below are for internal use only and are marked InterfaceAudience.Private at the
-051 * method level.
-052 */
-053@InterfaceAudience.Public
-054public final class CellUtil {
-055
-056  /**
-057   * Private constructor to keep this class from being instantiated.
-058   */
-059  private CellUtil(){}
-060
-061  /*** ByteRange ***/
-062
-063  public static ByteRange fillRowRange(Cell cell, ByteRange range) {
-064    return range.set(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
-065  }
-066
-067  public static ByteRange fillFamilyRange(Cell cell, ByteRange range) {
-068    return range.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
-069  }
-070
-071  public static ByteRange fillQualifierRange(Cell cell, ByteRange range) {
-072    return range.set(cell.getQualifierArray(), cell.getQualifierOffset(),
-073      cell.getQualifierLength());
-074  }
-075
-076  public static ByteRange fillValueRange(Cell cell, ByteRange range) {
-077    return range.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
-078  }
-079
-080  public static ByteRange fillTagRange(Cell cell, ByteRange range) {
-081    return range.set(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
-082  }
-083
-084  /* get individual arrays for tests */
-085
-086  public static byte[] cloneRow(Cell cell){
-087    byte[] output = new byte[cell.getRowLength()];
-088    copyRowTo(cell, output, 0);
-089    return output;
-090  }
-091
-092  public static byte[] cloneFamily(Cell cell){
-093    byte[] output = new byte[cell.getFamilyLength()];
-094    copyFamilyTo(cell, output, 0);
-095    return output;
-096  }
-097
-098  public static byte[] cloneQualifier(Cell cell){
-099    byte[] output = new byte[cell.getQualifierLength()];
-100    copyQualifierTo(cell, output, 0);
-101    return output;
-102  }
-103
-104  public static byte[] cloneValue(Cell cell){
-105    byte[] output = new byte[cell.getValueLength()];
-106    copyValueTo(cell, output, 0);
-107    return output;
-108  }
-109
-110  public static byte[] cloneTags(Cell cell) {
-111    byte[] output = new byte[cell.getTagsLength()];
-112    copyTagTo(cell, output, 0);
-113    return output;
-114  }
-115
-116  /**
-117   * Returns tag value in a new byte array. If server-side, use
-118   * {@link Tag#getValueArray()} with appropriate {@link Tag#getValueOffset()} and
-119   * {@link Tag#getValueLength()} instead to save on allocations.
-120   * @param cell
-121   * @return tag value in a new byte array.
-122   */
-123  public static byte[] getTagArray(Cell cell){
-124    byte[] output = new byte[cell.getTagsLength()];
-125    copyTagTo(cell, output, 0);
-126    return output;
-127  }
-128
-129
-130  /* copyTo */
+023import static org.apache.hadoop.hbase.KeyValue.COLUMN_FAMILY_DELIMITER;
+024import static org.apache.hadoop.hbase.KeyValue.getDelimiter;
+025import static org.apache.hadoop.hbase.KeyValue.COLUMN_FAMILY_DELIM_ARRAY;
+026
+027import java.io.DataOutputStream;
+028import java.io.IOException;

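A minimal usage sketch for the clone* helpers above: each copies one component of a Cell into a fresh byte[]. That is convenient client-side; as the getTagArray javadoc notes, server-side code should prefer the offset/length accessors to avoid the extra allocation. The helper class name here is hypothetical.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

final class CellDump {
  // Renders a cell as row/family:qualifier = value, copying each part out.
  static String describe(Cell cell) {
    return Bytes.toStringBinary(CellUtil.cloneRow(cell)) + "/"
        + Bytes.toString(CellUtil.cloneFamily(cell)) + ":"
        + Bytes.toString(CellUtil.cloneQualifier(cell)) + " = "
        + Bytes.toStringBinary(CellUtil.cloneValue(cell));
  }
}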
[14/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
index c08b707..7de2eb8 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
@@ -342,7 +342,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

-static NavigableMap<HRegionInfo, ServerName>
+static NavigableMap<RegionInfo, ServerName>
 MetaTableAccessor.allTableRegions(Connection connection, TableName tableName)
 Deprecated.
@@ -359,7 +359,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 ClusterStatus.getDeadServerNames()

-static Pair<HRegionInfo, ServerName>
+static Pair<RegionInfo, ServerName>
 MetaTableAccessor.getRegion(Connection connection, byte[] regionName)
 Deprecated.
@@ -379,14 +379,14 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 ClusterStatus.getServers()

-static List<Pair<HRegionInfo, ServerName>>
+static List<Pair<RegionInfo, ServerName>>
 MetaTableAccessor.getTableRegionsAndLocations(Connection connection, TableName tableName)
 Do not use this method to get meta table regions, use methods in MetaTableLocator instead.

-static List<Pair<HRegionInfo, ServerName>>
+static List<Pair<RegionInfo, ServerName>>
 MetaTableAccessor.getTableRegionsAndLocations(Connection connection, TableName tableName,
     boolean excludeOfflinedSplitParents)
@@ -394,7 +394,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

-private static CompletableFuture<List<Pair<HRegionInfo, ServerName>>>
+private static CompletableFuture<List<Pair<RegionInfo, ServerName>>>
 AsyncMetaTableAccessor.getTableRegionsAndLocations(RawAsyncTable metaTable,
     Optional<TableName> tableName, boolean excludeOfflinedSplitParents)
@@ -412,8 +412,8 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 static void
-MetaTableAccessor.addDaughter(Connection connection, HRegionInfo regionInfo, ServerName sn,
+MetaTableAccessor.addDaughter(Connection connection, RegionInfo regionInfo, ServerName sn,
     long openSeqNum)
 Adds a daughter region entry to meta.
@@ -436,7 +436,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 ClusterStatus.getLoad(ServerName sn)

-static NavigableMap<HRegionInfo, Result>
+static NavigableMap<RegionInfo, Result>
 MetaTableAccessor.getServerUserRegions(Connection connection, ServerName serverName)

@@ -467,10 +467,10 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 static void
-MetaTableAccessor.mergeRegions(Connection connection, HRegionInfo mergedRegion,
-    HRegionInfo regionA, HRegionInfo regionB,
+MetaTableAccessor.mergeRegions(Connection connection, RegionInfo mergedRegion,
+    RegionInfo regionA, RegionInfo regionB,

[14/51] [partial] hbase-site git commit: Published site at .

2017-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e4e1542/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.html
index 3718311..89c6164 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.html
@@ -37,401 +37,402 @@
 029import org.apache.hadoop.hbase.DoNotRetryIOException;
 030import org.apache.hadoop.hbase.HConstants;
 031import org.apache.hadoop.hbase.TableName;
-032import org.apache.yetus.audience.InterfaceAudience;
-033import org.apache.hadoop.hbase.client.Connection;
-034import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver;
-035import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-036import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-037import org.apache.hadoop.hbase.ipc.RpcServer;
-038import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-039import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
-040import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
-041import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
-042import org.apache.hadoop.hbase.regionserver.Region.BulkLoadListener;
-043import org.apache.hadoop.hbase.security.User;
-044import org.apache.hadoop.hbase.security.UserProvider;
-045import org.apache.hadoop.hbase.security.token.FsDelegationToken;
-046import org.apache.hadoop.hbase.security.token.TokenUtil;
-047import org.apache.hadoop.hbase.util.Bytes;
-048import org.apache.hadoop.hbase.util.FSHDFSUtils;
-049import org.apache.hadoop.hbase.util.FSUtils;
-050import org.apache.hadoop.hbase.util.Methods;
-051import org.apache.hadoop.hbase.util.Pair;
-052import org.apache.hadoop.io.Text;
-053import org.apache.hadoop.security.UserGroupInformation;
-054import org.apache.hadoop.security.token.Token;
-055
-056import java.io.IOException;
-057import java.math.BigInteger;
-058import java.security.PrivilegedAction;
-059import java.security.SecureRandom;
-060import java.util.ArrayList;
-061import java.util.HashMap;
-062import java.util.List;
-063import java.util.Map;
-064
-065/**
-066 * Bulk loads in secure mode.
-067 *
-068 * This service addresses two issues:
-069 * <ol>
-070 * <li>Moving files in a secure filesystem wherein the HBase Client
-071 * and HBase Server are different filesystem users.</li>
-072 * <li>Does moving in a secure manner. Assuming that the filesystem
-073 * is POSIX compliant.</li>
-074 * </ol>
-075 *
-076 * The algorithm is as follows:
-077 * <ol>
-078 * <li>Create an hbase owned staging directory which is
-079 * world traversable (711): {@code /hbase/staging}</li>
-080 * <li>A user writes out data to his secure output directory: {@code /user/foo/data}</li>
-081 * <li>A call is made to hbase to create a secret staging directory
-082 * which globally rwx (777): {@code /user/staging/averylongandrandomdirectoryname}</li>
-083 * <li>The user moves the data into the random staging directory,
-084 * then calls bulkLoadHFiles()</li>
-085 * </ol>
-086 *
-087 * Like delegation tokens the strength of the security lies in the length
-088 * and randomness of the secret directory.
-089 *
-090 */
-091@InterfaceAudience.Private
-092public class SecureBulkLoadManager {
-093
-094  public static final long VERSION = 0L;
-095
-096  //320/5 = 64 characters
-097  private static final int RANDOM_WIDTH = 320;
-098  private static final int RANDOM_RADIX = 32;
-099
-100  private static final Log LOG = LogFactory.getLog(SecureBulkLoadManager.class);
-101
-102  private final static FsPermission PERM_ALL_ACCESS = FsPermission.valueOf("-rwxrwxrwx");
-103  private final static FsPermission PERM_HIDDEN = FsPermission.valueOf("-rwx--x--x");
-104  private SecureRandom random;
-105  private FileSystem fs;
-106  private Configuration conf;
-107
-108  //two levels so it doesn't get deleted accidentally
-109  //no sticky bit in Hadoop 1.0
-110  private Path baseStagingDir;
-111
-112  private UserProvider userProvider;
-113  private Connection conn;
-114
-115  SecureBulkLoadManager(Configuration conf, Connection conn) {
-116    this.conf = conf;
-117    this.conn = conn;
-118  }
-119
-120  public void start() throws IOException {
-121    random = new SecureRandom();
-122    userProvider = UserProvider.instantiate(conf);
-123    fs = FileSystem.get(conf);
-124    baseStagingDir = new Path(FSUtils.getRootDir(conf), HConstants.BULKLOAD_STAGING_DIR_NAME);
-125
-126    if (conf.get("hbase.bulkload.staging.dir") != null) {
-127      LOG.warn("hbase.bulkload.staging.dir " + " is deprecated. Bulkload staging directory is "

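A hedged sketch of the client half of the algorithm described in the javadoc above: HFiles are written to the user's own directory ({@code /user/foo/data} in the javadoc), then handed to the bulk-load tool, which stages them via the secret world-writable directory that this manager creates. LoadIncrementalHFiles lived in org.apache.hadoop.hbase.mapreduce in this era; it later moved packages and was eventually renamed BulkLoadHFiles, and the table name is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

public class SecureBulkLoadClient {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("mytable");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         Table table = conn.getTable(tn);
         RegionLocator locator = conn.getRegionLocator(tn)) {
      // The tool moves the user-owned HFiles through the secure staging directory.
      new LoadIncrementalHFiles(conf).doBulkLoad(new Path("/user/foo/data"), admin, table, locator);
    }
  }
}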
[14/51] [partial] hbase-site git commit: Published site at .

2017-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd3bcf4e/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileWriter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileWriter.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileWriter.html
index 5c20366..5e487cb 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileWriter.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileWriter.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class StoreFileWriter
+public class StoreFileWriter
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements CellSink, ShipperListener
 A StoreFile writer.  Use this to read/write HBase Store 
Files. It is package
@@ -388,7 +388,7 @@ implements 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -397,7 +397,7 @@ implements 
 
 dash
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true;
 title="class or interface in java.util.regex">Pattern dash
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true;
 title="class or interface in java.util.regex">Pattern dash
 
 
 
@@ -406,7 +406,7 @@ implements 
 
 generalBloomFilterWriter
-private finalBloomFilterWriter generalBloomFilterWriter
+private finalBloomFilterWriter generalBloomFilterWriter
 
 
 
@@ -415,7 +415,7 @@ implements 
 
 deleteFamilyBloomFilterWriter
-private finalBloomFilterWriter deleteFamilyBloomFilterWriter
+private finalBloomFilterWriter deleteFamilyBloomFilterWriter
 
 
 
@@ -424,7 +424,7 @@ implements 
 
 bloomType
-private finalBloomType bloomType
+private finalBloomType bloomType
 
 
 
@@ -433,7 +433,7 @@ implements 
 
 earliestPutTs
-privatelong earliestPutTs
+privatelong earliestPutTs
 
 
 
@@ -442,7 +442,7 @@ implements 
 
 deleteFamilyCnt
-privatelong deleteFamilyCnt
+privatelong deleteFamilyCnt
 
 
 
@@ -451,7 +451,7 @@ implements 
 
 bloomContext
-privateBloomContext bloomContext
+privateBloomContext bloomContext
 
 
 
@@ -460,7 +460,7 @@ implements 
 
 deleteFamilyBloomContext
-privateBloomContext deleteFamilyBloomContext
+privateBloomContext deleteFamilyBloomContext
 
 
 
@@ -469,7 +469,7 @@ implements 
 
 timeRangeTrackerSet
-private finalboolean timeRangeTrackerSet
+private finalboolean timeRangeTrackerSet
 timeRangeTrackerSet is used to figure if we were passed a 
filled-out TimeRangeTracker or not.
  When flushing a memstore, we set the TimeRangeTracker that it accumulated 
during updates to
  memstore in here into this Writer and use this variable to indicate that we 
do not need to
@@ -483,7 +483,7 @@ implements 
 
 timeRangeTracker
-finalTimeRangeTracker timeRangeTracker
+finalTimeRangeTracker timeRangeTracker
 
 
 
@@ -492,7 +492,7 @@ implements 
 
 writer
-protectedHFile.Writer writer
+protectedHFile.Writer writer
 
 
 
@@ -509,7 +509,7 @@ implements 
 
 StoreFileWriter
-StoreFileWriter(org.apache.hadoop.fs.FileSystemfs,
+StoreFileWriter(org.apache.hadoop.fs.FileSystemfs,
 org.apache.hadoop.fs.Pathpath,
 org.apache.hadoop.conf.Configurationconf,
 CacheConfigcacheConf,
@@ -543,7 +543,7 @@ implements 
 
 StoreFileWriter
-privateStoreFileWriter(org.apache.hadoop.fs.FileSystemfs,
+privateStoreFileWriter(org.apache.hadoop.fs.FileSystemfs,
 org.apache.hadoop.fs.Pathpath,
 org.apache.hadoop.conf.Configurationconf,
 CacheConfigcacheConf,
@@ -588,7 +588,7 @@ implements 
 
 appendMetadata
-publicvoidappendMetadata(longmaxSequenceId,
+publicvoidappendMetadata(longmaxSequenceId,
booleanmajorCompaction)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Writes meta data.
@@ -608,7 +608,7 @@ implements 
 
 appendMetadata
-publicvoidappendMetadata(longmaxSequenceId,
+publicvoidappendMetadata(longmaxSequenceId,
booleanmajorCompaction,
longmobCellsCount)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
@@ -630,7 +630,7 @@ implements 
 
 appendTrackedTimestampsToMetadata
-publicvoidappendTrackedTimestampsToMetadata()
+publicvoidappendTrackedTimestampsToMetadata()
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Add TimestampRange and earliest put timestamp to 
Metadata
 
@@ -645,7 +645,7 @@ 
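
As context for the accessor hunks above, a hedged sketch of how a writer is typically finished, using only the methods documented on this page (appendMetadata, appendTrackedTimestampsToMetadata, close); the helper itself is illustrative, not HBase code:

    import java.io.IOException;
    import org.apache.hadoop.hbase.regionserver.StoreFileWriter;

    final class FinishStoreFile {
      static void finish(StoreFileWriter w, long maxSeqId) throws IOException {
        w.appendMetadata(maxSeqId, false);     // maxSequenceId, not a major compaction
        w.appendTrackedTimestampsToMetadata(); // TimestampRange + earliest put timestamp
        w.close();
      }
    }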

[14/51] [partial] hbase-site git commit: Published site at .

2017-09-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa7d6c0c/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
index e0b1774..f3d4c5e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
@@ -605,13 +605,13 @@
 597   * @param row
 598   * @param family
 599   * @param qualifier
-600   * @param op
+600   * @param compareOp
 601   * @param comparator @throws 
IOException
 602   */
 603  private boolean checkAndRowMutate(final 
Region region, final List<ClientProtos.Action> actions,
-604  final CellScanner cellScanner, 
byte[] row, byte[] family, byte[] qualifier,
-605  CompareOperator op, 
ByteArrayComparable comparator, RegionActionResult.Builder builder,
-606  ActivePolicyEnforcement 
spaceQuotaEnforcement) throws IOException {
+604final 
CellScanner cellScanner, byte[] row, byte[] family, byte[] qualifier,
+605
CompareOperator op, ByteArrayComparable comparator, RegionActionResult.Builder 
builder,
+606
ActivePolicyEnforcement spaceQuotaEnforcement) throws IOException {
 607if 
(!region.getRegionInfo().isMetaTable()) {
 608  
regionServer.cacheFlusher.reclaimMemStoreMemory();
 609}
@@ -656,2842 +656,2846 @@
 648
 649  /**
 650   * Execute an append mutation.
-651   * @return result to return to client 
if default operation should be
-652   * bypassed as indicated by 
RegionObserver, null otherwise
-653   * @throws IOException
-654   */
-655  private Result append(final Region 
region, final OperationQuota quota,
-656  final MutationProto mutation, final 
CellScanner cellScanner, long nonceGroup,
-657  ActivePolicyEnforcement 
spaceQuota)
-658  throws IOException {
-659long before = 
EnvironmentEdgeManager.currentTime();
-660Append append = 
ProtobufUtil.toAppend(mutation, cellScanner);
-661checkCellSizeLimit(region, append);
-662
spaceQuota.getPolicyEnforcement(region).check(append);
-663quota.addMutation(append);
-664Result r = null;
-665if (region.getCoprocessorHost() != 
null) {
-666  r = 
region.getCoprocessorHost().preAppend(append);
-667}
-668if (r == null) {
-669  boolean canProceed = 
startNonceOperation(mutation, nonceGroup);
-670  boolean success = false;
-671  try {
-672long nonce = mutation.hasNonce() 
? mutation.getNonce() : HConstants.NO_NONCE;
-673if (canProceed) {
-674  r = region.append(append, 
nonceGroup, nonce);
-675} else {
-676  // convert duplicate append to 
get
-677  List<Cell> results = 
region.get(ProtobufUtil.toGet(mutation, cellScanner), false,
-678nonceGroup, nonce);
-679  r = Result.create(results);
-680}
-681success = true;
-682  } finally {
-683if (canProceed) {
-684  endNonceOperation(mutation, 
nonceGroup, success);
-685}
-686  }
-687  if (region.getCoprocessorHost() != 
null) {
-688r = 
region.getCoprocessorHost().postAppend(append, r);
-689  }
-690}
-691if (regionServer.metricsRegionServer 
!= null) {
-692  
regionServer.metricsRegionServer.updateAppend(
-693
EnvironmentEdgeManager.currentTime() - before);
+651   *
+652   * @param region
+653   * @param m
+654   * @param cellScanner
+655   * @return result to return to client 
if default operation should be
+656   * bypassed as indicated by 
RegionObserver, null otherwise
+657   * @throws IOException
+658   */
+659  private Result append(final Region 
region, final OperationQuota quota,
+660  final MutationProto mutation, final 
CellScanner cellScanner, long nonceGroup,
+661  ActivePolicyEnforcement 
spaceQuota)
+662  throws IOException {
+663long before = 
EnvironmentEdgeManager.currentTime();
+664Append append = 
ProtobufUtil.toAppend(mutation, cellScanner);
+665checkCellSizeLimit(region, append);
+666
spaceQuota.getPolicyEnforcement(region).check(append);
+667quota.addMutation(append);
+668Result r = null;
+669if (region.getCoprocessorHost() != 
null) {
+670  r = 
region.getCoprocessorHost().preAppend(append);
+671}
+672if (r == null) {
+673  boolean canProceed = 
startNonceOperation(mutation, nonceGroup);
+674  boolean success = false;
+675  try {
+676long nonce = mutation.hasNonce() 
? mutation.getNonce() : HConstants.NO_NONCE;
+677if (canProceed) {
+678  r = region.append(append, 
nonceGroup, nonce);
+679} else {
+680
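
For context, a hedged client-side sketch of the Append mutation whose server-side path (coprocessor pre/post hooks, nonce handling, duplicate-append-to-Get conversion) appears in the hunk above. The table name "t", family "f", and the addColumn method name are assumptions in the style of the 2.0-era client API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Append;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AppendExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("t"))) {
          Append append = new Append(Bytes.toBytes("row1"));
          append.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("+tail"));
          Result r = table.append(append); // handled server-side by RSRpcServices.append(...)
          System.out.println(r);
        }
      }
    }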

[14/51] [partial] hbase-site git commit: Published site at .

2017-09-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/82166ec2/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
index 436b9a8..97e3009 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
@@ -18,8 +18,8 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":18,"i1":50,"i2":18,"i3":50,"i4":18,"i5":18,"i6":18,"i7":18,"i8":18,"i9":18,"i10":18,"i11":18,"i12":18,"i13":18,"i14":18,"i15":18,"i16":18,"i17":18,"i18":18,"i19":18,"i20":18,"i21":18,"i22":18,"i23":18,"i24":18,"i25":50,"i26":50,"i27":18,"i28":50,"i29":18,"i30":18,"i31":18,"i32":50,"i33":18,"i34":18,"i35":50,"i36":50,"i37":18,"i38":18,"i39":50,"i40":18,"i41":18,"i42":18,"i43":18,"i44":18,"i45":18,"i46":18,"i47":18,"i48":18,"i49":18,"i50":18,"i51":18,"i52":18,"i53":50,"i54":18,"i55":50,"i56":18,"i57":18,"i58":50,"i59":18,"i60":18,"i61":18,"i62":18,"i63":18,"i64":18,"i65":18,"i66":18,"i67":18,"i68":18,"i69":18,"i70":18,"i71":18,"i72":18,"i73":18,"i74":18,"i75":18,"i76":18,"i77":18,"i78":18,"i79":18,"i80":50,"i81":18,"i82":18,"i83":18,"i84":50,"i85":18,"i86":18,"i87":50,"i88":18,"i89":18,"i90":18,"i91":18,"i92":18,"i93":18,"i94":18,"i95":18,"i96":18,"i97":18,"i98":18,"i99":50,"i100":50,"i101":18,"i102":18,"i103":50,"i104":18,"i105":18,"i106":18,"i107":18,"i108":50,"i
 
109":18,"i110":18,"i111":18,"i112":50,"i113":50,"i114":18,"i115":18,"i116":18,"i117":50,"i118":18,"i119":18,"i120":18,"i121":18,"i122":18,"i123":18,"i124":18,"i125":18,"i126":18,"i127":18,"i128":18,"i129":18,"i130":18,"i131":18,"i132":18,"i133":50,"i134":18,"i135":18,"i136":50,"i137":18,"i138":18,"i139":18,"i140":50,"i141":18,"i142":18,"i143":18,"i144":18,"i145":18,"i146":18,"i147":18,"i148":18,"i149":18,"i150":18,"i151":18,"i152":18,"i153":18,"i154":18,"i155":18,"i156":18,"i157":18,"i158":18,"i159":18,"i160":18,"i161":18,"i162":18,"i163":18,"i164":18,"i165":18,"i166":50,"i167":18,"i168":18};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],16:["t5","Default Methods"],32:["t6","Deprecated Methods"]};
+var methods = 
{"i0":18,"i1":18,"i2":18,"i3":18,"i4":18,"i5":18,"i6":18,"i7":18,"i8":18,"i9":18,"i10":18,"i11":18,"i12":18,"i13":18,"i14":18,"i15":18,"i16":18,"i17":18,"i18":18,"i19":18,"i20":18,"i21":18,"i22":18,"i23":18,"i24":18,"i25":18,"i26":18,"i27":18,"i28":18,"i29":18,"i30":18,"i31":18,"i32":18,"i33":18,"i34":18,"i35":18,"i36":18,"i37":18,"i38":18,"i39":18,"i40":18,"i41":18,"i42":18,"i43":18,"i44":18,"i45":18,"i46":18,"i47":18,"i48":18,"i49":18,"i50":18,"i51":18,"i52":18,"i53":18,"i54":18,"i55":18,"i56":18,"i57":18,"i58":18,"i59":18,"i60":18,"i61":18,"i62":18,"i63":18,"i64":18,"i65":18,"i66":18,"i67":18,"i68":18,"i69":18,"i70":18,"i71":18,"i72":18,"i73":18,"i74":18,"i75":18,"i76":18,"i77":18,"i78":18,"i79":18,"i80":18,"i81":18,"i82":18,"i83":18,"i84":18,"i85":18,"i86":18,"i87":18,"i88":18,"i89":18,"i90":18,"i91":18,"i92":18,"i93":18,"i94":18,"i95":18,"i96":18,"i97":18,"i98":18,"i99":18,"i100":18,"i101":18,"i102":18,"i103":18,"i104":18,"i105":18,"i106":18,"i107":18,"i108":18,"i
 
109":18,"i110":18,"i111":18,"i112":18,"i113":18,"i114":18,"i115":18,"i116":18,"i117":18,"i118":18,"i119":18,"i120":18,"i121":18,"i122":18,"i123":18,"i124":18,"i125":18,"i126":18,"i127":18,"i128":18,"i129":18,"i130":18,"i131":18,"i132":18,"i133":18,"i134":18,"i135":18,"i136":18,"i137":18,"i138":18,"i139":18,"i140":18,"i141":18,"i142":18};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
@@ -111,7 +111,7 @@ var activeTableTab = "activeTableTab";
 
 @InterfaceAudience.LimitedPrivate(value="Coprocessor")
  @InterfaceStability.Evolving
-public interface MasterObserver
+public interface MasterObserver
 extends Coprocessor
 Defines coprocessor hooks for interacting with operations 
on the
  HMaster 
process.
@@ -180,7 +180,7 @@ extends 
 Method Summary
 
-All MethodsInstance MethodsDefault MethodsDeprecated Methods
+All MethodsInstance MethodsDefault Methods
 
 Modifier and Type
 Method and Description
@@ -193,37 +193,13 @@ extends 
 
 default void
-postAddColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
- TableName tableName,
- HColumnDescriptor columnFamily)
-Deprecated.
-As of release 2.0.0, this 
will be removed in HBase 3.0.0
- (https://issues.apache.org/jira/browse/HBASE-13645;>HBASE-13645).
- Use postAddColumnFamily(ObserverContext,
 TableName, ColumnFamilyDescriptor).
-
-
-
-
-default void
 postAddColumnFamily(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
ColumnFamilyDescriptor columnFamily)
 Called after the new 
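
A hedged sketch of the replacement hook named above; the postAddColumnFamily signature is taken from the summary table, while the observer class and its empty body are illustrative only (all MasterObserver methods are default methods here, so one override suffices):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.MasterObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;

    public class LoggingMasterObserver implements MasterObserver {
      @Override
      public void postAddColumnFamily(ObserverContext<MasterCoprocessorEnvironment> ctx,
          TableName tableName, ColumnFamilyDescriptor columnFamily) throws IOException {
        // react after the new column family has been added to tableName
      }
    }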

[14/51] [partial] hbase-site git commit: Published site at .

2017-09-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07338a0d/devapidocs/org/apache/hadoop/hbase/classification/tools/StabilityOptions.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/classification/tools/StabilityOptions.html 
b/devapidocs/org/apache/hadoop/hbase/classification/tools/StabilityOptions.html
deleted file mode 100644
index a22a1f4..000
--- 
a/devapidocs/org/apache/hadoop/hbase/classification/tools/StabilityOptions.html
+++ /dev/null
@@ -1,376 +0,0 @@
-StabilityOptions (Apache HBase 3.0.0-SNAPSHOT API)
-org.apache.hadoop.hbase.classification.tools
-Class StabilityOptions
-
-
-
-java.lang.Object
-
-
-org.apache.hadoop.hbase.classification.tools.StabilityOptions
-
-
-
-
-
-
-
-
-final class StabilityOptions
-extends Object
-
-
-
-
-
-
-
-
-
-
-
-Field Summary
-
-Fields
-
-Modifier and Type
-Field and Description
-
-
-static String
-EVOLVING_OPTION
-
-
-static String
-STABLE_OPTION
-
-
-static String
-UNSTABLE_OPTION
-
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors
-
-Modifier
-Constructor and Description
-
-
-private 
-StabilityOptions()
-
-
-
-
-
-
-
-
-
-Method Summary
-
-All MethodsStatic MethodsConcrete Methods
-
-Modifier and Type
-Method and Description
-
-
-static String[][]
-filterOptions(String[][] options)
-
-
-static Integer
-optionLength(String option)
-
-
-static void
-validOptions(String[][] options,
-com.sun.javadoc.DocErrorReporter reporter)
-
-
-
-
-
-
-Methods inherited from class java.lang.Object
-clone, equals, finalize, getClass, hashCode, notify, notifyAll, 

[14/51] [partial] hbase-site git commit: Published site at .

2017-09-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b63bb380/devapidocs/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.html 
b/devapidocs/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.html
index 35d237d..ca1791b 100644
--- a/devapidocs/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.html
+++ b/devapidocs/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.html
@@ -258,7 +258,7 @@ implements RegionObserver
-postAppend,
 postBatchMutate,
 postBatchMutateIndispensably,
 postBulkLoadHFile,
 postBulkLoadHFile,
 postCheckAndDelete,
 postCheckAndPut,
 postClose,
 postCloseRegionOperation,
 postCommitStoreFile,
 postCompact,
 postCompact, postCompactSelection,
 postCompactSelection,
 postCompleteSplit,
 postDelete,
 postExists,
 postFlush,
 postFlush,
 postGetOp,
  postIncrement,
 postIncrementColumnValue,
 postInstantiateDeleteTracker,
 postLogReplay,
 postMutationBeforeWAL,
 postOpen,
 postPut,
 postReplayWALs,
 postRollBackSplit,
 postScannerClose,
 postScannerFilterRow,
 postScannerFilterRow,
 postScannerNext,
 postScannerOpen,
 postSplit,
 postStartRegionOperation,
 postStoreFileReaderOpen,
 postWALRestore,
 preAppend,
 preAppendAfterRowLock
 , preBulkLoadHFile,
 preCheckAndDelete,
 preCheckAndDeleteAfterRowLock,
 preCheckAndPut,
 preCheckAndPutAfterRowLock,
 preClose,
 preCommitStoreFile,
 preCompact,
 preCompact,
 preCompactScannerOpen,
 preCompactScannerOpen,
 preCompactScannerOpen,
 preCompac
 tSelection, preCompactSelection,
 preDelete,
 preExists,
 preFlush,
 preFlush,
 preFlushScannerOpen,
 preFlushScannerOpen,
 preFlushScannerOpen,
 preGetOp,
 preIncrement,
 preIncrementAfterRowLock,
 preIncrementColumn
 Value, prePrepareTimeStampForDeleteVersion,
 prePut,
 preReplayWALs,
 preRo
 llBackSplit, preScannerClose,
 preScannerNext,
 preScannerOpen,
 preSplit,
 preSplit, preSplitAfterPONR,
 preSplitBeforePONR,
 preStoreFileReaderOpen,
 preStoreScannerOpen,
 preStoreScannerOpen,
 preWALRestore
+postAppend,
 postBatchMutate,
 postBatchMutateIndispensably,
 postBulkLoadHFile,
 postBulkLoadHFile,
 postCheckAndDelete,
 postCheckAndPut,
 postClose,
 postCloseRegionOperation,
 postCommitStoreFile,
 postCompact,
 postCompact, postCompactSelection,
 postCompactSelection,
 postCompleteSplit,
 postDelete,
 postExists,
 postFlush,
 postFlush,
 postGetOp,
 postIncrement,
 postIncrementColumnValue,
 postInstantiateDeleteTracker,
 postLogReplay,
 postMutationBeforeWAL,
 postOpen,
 postPut,
 postReplayWALs,
 postRollBackSplit, postScannerClose,
 postScannerFilterRow,
 postScannerFilterRow,
 postScannerNext,
 postScannerOpen,
 postSplit,
 postStartRegionOperation,
 postStoreFileReaderOpen,
 postWALRestore,
 preAppend,
 preAppendAfterRowLock,
 preBulkLoadHFile,
 preCheckAndDelete,
 preCheckAndDeleteAfterRowLock,
 preCheckAndPut,
 preCheckAndPutAfterRowLock,
 preClose,
 preCommitStoreFile,
 preCompact,
 preCompact,
 preCompactScannerOpen,
 preCompactScannerOpen,
 preCompactScannerOpen,
 preCompactSelection,
 preCompactSelection,
 preDelete,
 preExists,
 preFlush,
 preFlush,
 preFlushScannerOpen,
 preFlushScannerOpen,
 preFlushScannerOpen,
 preGetOp,
 preIncrement,
 preIncrementAfterRowLock,
 preIncrementColumnValue,
 prePrepareTimeStampForDeleteVersion,
 prePut,
 preReplayWALs,
 preRollBackSplit,
 preScannerClose,
 preScannerNext,
 preScannerOpen,
 preSplit,
 preSplit<
 /a>, preSplitAfterPONR,
 preSplitBeforePONR,
 preStoreFileReaderOpen,
 preStoreScannerOpen,
 preStoreScannerOpen,
 preWALRestore
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b63bb380/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html 
b/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html
index 384feb3..d44e3cb 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html
@@ -1573,7 +1573,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 private Pair<Integer,Integer>
-ReplicationSourceWALReader.countDistinctRowKeysAndHFiles(WALEdit edit)
+ReplicationSourceWALReader.countDistinctRowKeysAndHFiles(WALEdit edit)
 Count the number of different row keys in the given edit 
because of mini-batching.
 
 
@@ -2104,7 +2104,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 static List<WALSplitter.MutationReplay>
 

[14/51] [partial] hbase-site git commit: Published site at .

2017-09-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1490b3ab/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
 
b/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
index 269954e..b9acba5 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
@@ -729,19 +729,25 @@ service.
 
 
 
+static class
+ClusterStatus.Builder
+Builder to construct a ClusterStatus.
+
+
+
 class
 CompoundConfiguration
 Do a shallow merge of multiple KV configuration pools.
 
 
-
+
 class
 CoordinatedStateException
 Thrown by operations requiring coordination state access or 
manipulation
  when internal error within coordination engine (or other internal 
implementation) occurs.
 
 
-
+
 interface
 CoordinatedStateManager
 Implementations of this interface will keep and return to 
clients
@@ -749,182 +755,182 @@ service.
  coordinated operations.
 
 
-
+
 class
 CoordinatedStateManagerFactory
 Creates instance of CoordinatedStateManager
  based on configuration.
 
 
-
+
 interface
 CoprocessorEnvironment
 Coprocessor environment state.
 
 
-
+
 interface
 ExtendedCellBuilder
 For internal purpose.
 
 
-
+
 class
 ExtendedCellBuilderFactory
 
-
+
 class
 ExtendedCellBuilderImpl
 
-
+
 static class
 HConstants.OperationStatusCode
 Status codes used for return values of bulk 
operations.
 
 
-
+
 class
 HDFSBlocksDistribution
 Data structure to describe the distribution of HDFS blocks 
among hosts.
 
 
-
+
 class
 IndividualBytesFieldCell
 
-
+
 (package private) class
 IndividualBytesFieldCellBuilder
 
-
+
 class
 JitterScheduledThreadPoolExecutorImpl
 ScheduledThreadPoolExecutor that will add some jitter to 
the RunnableScheduledFuture.getDelay.
 
 
-
+
 class
 KeyValue
 An HBase Key/Value.
 
 
-
+
 (package private) class
 KeyValueBuilder
 
-
+
 class
 KeyValueTestUtil
 
-
+
 class
 KeyValueUtil
 static convenience methods for dealing with KeyValues and 
collections of KeyValues
 
 
-
+
 interface
 MetaMutationAnnotation
 The field or the parameter to which this annotation can be 
applied only when it
  holds mutations for hbase:meta table.
 
 
-
+
 class
 MetaTableAccessor
 Read/write operations on region and assignment information 
stored in
  hbase:meta.
 
 
-
+
 static class
 MetaTableAccessor.QueryType
 
-
+
 class
 NoTagsByteBufferKeyValue
 An extension of the ByteBufferKeyValue where the tags 
length is always 0
 
 
-
+
 class
 NoTagsKeyValue
 An extension of the KeyValue where the tags length is 
always 0
 
 
-
+
 class
 RegionLocations
 Container for holding a list of HRegionLocation's 
that correspond to the
  same range.
 
 
-
+
 interface
 RegionStateListener
 The listener interface for receiving region state 
events.
 
 
-
+
 class
 ServiceNotRunningException
 
-
+
 class
 SizeCachedKeyValue
 This class is an extension to KeyValue where rowLen and 
keyLen are cached.
 
 
-
+
 class
 SizeCachedNoTagsKeyValue
This class is an extension to SizeCachedKeyValue 
where there are no tags in Cell.
 
 
-
+
 class
 SplitLogCounters
 Counters kept by the distributed WAL split log 
process.
 
 
-
+
 class
 SplitLogTask
 State of a WAL log split during distributed splitting.
 
 
-
+
 interface
 TableDescriptors
 Get, remove and modify table descriptors.
 
 
-
+
 interface
 Tag
Tags are part of cells and help to add metadata about 
them.
 
 
-
+
 class
 TagType
 
-
+
 class
 TagUtil
 
-
+
 class
 Version
 
-
+
 class
 YouAreDeadException
 This exception is thrown by the master when a region server 
reports and is
  already being processed as dead.
 
 
-
+
 class
 ZKNamespaceManager
Class serves two purposes:
@@ -1046,33 +1052,29 @@ service.
 CellUtil.createNextOnRowCol(Cellcell)
 
 
-long
-ProcedureInfo.getClientAckTime()
-
-
static String
 AuthUtil.getGroupName(String aclKey)
 Returns the actual name for a group principal (stripped of 
the
  group prefix).
 
 
-
+
 static byte
 CellUtil.getQualifierByte(Cellcell,
 intindex)
 
-
+
 List<RegionState>
 ClusterStatus.getRegionsInTransition()
 
-
+
 static byte
 CellUtil.getRowByte(Cellcell,
   intindex)
 misc
 
 
-
+
 KeyValue.KVComparator
 TableName.getRowComparator()
 Deprecated.
@@ -1081,17 +1083,13 @@ service.
 
 
 
-
+
 static ServerName
 MetaTableAccessor.getServerName(Resultr,
  

[14/51] [partial] hbase-site git commit: Published site at .

2017-09-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/985cca06/devapidocs/org/apache/hadoop/hbase/regionserver/Region.BulkLoadListener.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/Region.BulkLoadListener.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/Region.BulkLoadListener.html
index 10c76b7..115e76d 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/Region.BulkLoadListener.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/Region.BulkLoadListener.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static interface Region.BulkLoadListener
+public static interface Region.BulkLoadListener
 Listener class to enable callers of
  bulkLoadHFile() to perform any necessary
  pre/post processing of a given bulkload call
@@ -174,7 +174,7 @@ var activeTableTab = "activeTableTab";
 
 
 prepareBulkLoad
-String prepareBulkLoad(byte[] family,
+String prepareBulkLoad(byte[] family,
String srcPath,
boolean copyFile)
 throws IOException
@@ -196,7 +196,7 @@ var activeTableTab = "activeTableTab";
 
 
 doneBulkLoad
-void doneBulkLoad(byte[] family,
+void doneBulkLoad(byte[] family,
   String srcPath)
throws IOException
 Called after a successful HFile load
@@ -215,7 +215,7 @@ var activeTableTab = "activeTableTab";
 
 
 failedBulkLoad
-void failedBulkLoad(byte[] family,
+void failedBulkLoad(byte[] family,
 String srcPath)
  throws IOException
 Called after a failed HFile load
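
Taken together, the three callbacks above bracket one bulk-load attempt. A hedged sketch of an implementation (method signatures from this page; the class and its bodies are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.regionserver.Region;

    class AuditingBulkLoadListener implements Region.BulkLoadListener {
      @Override
      public String prepareBulkLoad(byte[] family, String srcPath, boolean copyFile)
          throws IOException {
        return srcPath; // e.g. validate or stage the HFile, return the path to load from
      }
      @Override
      public void doneBulkLoad(byte[] family, String srcPath) throws IOException {
        // e.g. clean up staging state after a successful load
      }
      @Override
      public void failedBulkLoad(byte[] family, String srcPath) throws IOException {
        // e.g. restore the source file or log the failure
      }
    }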

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/985cca06/devapidocs/org/apache/hadoop/hbase/regionserver/Region.FlushResult.Result.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/Region.FlushResult.Result.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/Region.FlushResult.Result.html
index e9f05b0..a597c26 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/Region.FlushResult.Result.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/Region.FlushResult.Result.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static enum Region.FlushResult.Result
+public static enum Region.FlushResult.Result
 extends Enum<Region.FlushResult.Result>
 
 
@@ -216,7 +216,7 @@ the order they are declared.
 
 
 FLUSHED_NO_COMPACTION_NEEDED
-public static finalRegion.FlushResult.Result FLUSHED_NO_COMPACTION_NEEDED
+public static finalRegion.FlushResult.Result FLUSHED_NO_COMPACTION_NEEDED
 
 
 
@@ -225,7 +225,7 @@ the order they are declared.
 
 
 FLUSHED_COMPACTION_NEEDED
-public static finalRegion.FlushResult.Result FLUSHED_COMPACTION_NEEDED
+public static finalRegion.FlushResult.Result FLUSHED_COMPACTION_NEEDED
 
 
 
@@ -234,7 +234,7 @@ the order they are declared.
 
 
 CANNOT_FLUSH_MEMSTORE_EMPTY
-public static finalRegion.FlushResult.Result CANNOT_FLUSH_MEMSTORE_EMPTY
+public static finalRegion.FlushResult.Result CANNOT_FLUSH_MEMSTORE_EMPTY
 
 
 
@@ -243,7 +243,7 @@ the order they are declared.
 
 
 CANNOT_FLUSH
-public static finalRegion.FlushResult.Result CANNOT_FLUSH
+public static finalRegion.FlushResult.Result CANNOT_FLUSH
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/985cca06/devapidocs/org/apache/hadoop/hbase/regionserver/Region.FlushResult.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/Region.FlushResult.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/Region.FlushResult.html
index f7508d0..2384cc4 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/Region.FlushResult.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/Region.FlushResult.html
@@ -109,7 +109,7 @@ 
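
A small illustrative helper over the flush outcomes enumerated above (the helper is not HBase code; only the enum constants are taken from the page):

    import org.apache.hadoop.hbase.regionserver.Region;

    final class FlushResults {
      static boolean flushed(Region.FlushResult.Result r) {
        return r == Region.FlushResult.Result.FLUSHED_NO_COMPACTION_NEEDED
            || r == Region.FlushResult.Result.FLUSHED_COMPACTION_NEEDED;
      }
      static boolean needsCompaction(Region.FlushResult.Result r) {
        return r == Region.FlushResult.Result.FLUSHED_COMPACTION_NEEDED;
      }
    }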

[14/51] [partial] hbase-site git commit: Published site at .

2017-09-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e2f20c83/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcClientConfigHelper.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcClientConfigHelper.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcClientConfigHelper.html
index 9a639a1..ecf049a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcClientConfigHelper.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcClientConfigHelper.html
@@ -33,7 +33,7 @@
 025import java.util.HashMap;
 026import java.util.Map;
 027
-028import 
org.apache.commons.lang.StringUtils;
+028import 
org.apache.commons.lang3.StringUtils;
 029import 
org.apache.hadoop.conf.Configuration;
 030import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 031import 
org.apache.hadoop.hbase.util.Pair;

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e2f20c83/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
index db914b6..875a1b5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
@@ -42,7 +42,7 @@
 034import java.util.function.Function;
 035import java.util.stream.Collectors;
 036
-037import 
org.apache.commons.lang.StringUtils;
+037import 
org.apache.commons.lang3.StringUtils;
 038import org.apache.commons.logging.Log;
 039import 
org.apache.commons.logging.LogFactory;
 040import 
org.apache.hadoop.conf.Configuration;

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e2f20c83/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
index db914b6..875a1b5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
@@ -42,7 +42,7 @@
 034import java.util.function.Function;
 035import java.util.stream.Collectors;
 036
-037import 
org.apache.commons.lang.StringUtils;
+037import 
org.apache.commons.lang3.StringUtils;
 038import org.apache.commons.logging.Log;
 039import 
org.apache.commons.logging.LogFactory;
 040import 
org.apache.hadoop.conf.Configuration;

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e2f20c83/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
index db914b6..875a1b5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
@@ -42,7 +42,7 @@
 034import java.util.function.Function;
 035import java.util.stream.Collectors;
 036
-037import 
org.apache.commons.lang.StringUtils;
+037import 
org.apache.commons.lang3.StringUtils;
 038import org.apache.commons.logging.Log;
 039import 
org.apache.commons.logging.LogFactory;
 040import 
org.apache.hadoop.conf.Configuration;

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e2f20c83/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/ImportTsv.TsvParser.BadTsvLineException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/ImportTsv.TsvParser.BadTsvLineException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/ImportTsv.TsvParser.BadTsvLineException.html
index d9410d1..361ad39 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/ImportTsv.TsvParser.BadTsvLineException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/ImportTsv.TsvParser.BadTsvLineException.html
@@ -34,7 +34,7 @@
 026import java.util.HashSet;
 027import java.util.Set;
 028
-029import 
org.apache.commons.lang.StringUtils;
+029import 
org.apache.commons.lang3.StringUtils;
 030import org.apache.commons.logging.Log;
 031import 
org.apache.commons.logging.LogFactory;
 032import 
org.apache.hadoop.conf.Configuration;
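
The only change in the four hunks above is the commons-lang 2.x to 3.x package rename. A hedged sanity check that the renamed import resolves (StringUtils keeps a source-compatible API for calls such as isBlank):

    import org.apache.commons.lang3.StringUtils; // was: org.apache.commons.lang.StringUtils

    class Lang3Check {
      public static void main(String[] args) {
        System.out.println(StringUtils.isBlank(" ")); // prints: true
      }
    }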


[14/51] [partial] hbase-site git commit: Published site at .

2017-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e1eb0a07/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html
index 08c80d4..868c46e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html
@@ -1002,3395 +1002,3361 @@
 994return 
getAlterStatus(TableName.valueOf(tableName));
 995  }
 996
-997  /**
-998   * {@inheritDoc}
-999   * @deprecated Since 2.0. Will be 
removed in 3.0. Use
-1000   * {@link 
#addColumnFamily(TableName, ColumnFamilyDescriptor)} instead.
-1001   */
-1002  @Override
-1003  @Deprecated
-1004  public void addColumn(final TableName 
tableName, final HColumnDescriptor columnFamily)
-1005  throws IOException {
-1006addColumnFamily(tableName, 
columnFamily);
-1007  }
-1008
-1009  @Override
-1010  public void addColumnFamily(final 
TableName tableName, final ColumnFamilyDescriptor columnFamily)
-1011  throws IOException {
-1012get(addColumnFamilyAsync(tableName, 
columnFamily), syncWaitTimeout, TimeUnit.MILLISECONDS);
-1013  }
-1014
-1015  @Override
-1016  public Future<Void> 
addColumnFamilyAsync(final TableName tableName,
-1017  final ColumnFamilyDescriptor 
columnFamily) throws IOException {
-1018AddColumnResponse response =
-1019executeCallable(new 
MasterCallable<AddColumnResponse>(getConnection(),
-1020getRpcControllerFactory()) 
{
-1021  @Override
-1022  protected AddColumnResponse 
rpcCall() throws Exception {
-1023setPriority(tableName);
-1024AddColumnRequest req =
-1025
RequestConverter.buildAddColumnRequest(tableName, columnFamily, 
ng.getNonceGroup(),
-1026  ng.newNonce());
-1027return 
master.addColumn(getRpcController(), req);
-1028  }
-1029});
-1030return new 
AddColumnFamilyFuture(this, tableName, response);
-1031  }
-1032
-1033  private static class 
AddColumnFamilyFuture extends ModifyTableFuture {
-1034public AddColumnFamilyFuture(final 
HBaseAdmin admin, final TableName tableName,
-1035final AddColumnResponse 
response) {
-1036  super(admin, tableName, (response 
!= null && response.hasProcId()) ? response.getProcId()
-1037  : null);
-1038}
-1039
-1040@Override
-1041public String getOperationType() {
-1042  return "ADD_COLUMN_FAMILY";
-1043}
+997  @Override
+998  public void addColumnFamily(final 
TableName tableName, final ColumnFamilyDescriptor columnFamily)
+999  throws IOException {
+1000get(addColumnFamilyAsync(tableName, 
columnFamily), syncWaitTimeout, TimeUnit.MILLISECONDS);
+1001  }
+1002
+1003  @Override
+1004  public Future<Void> 
addColumnFamilyAsync(final TableName tableName,
+1005  final ColumnFamilyDescriptor 
columnFamily) throws IOException {
+1006AddColumnResponse response =
+1007executeCallable(new 
MasterCallable<AddColumnResponse>(getConnection(),
+1008getRpcControllerFactory()) 
{
+1009  @Override
+1010  protected AddColumnResponse 
rpcCall() throws Exception {
+1011setPriority(tableName);
+1012AddColumnRequest req =
+1013
RequestConverter.buildAddColumnRequest(tableName, columnFamily, 
ng.getNonceGroup(),
+1014  ng.newNonce());
+1015return 
master.addColumn(getRpcController(), req);
+1016  }
+1017});
+1018return new 
AddColumnFamilyFuture(this, tableName, response);
+1019  }
+1020
+1021  private static class 
AddColumnFamilyFuture extends ModifyTableFuture {
+1022public AddColumnFamilyFuture(final 
HBaseAdmin admin, final TableName tableName,
+1023final AddColumnResponse 
response) {
+1024  super(admin, tableName, (response 
!= null && response.hasProcId()) ? response.getProcId()
+1025  : null);
+1026}
+1027
+1028@Override
+1029public String getOperationType() {
+1030  return "ADD_COLUMN_FAMILY";
+1031}
+1032  }
+1033
+1034  /**
+1035   * {@inheritDoc}
+1036   * @deprecated Since 2.0. Will be 
removed in 3.0. Use
+1037   * {@link 
#deleteColumnFamily(TableName, byte[])} instead.
+1038   */
+1039  @Override
+1040  @Deprecated
+1041  public void deleteColumn(final 
TableName tableName, final byte[] columnFamily)
+1042  throws IOException {
+1043deleteColumnFamily(tableName, 
columnFamily);
 1044  }
 1045
-1046  /**
-1047   * {@inheritDoc}
-1048   * @deprecated Since 2.0. Will be 
removed in 3.0. Use
-1049   * {@link 
#deleteColumnFamily(TableName, byte[])} instead.
-1050   */
-1051  @Override
-1052  @Deprecated
-1053  public void deleteColumn(final 
TableName 
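
For orientation, a hedged client-side sketch of the synchronous path shown above (addColumnFamily blocking on addColumnFamilyAsync up to syncWaitTimeout); the table and family names, and ColumnFamilyDescriptorBuilder.of, are assumptions in the style of the 2.0-era client API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AddFamilyExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.addColumnFamily(TableName.valueOf("t"),
              ColumnFamilyDescriptorBuilder.of("f2"));
        }
      }
    }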

[14/51] [partial] hbase-site git commit: Published site at .

2017-09-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b220124/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.MergeCommand.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.MergeCommand.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.MergeCommand.html
index 27e9c24..4928cd0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.MergeCommand.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.MergeCommand.html
@@ -114,930 +114,903 @@
 106
 107  public static final String 
REPAIR_CMD_USAGE = "Usage: hbase backup repair\n";
 108
-109  public static final String 
CANCEL_CMD_USAGE = "Usage: hbase backup cancel backup_id\n"
-110  + "  backup_id   Backup image 
id\n";
-111
-112  public static final String 
SET_CMD_USAGE = "Usage: hbase backup set COMMAND [name] [tables]\n"
-113  + "  nameBackup set 
name\n"
-114  + "  tables  Comma 
separated list of tables.\n" + "COMMAND is one of:\n"
-115  + "  add add tables to 
a set, create a set if needed\n"
-116  + "  remove  remove tables 
from a set\n"
-117  + "  listlist all 
backup sets in the system\n"
-118  + "  describedescribe 
set\n" + "  delete  delete backup set\n";
-119  public static final String 
MERGE_CMD_USAGE = "Usage: hbase backup merge [backup_ids]\n"
-120  + "  backup_ids  Comma 
separated list of backup image ids.\n";
-121
-122  public static final String USAGE_FOOTER 
= "";
-123
-124  public static abstract class Command 
extends Configured {
-125CommandLine cmdline;
-126Connection conn;
-127
-128Command(Configuration conf) {
-129  if (conf == null) {
-130conf = 
HBaseConfiguration.create();
-131  }
-132  setConf(conf);
-133}
-134
-135public void execute() throws 
IOException {
-136  if (cmdline.hasOption("h") || 
cmdline.hasOption("help")) {
-137printUsage();
-138throw new 
IOException(INCORRECT_USAGE);
-139  }
-140
-141  // Create connection
-142  conn = 
ConnectionFactory.createConnection(getConf());
-143  if (requiresNoActiveSession()) {
-144// Check active session
-145try (BackupSystemTable table = 
new BackupSystemTable(conn);) {
-146  List<BackupInfo> sessions 
= table.getBackupInfos(BackupState.RUNNING);
-147
-148  if (sessions.size() > 0) {
-149System.err.println("Found 
backup session in a RUNNING state: ");
-150
System.err.println(sessions.get(0));
-151System.err.println("This may 
indicate that a previous session has failed abnormally.");
-152System.err.println("In this 
case, backup recovery is recommended.");
-153throw new IOException("Active 
session found, aborted command execution");
-154  }
-155}
-156  }
-157  if (requiresConsistentState()) {
-158// Check failed delete
-159try (BackupSystemTable table = 
new BackupSystemTable(conn);) {
-160  String[] ids = 
table.getListOfBackupIdsFromDeleteOperation();
-161
-162  if (ids != null && 
ids.length > 0) {
-163System.err.println("Found 
failed backup DELETE command. ");
-164System.err.println("Backup 
system recovery is required.");
-165throw new IOException("Failed 
backup DELETE found, aborted command execution");
-166  }
-167
-168  ids = 
table.getListOfBackupIdsFromMergeOperation();
-169  if (ids != null && 
ids.length > 0) {
-170System.err.println("Found 
failed backup MERGE command. ");
-171System.err.println("Backup 
system recovery is required.");
-172throw new IOException("Failed 
backup MERGE found, aborted command execution");
-173  }
-174
-175}
-176  }
-177}
-178
-179public void finish() throws 
IOException {
-180  if (conn != null) {
-181conn.close();
-182  }
-183}
-184
-185protected abstract void 
printUsage();
-186
-187/**
-188 * The command can't be run if an active 
backup session is in progress
-189 * @return true if no active sessions 
are in progress
-190 */
-191protected boolean 
requiresNoActiveSession() {
-192  return false;
-193}
-194
-195/**
-196 * Command requires consistent state 
of a backup system. The backup system may become inconsistent
-197 * because of an abnormal termination 
of a backup session or delete command
-198 * @return true, if yes
-199 */
-200protected boolean 
requiresConsistentState() {
-201  return false;
-202}
-203  }
-204
-205  private BackupCommands() {
-206throw new 
AssertionError("Instantiating utility class...");
-207  }
-208
-209  public static Command 
createCommand(Configuration conf, 
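
Concrete invocations matching the usage strings above (set names and backup ids are made up for illustration):

    $ hbase backup set add nightly table1,table2    # add tables to a set, creating it if needed
    $ hbase backup set list                         # list all backup sets in the system
    $ hbase backup merge backup_id1,backup_id2      # merge backup images
    $ hbase backup repair                           # recover after a failed DELETE/MERGE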

[14/51] [partial] hbase-site git commit: Published site at .

2017-09-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ebe345d/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
index a2c177f..a984504 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
@@ -1460,281 +1460,275 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 RegionCoprocessorHost.TableCoprocessorAttribute
 
 
-RegionOpeningState
-
-
 RegionScanner
 RegionScanner describes iterators over rows in an 
HRegion.
 
 
-
+
 RegionServerAccounting
 RegionServerAccounting keeps record of some basic real time 
information about
  the Region Server.
 
 
-
+
 RegionServerCoprocessorHost
 
-
+
 RegionServerCoprocessorHost.CoprocessOperationWithResult
 
-
+
 RegionServerCoprocessorHost.CoprocessorOperation
 
-
+
 RegionServerCoprocessorHost.RegionServerEnvironment
 Coprocessor environment extension providing access to 
region server
  related services.
 
 
-
+
 RegionServerServices
 Services provided by HRegionServer
 
 
-
+
 RegionServerServices.PostOpenDeployContext
 Context for postOpenDeployTasks().
 
 
-
+
 RegionServerServices.RegionStateTransitionContext
 
-
+
 RegionServerStoppedException
 Thrown by the region server when it is in shutting down 
state.
 
 
-
+
 RegionServicesForStores
 Services a Store needs from a Region.
 
 
-
+
 RegionSplitPolicy
 A split policy determines when a region should be 
split.
 
 
-
+
 RegionUnassigner
 Used to unassign a region when we hit FNFE.
 
 
-
+
 ReplicationService
 Gateway to Cluster Replication.
 
 
-
+
 ReplicationSinkService
 A sink for a replication stream has to expose this 
service.
 
 
-
+
 ReplicationSourceService
 A source for a replication stream has to expose this 
service.
 
 
-
+
 ReversedStoreScanner
 ReversedStoreScanner extends from StoreScanner, and is used 
to support
  reversed scanning.
 
 
-
+
 RowProcessor
 Defines the procedure to atomically perform multiple scans 
and mutations
  on a HRegion.
 
 
-
+
 RpcSchedulerFactory
 A factory class that constructs an RpcScheduler.
 
 
-
+
 RSRpcServices
 Implements the regionserver RPC services.
 
 
-
+
 RSRpcServices.LogDelegate
 
-
+
 RSRpcServices.RegionScannerHolder
 Holder class which holds the RegionScanner, nextCallSeq and 
RpcCallbacks together.
 
 
-
+
 RSRpcServices.RegionScannersCloseCallBack
 An RpcCallBack that creates a list of scanners that needs 
to perform callBack operation on
  completion of multiGets.
 
 
-
+
 ScanInfo
 Immutable information for scans over a store.
 
 
-
+
 ScannerContext
 ScannerContext instances encapsulate limit tracking AND 
progress towards those limits during
  invocations of InternalScanner.next(java.util.List)
 and
  InternalScanner.next(java.util.List).
 
 
-
+
 ScannerContext.Builder
 
-
+
 ScannerContext.LimitFields
 The different fields that can be used as limits in calls to
  InternalScanner.next(java.util.List)
 and InternalScanner.next(java.util.List)
 
 
-
+
 ScannerContext.LimitScope
 The various scopes where a limit can be enforced.
 
 
-
+
 ScannerContext.NextState
 The possible states a scanner may be in following a call to 
InternalScanner.next(List)
 
 
-
+
 ScannerIdGenerator
 Generate a new style scanner id to prevent collision with 
previous started server or other RSs.
 
 
-
+
 ScanType
 Enum to distinguish general scan types.
 
 
-
+
 SecureBulkLoadManager
 Bulk loads in secure mode.
 
 
-
+
 Segment
 This is an abstraction of a segment maintained in a 
memstore, e.g., the active
  cell set or its snapshot.
 
 
-
+
 SegmentFactory
 A singleton store segment factory.
 
 
-
+
 ServerNonceManager
 Implementation of nonce manager that stores nonces in a 
hash map and cleans them up after
  some time; if nonce group/client ID is supplied, nonces are stored by client 
ID.
 
 
-
+
 ServerNonceManager.OperationContext
 
-
+
 Shipper
 This interface denotes a scanner as one which can ship 
cells.
 
 
-
+
 ShipperListener
 Implementors of this interface are the ones who need to do 
some action when the
  Shipper.shipped()
 is called
 
 
-
+
 SplitLogWorker
 This worker is spawned in every regionserver, including 
master.
 
 
-
+
 SplitLogWorker.TaskExecutor
 Objects implementing this interface actually do the task 
that has been
  acquired by a SplitLogWorker.
 
 
-
+
 SplitLogWorker.TaskExecutor.Status
 
-
+
 Store
 Interface for objects that hold a column family in a 
Region.
 
 
-
+
 StoreConfigInformation
 A more restricted interface for HStore.
 
 
-
+
 StoreEngine
 StoreEngine is a factory that can create the objects 
necessary for HStore to operate.
 
 
-
+
 StoreFile
 An interface to describe a store data file.
 
 
-
+
 StoreFileInfo
 Describe a StoreFile (hfile, reference, link)
 
 
-
+
 

[14/51] [partial] hbase-site git commit: Published site at .

2017-08-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0d6dd914/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.html
new file mode 100644
index 000..64d366a
--- /dev/null
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.html
@@ -0,0 +1,610 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/*
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 *   
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing,
+013 * software distributed under the License 
is distributed on an
+014 * "AS IS" BASIS, WITHOUT WARRANTIES OR 
CONDITIONS OF ANY
+015 * KIND, either express or implied.  See 
the License for the
+016 * specific language governing 
permissions and limitations
+017 * under the License.
+018 */
+019package 
org.apache.hadoop.hbase.coprocessor;
+020
+021import com.google.protobuf.RpcCallback;
+022import 
com.google.protobuf.RpcController;
+023import com.google.protobuf.Service;
+024import java.io.Closeable;
+025import java.io.IOException;
+026import 
java.security.PrivilegedExceptionAction;
+027import java.util.ArrayList;
+028import java.util.LinkedList;
+029import java.util.List;
+030import java.util.Map;
+031import java.util.TreeMap;
+032import org.apache.commons.logging.Log;
+033import 
org.apache.commons.logging.LogFactory;
+034import 
org.apache.hadoop.conf.Configuration;
+035import org.apache.hadoop.fs.FileSystem;
+036import org.apache.hadoop.fs.Path;
+037import org.apache.hadoop.hbase.Cell;
+038import 
org.apache.hadoop.hbase.Coprocessor;
+039import 
org.apache.hadoop.hbase.CoprocessorEnvironment;
+040import 
org.apache.hadoop.hbase.HBaseConfiguration;
+041import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
+042import 
org.apache.hadoop.hbase.HRegionInfo;
+043import 
org.apache.hadoop.hbase.TableName;
+044import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+045import 
org.apache.hadoop.hbase.classification.InterfaceStability;
+046import 
org.apache.hadoop.hbase.client.Connection;
+047import 
org.apache.hadoop.hbase.client.ConnectionFactory;
+048import 
org.apache.hadoop.hbase.client.Result;
+049import 
org.apache.hadoop.hbase.client.Scan;
+050import 
org.apache.hadoop.hbase.client.Table;
+051import 
org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+052import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
+053import 
org.apache.hadoop.hbase.ipc.RpcServer;
+054import 
org.apache.hadoop.hbase.ipc.ServerRpcController;
+055import 
org.apache.hadoop.hbase.mapreduce.ExportUtils;
+056import 
org.apache.hadoop.hbase.mapreduce.Import;
+057import 
org.apache.hadoop.hbase.mapreduce.ResultSerialization;
+058import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+059import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.DelegationToken;
+060import 
org.apache.hadoop.hbase.protobuf.generated.ExportProtos;
+061import 
org.apache.hadoop.hbase.regionserver.InternalScanner;
+062import 
org.apache.hadoop.hbase.regionserver.Region;
+063import 
org.apache.hadoop.hbase.regionserver.RegionScanner;
+064import 
org.apache.hadoop.hbase.security.User;
+065import 
org.apache.hadoop.hbase.security.UserProvider;
+066import 
org.apache.hadoop.hbase.security.token.FsDelegationToken;
+067import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+068import 
org.apache.hadoop.hbase.util.ArrayUtils;
+069import 
org.apache.hadoop.hbase.util.ByteStringer;
+070import 
org.apache.hadoop.hbase.util.Bytes;
+071import 
org.apache.hadoop.hbase.util.Triple;
+072import 
org.apache.hadoop.io.SequenceFile;
+073import org.apache.hadoop.io.Text;
+074import 
org.apache.hadoop.io.compress.CompressionCodec;
+075import 
org.apache.hadoop.io.compress.DefaultCodec;
+076import 
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+077import 
org.apache.hadoop.security.token.Token;
+078import 
org.apache.hadoop.util.GenericOptionsParser;
+079import 
org.apache.hadoop.util.ReflectionUtils;
+080
+081/**
+082 * Export an HBase table. Writes content 
to sequence files up in HDFS. Use
+083 * {@link Import} to read it back in 
again. It is implemented by the endpoint
+084 * technique.
+085 *
+086 * @see 
org.apache.hadoop.hbase.mapreduce.Export
+087 */
+088@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
+089@InterfaceStability.Evolving
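
The class shares its argument parsing with org.apache.hadoop.hbase.mapreduce.Export via the ExportUtils import above, so an endpoint-based export is presumably invoked like the MapReduce variant; treat the exact argument list as an assumption:

    $ hbase org.apache.hadoop.hbase.coprocessor.Export <tablename> <outputdir>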

[14/51] [partial] hbase-site git commit: Published site at .

2017-08-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f751513b/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.LastOnRowColByteBufferCell.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.LastOnRowColByteBufferCell.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.LastOnRowColByteBufferCell.html
index 3e734a6..6b0729b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.LastOnRowColByteBufferCell.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.LastOnRowColByteBufferCell.html
@@ -324,2879 +324,2916 @@
 316return buffer;
 317  }
 318
-319  public static Cell createCell(final 
byte [] row, final byte [] family, final byte [] qualifier,
-320  final long timestamp, final byte 
type, final byte [] value) {
-321// I need a Cell Factory here.  Using 
KeyValue for now. TODO.
-322// TODO: Make a new Cell 
implementation that just carries these
-323// byte arrays.
-324// TODO: Call factory to create 
Cell
-325return new KeyValue(row, family, 
qualifier, timestamp, KeyValue.Type.codeToType(type), value);
-326  }
-327
-328  public static Cell createCell(final 
byte [] rowArray, final int rowOffset, final int rowLength,
-329  final byte [] familyArray, final 
int familyOffset, final int familyLength,
-330  final byte [] qualifierArray, final 
int qualifierOffset, final int qualifierLength) {
-331// See createCell(final byte [] row, 
final byte [] value) for why we default Maximum type.
-332return new KeyValue(rowArray, 
rowOffset, rowLength,
-333familyArray, familyOffset, 
familyLength,
-334qualifierArray, qualifierOffset, 
qualifierLength,
-335HConstants.LATEST_TIMESTAMP,
-336KeyValue.Type.Maximum,
-337HConstants.EMPTY_BYTE_ARRAY, 0, 
HConstants.EMPTY_BYTE_ARRAY.length);
-338  }
-339
-340  /**
-341   * Marked as audience Private as of 
1.2.0.
-342   * Creating a Cell with a 
memstoreTS/mvcc is an internal implementation detail not for
-343   * public use.
-344   */
-345  @InterfaceAudience.Private
-346  public static Cell createCell(final 
byte[] row, final byte[] family, final byte[] qualifier,
-347  final long timestamp, final byte 
type, final byte[] value, final long memstoreTS) {
-348KeyValue keyValue = new KeyValue(row, 
family, qualifier, timestamp,
-349KeyValue.Type.codeToType(type), 
value);
-350keyValue.setSequenceId(memstoreTS);
-351return keyValue;
-352  }
-353
-354  /**
-355   * Marked as audience Private as of 
1.2.0.
-356   * Creating a Cell with tags and a 
memstoreTS/mvcc is an internal implementation detail not for
-357   * public use.
-358   */
-359  @InterfaceAudience.Private
-360  public static Cell createCell(final 
byte[] row, final byte[] family, final byte[] qualifier,
-361  final long timestamp, final byte 
type, final byte[] value, byte[] tags,
-362  final long memstoreTS) {
-363KeyValue keyValue = new KeyValue(row, 
family, qualifier, timestamp,
-364KeyValue.Type.codeToType(type), 
value, tags);
-365keyValue.setSequenceId(memstoreTS);
-366return keyValue;
-367  }
-368
-369  /**
-370   * Marked as audience Private as of 
1.2.0.
-371   * Creating a Cell with tags is an 
internal implementation detail not for
-372   * public use.
-373   */
-374  @InterfaceAudience.Private
-375  public static Cell createCell(final 
byte[] row, final byte[] family, final byte[] qualifier,
-376  final long timestamp, Type type, 
final byte[] value, byte[] tags) {
-377KeyValue keyValue = new KeyValue(row, 
family, qualifier, timestamp, type, value, tags);
-378return keyValue;
-379  }
-380
-381  /**
-382   * Create a Cell with specific row.  
Other fields defaulted.
-383   * @param row
-384   * @return Cell with passed row but all 
other fields are arbitrary
-385   */
-386  public static Cell createCell(final 
byte [] row) {
-387return createCell(row, 
HConstants.EMPTY_BYTE_ARRAY);
-388  }
-389
-390  /**
-391   * Create a Cell with specific row and 
value.  Other fields are defaulted.
-392   * @param row
-393   * @param value
-394   * @return Cell with passed row and 
value but all other fields are arbitrary
-395   */
-396  public static Cell createCell(final 
byte [] row, final byte [] value) {
-397// An empty family + empty qualifier 
+ Type.Minimum is used as flag to indicate last on row.
-398// See the CellComparator and 
KeyValue comparator.  Search for compareWithoutRow.
-399// Lets not make a last-on-row key as 
default but at same time, if you are making a key
-400// without specifying type, etc., 
flag it as weird by setting type to be Maximum.
-401return createCell(row, 
HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY,
-402  HConstants.LATEST_TIMESTAMP, 
KeyValue.Type.Maximum.getCode(), value);
-403  }
-404
-405  /**
-406   * Create a Cell with specific row.  
Other fields defaulted.

[14/51] [partial] hbase-site git commit: Published site at .

2017-08-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ebf9a8b8/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
index d59138d..d615a4c 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
@@ -114,10 +114,14 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class AsyncHBaseAdmin
+public class AsyncHBaseAdmin
extends Object
 implements AsyncAdmin
 The implementation of AsyncAdmin.
+
+Since:
+2.0.0
+
 
 
 
@@ -901,7 +905,7 @@ implements 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -910,7 +914,7 @@ implements 
 
 rawAdmin
-private final RawAsyncHBaseAdmin rawAdmin
+private final RawAsyncHBaseAdmin rawAdmin
 
 
 
@@ -919,7 +923,7 @@ implements 
 
 pool
-private final ExecutorService pool
+private final ExecutorService pool
 
 
 
@@ -936,7 +940,7 @@ implements 
 
 AsyncHBaseAdmin
-AsyncHBaseAdmin(RawAsyncHBaseAdmin rawAdmin,
+AsyncHBaseAdmin(RawAsyncHBaseAdmin rawAdmin,
                ExecutorService pool)
 
 
@@ -954,7 +958,7 @@ implements 
 
 wrap
-private <T> CompletableFuture<T> wrap(CompletableFuture<T> future)
+private <T> CompletableFuture<T> wrap(CompletableFuture<T> future)
 
 
 
@@ -963,7 +967,7 @@ implements 
 
 tableExists
-public CompletableFuture<Boolean> tableExists(TableName tableName)
+public CompletableFuture<Boolean> tableExists(TableName tableName)
 
 Specified by:
tableExists in interface AsyncAdmin
@@ -981,7 +985,7 @@ implements 
 
 listTables
-public CompletableFuture<List<TableDescriptor>> listTables(Optional<Pattern> pattern,
+public CompletableFuture<List<TableDescriptor>> listTables(Optional<Pattern> pattern,
 boolean includeSysTables)
 Description copied from 
interface:AsyncAdmin
 List all the tables matching the given pattern.
@@ -1002,7 +1006,7 @@ implements 
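For context, a minimal sketch of reaching this class through the public AsyncConnection entry point (assumes a reachable cluster; the table name is a placeholder):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AsyncAdminExample {
      public static void main(String[] args) throws Exception {
        try (AsyncConnection conn =
            ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
          // Every AsyncAdmin method returns a CompletableFuture, as in the
          // tableExists and listTables signatures above.
          conn.getAdmin().tableExists(TableName.valueOf("myTable"))
              .thenAccept(exists -> System.out.println("exists: " + exists))
              .join();
        }
      }
    }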
 
 

[14/51] [partial] hbase-site git commit: Published site at .

2017-08-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b5143ed/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
index f5186b5..43afb13 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
@@ -36,5085 +36,5084 @@
 028import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Multimap;
 029import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Ordering;
 030import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.TreeMultimap;
-031import 
com.google.protobuf.ServiceException;
-032
-033import java.io.Closeable;
-034import java.io.FileNotFoundException;
-035import java.io.IOException;
-036import java.io.InterruptedIOException;
-037import java.io.PrintWriter;
-038import java.io.StringWriter;
-039import java.net.InetAddress;
-040import java.net.URI;
-041import java.util.ArrayList;
-042import java.util.Arrays;
-043import java.util.Collection;
-044import java.util.Collections;
-045import java.util.Comparator;
-046import java.util.HashMap;
-047import java.util.HashSet;
-048import java.util.Iterator;
-049import java.util.List;
-050import java.util.Locale;
-051import java.util.Map;
-052import java.util.Map.Entry;
-053import java.util.Set;
-054import java.util.SortedMap;
-055import java.util.SortedSet;
-056import java.util.TreeMap;
-057import java.util.TreeSet;
-058import java.util.Vector;
-059import java.util.concurrent.Callable;
-060import 
java.util.concurrent.ConcurrentSkipListMap;
-061import 
java.util.concurrent.ExecutionException;
-062import 
java.util.concurrent.ExecutorService;
-063import java.util.concurrent.Executors;
-064import java.util.concurrent.Future;
-065import java.util.concurrent.FutureTask;
-066import 
java.util.concurrent.ScheduledThreadPoolExecutor;
-067import java.util.concurrent.TimeUnit;
-068import 
java.util.concurrent.TimeoutException;
-069import 
java.util.concurrent.atomic.AtomicBoolean;
-070import 
java.util.concurrent.atomic.AtomicInteger;
-071
-072import org.apache.commons.io.IOUtils;
-073import 
org.apache.commons.lang.RandomStringUtils;
-074import 
org.apache.commons.lang.StringUtils;
-075import org.apache.commons.logging.Log;
-076import 
org.apache.commons.logging.LogFactory;
-077import 
org.apache.hadoop.conf.Configuration;
-078import 
org.apache.hadoop.conf.Configured;
-079import 
org.apache.hadoop.fs.FSDataOutputStream;
-080import org.apache.hadoop.fs.FileStatus;
-081import org.apache.hadoop.fs.FileSystem;
-082import org.apache.hadoop.fs.Path;
-083import 
org.apache.hadoop.fs.permission.FsAction;
-084import 
org.apache.hadoop.fs.permission.FsPermission;
-085import 
org.apache.hadoop.hbase.Abortable;
-086import org.apache.hadoop.hbase.Cell;
-087import 
org.apache.hadoop.hbase.CellUtil;
-088import 
org.apache.hadoop.hbase.ClusterStatus;
-089import 
org.apache.hadoop.hbase.HBaseConfiguration;
-090import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-091import 
org.apache.hadoop.hbase.HConstants;
-092import 
org.apache.hadoop.hbase.HRegionInfo;
-093import 
org.apache.hadoop.hbase.HRegionLocation;
-094import 
org.apache.hadoop.hbase.KeyValue;
-095import 
org.apache.hadoop.hbase.MasterNotRunningException;
-096import 
org.apache.hadoop.hbase.MetaTableAccessor;
-097import 
org.apache.hadoop.hbase.RegionLocations;
-098import 
org.apache.hadoop.hbase.ServerName;
-099import 
org.apache.hadoop.hbase.TableName;
-100import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-101import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-102import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-103import 
org.apache.hadoop.hbase.client.Admin;
-104import 
org.apache.hadoop.hbase.client.ClusterConnection;
-105import 
org.apache.hadoop.hbase.client.Connection;
-106import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-107import 
org.apache.hadoop.hbase.client.Delete;
-108import 
org.apache.hadoop.hbase.client.Get;
-109import 
org.apache.hadoop.hbase.client.Put;
-110import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-111import 
org.apache.hadoop.hbase.client.Result;
-112import 
org.apache.hadoop.hbase.client.RowMutations;
-113import 
org.apache.hadoop.hbase.client.Table;
-114import 
org.apache.hadoop.hbase.client.TableState;
-115import 
org.apache.hadoop.hbase.io.FileLink;
-116import 
org.apache.hadoop.hbase.io.HFileLink;
-117import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-118import 
org.apache.hadoop.hbase.io.hfile.HFile;
-119import 
org.apache.hadoop.hbase.master.MasterFileSystem;
-120import 
org.apache.hadoop.hbase.master.RegionState;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-122import 

[14/51] [partial] hbase-site git commit: Published site at .

2017-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f1f2a0b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
index bad64ea..94974ec 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
@@ -87,58 +87,94 @@
 
 
 
+org.apache.hadoop.hbase.backup.util
+
+
+
 org.apache.hadoop.hbase.client
 
 Provides HBase Client
 
 
-
+
 org.apache.hadoop.hbase.constraint
 
 Restrict the domain of a data attribute, often times to 
fulfill business rules/requirements.
 
 
-
+
 org.apache.hadoop.hbase.coprocessor
 
 Table of Contents
 
 
-
+
 org.apache.hadoop.hbase.coprocessor.example
 
 
-
+
 org.apache.hadoop.hbase.mapreduce
 
 Provides HBase http://wiki.apache.org/hadoop/HadoopMapReduce;>MapReduce
 Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
-
+
 org.apache.hadoop.hbase.master
 
 
+
+org.apache.hadoop.hbase.master.assignment
+
+
+
+org.apache.hadoop.hbase.master.balancer
+
+
+
+org.apache.hadoop.hbase.master.procedure
+
+
+
+org.apache.hadoop.hbase.master.snapshot
+
+
+
+org.apache.hadoop.hbase.mob
+
+
 
 org.apache.hadoop.hbase.regionserver
 
 
 
-org.apache.hadoop.hbase.rest.client
+org.apache.hadoop.hbase.regionserver.handler
 
 
 
-org.apache.hadoop.hbase.rsgroup
+org.apache.hadoop.hbase.rest.client
 
 
 
-org.apache.hadoop.hbase.security.access
+org.apache.hadoop.hbase.rsgroup
 
 
 
+org.apache.hadoop.hbase.security.access
+
+
+
 org.apache.hadoop.hbase.security.visibility
 
 
+
+org.apache.hadoop.hbase.snapshot
+
+
+
+org.apache.hadoop.hbase.util
+
+
 
 
 
@@ -166,6 +202,65 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
+
+Methods in org.apache.hadoop.hbase
 that return TableDescriptor
+
+Modifier and Type
+Method and Description
+
+
+
+TableDescriptor
+TableDescriptors.get(TableName tableName)
+
+
+TableDescriptor
+TableDescriptors.remove(TableName tablename)
+
+
+
+
+Methods in org.apache.hadoop.hbase
 that return types with arguments of type TableDescriptor
+
+Modifier and Type
+Method and Description
+
+
+
+Map<String,TableDescriptor>
+TableDescriptors.getAll()
+Get Map of all TableDescriptors.
+
+
+
+Map<String,TableDescriptor>
+TableDescriptors.getAllDescriptors()
+Get Map of all TableDescriptors.
+
+
+
+Map<String,TableDescriptor>
+TableDescriptors.getByNamespace(String name)
+Get Map of all NamespaceDescriptors for a given 
namespace.
+
+
+
+
+
+Methods in org.apache.hadoop.hbase
 with parameters of type TableDescriptor
+
+Modifier and Type
+Method and Description
+
+
+
+void
+TableDescriptors.add(TableDescriptor htd)
+Add or update descriptor
+
+
+
+
 
 Constructors in org.apache.hadoop.hbase
 with parameters of type TableDescriptor
 
@@ -180,6 +275,59 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
+
+
+
+Uses of TableDescriptor in org.apache.hadoop.hbase.backup.util
+
+Methods in org.apache.hadoop.hbase.backup.util
 that return TableDescriptor
+
+Modifier and Type
+Method and Description
+
+
+
+(package private) TableDescriptor
+RestoreTool.getTableDesc(TableName tableName)
+Get table descriptor
+
+
+
+private TableDescriptor
+RestoreTool.getTableDescriptor(org.apache.hadoop.fs.FileSystem fileSys,
+  TableName tableName,
+  String lastIncrBackupId)
+
+
+
+
+Methods in org.apache.hadoop.hbase.backup.util
 with parameters of type TableDescriptor
+
+Modifier and Type
+Method and Description
+
+
+
+private void
+RestoreTool.checkAndCreateTable(Connection conn,
+   org.apache.hadoop.fs.Path tableBackupPath,
+   TableName tableName,
+   TableName targetTableName,
+   
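Since this page tracks uses of TableDescriptor, a minimal sketch of building one; the builder method names shifted during the 2.0 alphas, so addColumnFamily here is an assumption for this snapshot:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class TableDescriptorExample {
      public static void main(String[] args) {
        // Immutable descriptor assembled through the builder API; "myTable" and
        // "cf" are placeholder names.
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("myTable"))
            .addColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .build();
        System.out.println(desc.getTableName());
      }
    }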

[14/51] [partial] hbase-site git commit: Published site at .

2017-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9943f14f/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-convergence.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-convergence.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-convergence.html
new file mode 100644
index 000..58f024a
--- /dev/null
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-convergence.html
@@ -0,0 +1,597 @@
+Apache HBase - Archetype builder - Reactor Dependency Convergence
+Reactor Dependency 
Convergence
+
+  Legend:
+
+
+
+At least one dependency has a differing version of the dependency or has 
SNAPSHOT dependencies.
+
+  Statistics:
+
+
+Number of modules:
+37
+
+Number of dependencies (NOD):
+304
+
+Number of unique artifacts (NOA):
+329
+
+Number of version-conflicting artifacts (NOC):
+16
+
+Number of SNAPSHOT artifacts (NOS):
+0
+
+Convergence (NOD/NOA):
+92 %
+
+Ready for release (100% convergence and no SNAPSHOTS):
+Error: You do not have 100% convergence.
+
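(The convergence figure is literally NOD/NOA: 304 dependencies over 329 unique artifacts is about 0.924, reported as 92%; the 16 version-conflicting artifacts enumerated below are what keep it under 100%.)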
+Dependencies used in 
modules
+
+com.fasterxml.jackson.core:jackson-databind
+
+
+
+
+
+
+2.3.1
+
+
+org.apache.hbase:hbase-spark-it:jar:3.0.0-SNAPSHOT
+  \- org.apache.spark:spark-core_2.10:jar:1.6.0:provided
+     \- org.json4s:json4s-jackson_2.10:jar:3.2.10:provided
+        \- (com.fasterxml.jackson.core:jackson-databind:jar:2.3.1:provided - omitted for conflict with 2.4.4)
+org.apache.hbase:hbase-spark:jar:3.0.0-SNAPSHOT
+  \- org.apache.spark:spark-core_2.10:jar:1.6.0:provided
+     \- org.json4s:json4s-jackson_2.10:jar:3.2.10:provided
+        \- (com.fasterxml.jackson.core:jackson-databind:jar:2.3.1:provided - omitted for conflict with 2.4.4)
+
+2.4.2
+
+
+org.apache.hbase:hbase-spark-it:jar:3.0.0-SNAPSHOT
+  \- org.apache.spark:spark-core_2.10:jar:1.6.0:provided
+     \- io.dropwizard.metrics:metrics-json:jar:3.1.2:provided
+        \- (com.fasterxml.jackson.core:jackson-databind:jar:2.4.2:provided - omitted for conflict with 2.4.4)
+org.apache.hbase:hbase-spark:jar:3.0.0-SNAPSHOT
+  \- org.apache.spark:spark-core_2.10:jar:1.6.0:provided
+     \- io.dropwizard.metrics:metrics-json:jar:3.1.2:provided
+        \- (com.fasterxml.jackson.core:jackson-databind:jar:2.4.2:provided - omitted for conflict with 2.4.4)
+
+2.4.4
+
+
+org.apache.hbase:hbase-spark-it:jar:3.0.0-SNAPSHOT+-org.apache.spark:spark-core_2.10:jar:1.6.0:provided|+-org.apache.spark:spark-network-shuffle_2.10:jar:1.6.0:provided||\-(com.fasterxml.jackson.core:jackson-databind:jar:2.4.4:provided
 - omitted for 

[14/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLocalityPicker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLocalityPicker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLocalityPicker.html
index b5f351a..de70e5b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLocalityPicker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLocalityPicker.html
@@ -51,700 +51,703 @@
 043import 
org.apache.hadoop.hbase.favored.FavoredNodesPlan;
 044import 
org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position;
 045import 
org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-046import 
org.apache.hadoop.hbase.master.MasterServices;
-047import 
org.apache.hadoop.hbase.master.RegionPlan;
-048import 
org.apache.hadoop.hbase.util.Pair;
-049
-050import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-051import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-052import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-053
-054/**
-055 * An implementation of the {@link 
org.apache.hadoop.hbase.master.LoadBalancer} that
-056 * assigns favored nodes for each region. 
There is a Primary RegionServer that hosts
-057 * the region, and then there is 
Secondary and Tertiary RegionServers. Currently, the
-058 * favored nodes information is used in 
creating HDFS files - the Primary RegionServer
-059 * passes the primary, secondary, 
tertiary node addresses as hints to the
-060 * DistributedFileSystem API for creating 
files on the filesystem. These nodes are
-061 * treated as hints by the HDFS to place 
the blocks of the file. This alleviates the
-062 * problem to do with reading from remote 
nodes (since we can make the Secondary
-063 * RegionServer as the new Primary 
RegionServer) after a region is recovered. This
-064 * should help provide consistent read 
latencies for the regions even when their
-065 * primary region servers die. This 
provides two
-066 * {@link 
org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.CandidateGenerator}
-067 *
-068 */
-069public class FavoredStochasticBalancer 
extends StochasticLoadBalancer implements
-070FavoredNodesPromoter {
-071
-072  private static final Log LOG = 
LogFactory.getLog(FavoredStochasticBalancer.class);
-073  private FavoredNodesManager fnm;
-074
-075  @Override
-076  public void initialize() throws 
HBaseIOException {
-077configureGenerators();
-078super.initialize();
-079  }
-080
-081  protected void configureGenerators() 
{
-082List<CandidateGenerator> fnPickers = new ArrayList<>(2);
-083fnPickers.add(new 
FavoredNodeLoadPicker());
-084fnPickers.add(new 
FavoredNodeLocalityPicker());
-085setCandidateGenerators(fnPickers);
-086  }
-087
-088  @Override
-089  public void 
setMasterServices(MasterServices masterServices) {
-090
super.setMasterServices(masterServices);
-091fnm = 
masterServices.getFavoredNodesManager();
-092  }
-093
-094  /*
-095   * Round robin assignment: Segregate 
the regions into two types:
-096   *
-097   * 1. The regions that have favored 
node assignment where at least one of the favored node
-098   * is still alive. In this case, try to 
adhere to the current favored nodes assignment as
-099   * much as possible - i.e., if the 
current primary is gone, then make the secondary or
-100   * tertiary as the new host for the 
region (based on their current load). Note that we don't
-101   * change the favored node assignments 
here (even though one or more favored node is
-102   * currently down). That will be done 
by the admin operations.
-103   *
-104   * 2. The regions that currently don't 
have favored node assignments. Generate favored nodes
-105   * for them and then assign. Generate 
the primary fn in round robin fashion and generate
-106   * secondary and tertiary as per 
favored nodes constraints.
-107   */
-108  @Override
-109  public Map<ServerName, List<HRegionInfo>> roundRobinAssignment(List<HRegionInfo> regions,
-110  List<ServerName> servers) throws HBaseIOException {
-111
-112
metricsBalancer.incrMiscInvocations();
-113
-114Set<HRegionInfo> regionSet = Sets.newHashSet(regions);
-115Map<ServerName, List<HRegionInfo>> assignmentMap = assignMasterRegions(regions, servers);
-116if (assignmentMap != null && !assignmentMap.isEmpty()) {
-117  servers = new ArrayList<>(servers);
-118  // Guarantee not to put other 
regions on master
-119  servers.remove(masterServerName);
-120  List<HRegionInfo> masterRegions = assignmentMap.get(masterServerName);
-121  if (!masterRegions.isEmpty()) {
-122for 
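To actually run the master with this balancer, one points hbase.master.loadbalancer.class at it; a minimal configuration sketch (the key name is the one held by HConstants):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    public class BalancerConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // HConstants.HBASE_MASTER_LOADBALANCER_CLASS == "hbase.master.loadbalancer.class"
        conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
            "org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer");
        System.out.println(conf.get(HConstants.HBASE_MASTER_LOADBALANCER_CLASS));
      }
    }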

[14/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
index f47225a..5a77704 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
@@ -841,1497 +841,1513 @@
 833   * @param regionname region name to 
close
 834   * @param serverName Deprecated. Not 
used.
 835   * @throws IOException if a remote or 
network exception occurs
-836   * @deprecated Since 2.0. Will be 
removed in 3.0. Use {@link #unassign(byte[], boolean)} instead.
-837   */
-838  void closeRegion(final String 
regionname, final String serverName) throws IOException;
-839
-840  /**
-841   * Uses {@link #unassign(byte[], 
boolean)} to unassign the region. For expert-admins.
-842   *
-843   * @param regionname region name to 
close
-844   * @param serverName Deprecated. Not 
used.
-845   * @throws IOException if a remote or 
network exception occurs
-846   * @deprecated Since 2.0. Will be 
removed in 3.0. Use {@link #unassign(byte[], boolean)} instead.
-847   */
-848  void closeRegion(final byte[] 
regionname, final String serverName) throws IOException;
-849
-850  /**
-851   * Uses {@link #unassign(byte[], 
boolean)} to unassign the region. For expert-admins.
-852   *
-853   * @param encodedRegionName The encoded 
region name; i.e. the hash that makes up the region name
-854   * suffix: e.g. if regionname is
-855   * <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
-856   * then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
-857   * @param serverName Deprecated. Not 
used.
-858   * @return Deprecated. Returns true 
always.
-859   * @throws IOException if a remote or 
network exception occurs
-860   * @deprecated Since 2.0. Will be 
removed in 3.0. Use {@link #unassign(byte[], boolean)} instead.
-861   */
-862  boolean 
closeRegionWithEncodedRegionName(final String encodedRegionName, final String 
serverName)
-863  throws IOException;
-864
-865  /**
-866   * Uses {@link #unassign(byte[], boolean)} to unassign the region. For expert-admins.
-867   *
-868   * @param sn Deprecated. Not used.
-869   * @deprecated Since 2.0. Will be 
removed in 3.0. Use {@link #unassign(byte[], boolean)} instead.
-870   */
-871  void closeRegion(final ServerName sn, 
final HRegionInfo hri) throws IOException;
-872
-873  /**
-874   * Get all the online regions on a 
region server.
-875   */
-876  List<HRegionInfo> getOnlineRegions(final ServerName sn) throws IOException;
-877
-878  /**
-879   * Flush a table. Synchronous 
operation.
-880   *
-881   * @param tableName table to flush
-882   * @throws IOException if a remote or 
network exception occurs
-883   */
-884  void flush(final TableName tableName) 
throws IOException;
-885
-886  /**
-887   * Flush an individual region. 
Synchronous operation.
-888   *
-889   * @param regionName region to flush
-890   * @throws IOException if a remote or 
network exception occurs
-891   */
-892  void flushRegion(final byte[] 
regionName) throws IOException;
-893
-894  /**
-895   * Compact a table. Asynchronous 
operation.
-896   *
-897   * @param tableName table to compact
-898   * @throws IOException if a remote or 
network exception occurs
-899   */
-900  void compact(final TableName tableName) 
throws IOException;
-901
-902  /**
-903   * Compact an individual region. 
Asynchronous operation.
-904   *
-905   * @param regionName region to 
compact
-906   * @throws IOException if a remote or 
network exception occurs
-907   */
-908  void compactRegion(final byte[] 
regionName) throws IOException;
-909
-910  /**
-911   * Compact a column family within a 
table. Asynchronous operation.
-912   *
-913   * @param tableName table to compact
-914   * @param columnFamily column family 
within a table
-915   * @throws IOException if a remote or 
network exception occurs
-916   */
-917  void compact(final TableName tableName, 
final byte[] columnFamily)
-918throws IOException;
-919
-920  /**
-921   * Compact a column family within a 
region. Asynchronous operation.
-922   *
-923   * @param regionName region to 
compact
-924   * @param columnFamily column family 
within a region
-925   * @throws IOException if a remote or 
network exception occurs
-926   */
-927  void compactRegion(final byte[] 
regionName, final byte[] columnFamily)
-928throws IOException;
-929
-930  /**
-931   * Major compact a table. Asynchronous 
operation.
-932   *
-933   * @param tableName table to major 
compact
-934   * @throws IOException if a remote or 
network exception occurs
-935   */
-936  void majorCompact(TableName tableName) 
throws IOException;
-937
-938  /**
-939   * Major compact a table or an 
individual region. 
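Taken together, a minimal sketch of the non-deprecated paths above (flush is synchronous, compact asynchronous, and unassign replaces the closeRegion family; the table name is a placeholder):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AdminMaintenance {
      public static void main(String[] args) throws Exception {
        try (Connection conn =
                 ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("myTable");
          admin.flush(table);   // blocks until memstores are flushed
          admin.compact(table); // returns once the compaction request is queued
          // Replacement for the deprecated closeRegion variants:
          byte[] region = admin.getTableRegions(table).get(0).getRegionName();
          admin.unassign(region, false);
        }
      }
    }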

[14/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForMultiMutationsObserver.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForMultiMutationsObserver.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForMultiMutationsObserver.html
index 528a384..401b413 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForMultiMutationsObserver.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForMultiMutationsObserver.html
@@ -318,558 +318,604 @@
 310  }
 311
 312  @Test
-313  public void 
testHTableExistsMethodSingleRegionSingleGet() throws Exception {
-314  // Test with a single region 
table.
-315  Table table = 
TEST_UTIL.createTable(
-316  
TableName.valueOf(name.getMethodName()),
-317  new byte[][] { FAMILY });
-318
-319Put put = new Put(ROW);
-320put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-321
-322Get get = new Get(ROW);
-323
-324boolean exist = table.exists(get);
-325assertEquals(exist, false);
+313  public void testBatchWithRowMutation() 
throws Exception {
+314LOG.info("Starting 
testBatchWithRowMutation");
+315final TableName TABLENAME = 
TableName.valueOf("testBatchWithRowMutation");
+316try (Table t = 
TEST_UTIL.createTable(TABLENAME, FAMILY)) {
+317  byte [][] QUALIFIERS = new byte 
[][] {
+318Bytes.toBytes("a"), 
Bytes.toBytes("b")
+319  };
+320  RowMutations arm = new 
RowMutations(ROW);
+321  Put p = new Put(ROW);
+322  p.addColumn(FAMILY, QUALIFIERS[0], 
VALUE);
+323  arm.add(p);
+324  Object[] batchResult = new 
Object[1];
+325  t.batch(Arrays.asList(arm), 
batchResult);
 326
-327table.put(put);
-328
-329exist = table.exists(get);
-330assertEquals(exist, true);
-331  }
-332
-333  public void 
testHTableExistsMethodSingleRegionMultipleGets() throws Exception {
-334Table table = 
TEST_UTIL.createTable(TableName.valueOf(
-335name.getMethodName()), new 
byte[][] { FAMILY });
-336
-337Put put = new Put(ROW);
-338put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-339table.put(put);
-340
-341List<Get> gets = new ArrayList<>();
-342gets.add(new Get(ROW));
-343gets.add(null);
-344gets.add(new Get(ANOTHERROW));
-345
-346boolean[] results = 
table.existsAll(gets);
-347assertEquals(results[0], true);
-348assertEquals(results[1], false);
-349assertEquals(results[2], false);
-350  }
-351
-352  @Test
-353  public void testHTableExistsBeforeGet() 
throws Exception {
-354Table table = 
TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()),
-355new byte[][] { FAMILY });
-356try {
-357  Put put = new Put(ROW);
-358  put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-359  table.put(put);
-360
-361  Get get = new Get(ROW);
-362
-363  boolean exist = 
table.exists(get);
-364  assertEquals(true, exist);
-365
-366  Result result = table.get(get);
-367  assertEquals(false, 
result.isEmpty());
-368  assertTrue(Bytes.equals(VALUE, 
result.getValue(FAMILY, QUALIFIER)));
-369} finally {
-370  table.close();
-371}
-372  }
-373
-374  @Test
-375  public void 
testHTableExistsAllBeforeGet() throws Exception {
-376final byte[] ROW2 = Bytes.add(ROW, 
Bytes.toBytes("2"));
-377Table table = 
TEST_UTIL.createTable(
-378
TableName.valueOf(name.getMethodName()), new byte[][] { FAMILY });
-379try {
-380  Put put = new Put(ROW);
-381  put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-382  table.put(put);
-383  put = new Put(ROW2);
-384  put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-385  table.put(put);
+327  Get g = new Get(ROW);
+328  Result r = t.get(g);
+329  assertEquals(0, 
Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[0])));
+330
+331  arm = new RowMutations(ROW);
+332  p = new Put(ROW);
+333  p.addColumn(FAMILY, QUALIFIERS[1], 
VALUE);
+334  arm.add(p);
+335  Delete d = new Delete(ROW);
+336  d.addColumns(FAMILY, 
QUALIFIERS[0]);
+337  arm.add(d);
+338  t.batch(Arrays.asList(arm), 
batchResult);
+339  r = t.get(g);
+340  assertEquals(0, 
Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[1])));
+341  assertNull(r.getValue(FAMILY, 
QUALIFIERS[0]));
+342
+343  // Test that we get the correct 
remote exception for RowMutations from batch()
+344  try {
+345arm = new RowMutations(ROW);
+346p = new Put(ROW);
+347p.addColumn(new byte[]{'b', 'o', 
'g', 'u', 's'}, QUALIFIERS[0], VALUE);
+348arm.add(p);
+349t.batch(Arrays.asList(arm), 
batchResult);
+350fail("Expected 
RetriesExhaustedWithDetailsException with 
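Distilled from the test above, the client-side pattern for atomic per-row batches (table, family, and qualifier names are placeholders):

    import java.util.Arrays;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RowMutations;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowMutationsBatch {
      public static void main(String[] args) throws Exception {
        byte[] row = Bytes.toBytes("row1");
        byte[] cf = Bytes.toBytes("cf");
        try (Connection conn =
                 ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("myTable"))) {
          RowMutations rm = new RowMutations(row);
          rm.add(new Put(row).addColumn(cf, Bytes.toBytes("b"), Bytes.toBytes("v")));
          rm.add(new Delete(row).addColumns(cf, Bytes.toBytes("a")));
          Object[] results = new Object[1];
          table.batch(Arrays.asList(rm), results); // both mutations apply atomically
        }
      }
    }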

[14/51] [partial] hbase-site git commit: Published site at .

2017-08-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8bae1c8a/devapidocs/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.html 
b/devapidocs/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.html
index 1ebfe64..93b86d7 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.html
@@ -164,7 +164,7 @@ extends Constructor and Description
 
 
-ImmutableHColumnDescriptor(ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor desc)
+ImmutableHColumnDescriptor(ColumnFamilyDescriptor desc)
 Deprecated.
 
 
@@ -233,13 +233,13 @@ extends Deprecated.
 
 
-
+
 
 
 
 
 ImmutableHColumnDescriptor
-ImmutableHColumnDescriptor(ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor desc)
+public ImmutableHColumnDescriptor(ColumnFamilyDescriptor desc)
 Deprecated.
 
 
@@ -257,7 +257,7 @@ extends 
 
 getDelegateeForModification
-protected ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor getDelegateeForModification()
+protected ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor getDelegateeForModification()
 Deprecated.
 
 Overrides:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8bae1c8a/devapidocs/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.html 
b/devapidocs/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.html
index 787145a..35e385f 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.html
@@ -120,7 +120,7 @@ var activeTableTab = "activeTableTab";
 Deprecated.
 
@Deprecated
- @InterfaceAudience.Public
+ @InterfaceAudience.Private
 public class ImmutableHTableDescriptor
 extends HTableDescriptor
 Read-only table descriptor.
@@ -168,6 +168,11 @@ extends Deprecated.
 
 
+
+ImmutableHTableDescriptor(TableDescriptor desc)
+Deprecated.
+
+
 
 
 
@@ -228,10 +233,20 @@ extends 
 
 
+
+
+ImmutableHTableDescriptor
+public ImmutableHTableDescriptor(HTableDescriptor desc)
+Deprecated.
+
+
+
+
+
 
 
 ImmutableHTableDescriptor
-public ImmutableHTableDescriptor(HTableDescriptor desc)
+public ImmutableHTableDescriptor(TableDescriptor desc)
 Deprecated.
 
 
@@ -269,7 +284,7 @@ extends 
 
 getDelegateeForModification
-protected TableDescriptorBuilder.ModifyableTableDescriptor getDelegateeForModification()
+protected TableDescriptorBuilder.ModifyableTableDescriptor getDelegateeForModification()
 Deprecated.
 
 Overrides:



[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html
index 355f5ab..22e2f96 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html
@@ -115,601 +115,601 @@
 107  private final TableName tableName;
 108  private final Configuration 
configuration;
 109  private final ConnectionConfiguration 
connConfiguration;
-110  @VisibleForTesting
-111  volatile BufferedMutatorImpl mutator;
-112  private final Object mutatorLock = new 
Object();
-113  private boolean closed = false;
-114  private final int scannerCaching;
-115  private final long 
scannerMaxResultSize;
-116  private final ExecutorService pool;  // For Multi & Scan
-117  private int operationTimeout; // global 
timeout for each blocking method with retrying rpc
-118  private final int rpcTimeout; // FIXME 
we should use this for rpc like batch and checkAndXXX
-119  private int readRpcTimeout; // timeout 
for each read rpc request
-120  private int writeRpcTimeout; // timeout 
for each write rpc request
-121  private final boolean 
cleanupPoolOnClose; // shutdown the pool in close()
-122  private final HRegionLocator locator;
-123  private final long writeBufferSize;
-124
-125  /** The Async process for batch */
-126  @VisibleForTesting
-127  AsyncProcess multiAp;
-128  private final RpcRetryingCallerFactory 
rpcCallerFactory;
-129  private final RpcControllerFactory 
rpcControllerFactory;
-130
-131  // Marked Private @since 1.0
-132  @InterfaceAudience.Private
-133  public static ThreadPoolExecutor 
getDefaultExecutor(Configuration conf) {
-134int maxThreads = 
conf.getInt("hbase.htable.threads.max", Integer.MAX_VALUE);
-135if (maxThreads == 0) {
-136  maxThreads = 1; // is there a 
better default?
-137}
-138int corePoolSize = 
conf.getInt("hbase.htable.threads.coresize", 1);
-139long keepAliveTime = 
conf.getLong("hbase.htable.threads.keepalivetime", 60);
-140
-141// Using the "direct handoff" 
approach, new threads will only be created
-142// if it is necessary and will grow 
unbounded. This could be bad but in HCM
-143// we only create as many Runnables 
as there are region servers. It means
-144// it also scales when new region 
servers are added.
-145ThreadPoolExecutor pool = new 
ThreadPoolExecutor(corePoolSize, maxThreads, keepAliveTime,
-146  TimeUnit.SECONDS, new SynchronousQueue<>(), Threads.newDaemonThreadFactory("htable"));
-147pool.allowCoreThreadTimeOut(true);
-148return pool;
-149  }
-150
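The "direct handoff" remark above is the SynchronousQueue behavior: no task is ever buffered, so each submission either reuses an idle thread or creates a new one, up to maxThreads. A standalone sketch of the same construction:

    import java.util.concurrent.SynchronousQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class DirectHandoffPool {
      public static void main(String[] args) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            1, 8, 60, TimeUnit.SECONDS, new SynchronousQueue<>());
        pool.allowCoreThreadTimeOut(true); // idle core threads are reclaimed too
        for (int i = 0; i < 4; i++) {
          final int n = i;
          pool.execute(() -> System.out.println(
              "task " + n + " on " + Thread.currentThread().getName()));
        }
        pool.shutdown();
      }
    }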
-151  /**
-152   * Creates an object to access a HBase 
table.
-153   * Used by HBase internally.  DO NOT 
USE. See {@link ConnectionFactory} class comment for how to
-154   * get a {@link Table} instance (use 
{@link Table} instead of {@link HTable}).
-155   * @param connection Connection to be 
used.
-156   * @param builder The table builder
-157   * @param rpcCallerFactory The RPC 
caller factory
-158   * @param rpcControllerFactory The RPC 
controller factory
-159   * @param pool ExecutorService to be 
used.
-160   */
-161  @InterfaceAudience.Private
-162  protected HTable(final 
ClusterConnection connection,
-163  final TableBuilderBase builder,
-164  final RpcRetryingCallerFactory 
rpcCallerFactory,
-165  final RpcControllerFactory 
rpcControllerFactory,
-166  final ExecutorService pool) {
-167if (connection == null || 
connection.isClosed()) {
-168  throw new 
IllegalArgumentException("Connection is null or closed.");
-169}
-170this.connection = connection;
-171this.configuration = 
connection.getConfiguration();
-172this.connConfiguration = 
connection.getConnectionConfiguration();
-173if (pool == null) {
-174  this.pool = 
getDefaultExecutor(this.configuration);
-175  this.cleanupPoolOnClose = true;
-176} else {
-177  this.pool = pool;
-178  this.cleanupPoolOnClose = false;
-179}
-180if (rpcCallerFactory == null) {
-181  this.rpcCallerFactory = 
connection.getNewRpcRetryingCallerFactory(configuration);
-182} else {
-183  this.rpcCallerFactory = 
rpcCallerFactory;
-184}
-185
-186if (rpcControllerFactory == null) {
-187  this.rpcControllerFactory = 
RpcControllerFactory.instantiate(configuration);
-188} else {
-189  this.rpcControllerFactory = 
rpcControllerFactory;
-190}
-191
-192this.tableName = builder.tableName;
-193this.operationTimeout = 
builder.operationTimeout;
-194this.rpcTimeout = 
builder.rpcTimeout;
-195this.readRpcTimeout = 
builder.readRpcTimeout;
-196this.writeRpcTimeout = 
builder.writeRpcTimeout;
-197this.writeBufferSize = 
builder.writeBufferSize;
-198this.scannerCaching = 

[14/51] [partial] hbase-site git commit: Published site at .

2017-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7784a93a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
index 2eac870..56fa0a3 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-abstract static class StochasticLoadBalancer.CandidateGenerator
+abstract static class StochasticLoadBalancer.CandidateGenerator
extends Object
 Generates a candidate action to be applied to the cluster 
for cost function search
 
@@ -232,7 +232,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 CandidateGenerator
-CandidateGenerator()
+CandidateGenerator()
 
 
 
@@ -249,7 +249,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 generate
-abstract BaseLoadBalancer.Cluster.Action generate(BaseLoadBalancer.Cluster cluster)
+abstract BaseLoadBalancer.Cluster.Action generate(BaseLoadBalancer.Cluster cluster)
 
 
 
@@ -258,7 +258,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 pickRandomRegion
-protected int pickRandomRegion(BaseLoadBalancer.Cluster cluster,
+protected int pickRandomRegion(BaseLoadBalancer.Cluster cluster,
 int server,
 double chanceOfNoSwap)
 From a list of regions pick a random one. Null can be 
returned which
@@ -282,7 +282,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 pickRandomServer
-protected int pickRandomServer(BaseLoadBalancer.Cluster cluster)
+protected int pickRandomServer(BaseLoadBalancer.Cluster cluster)
 
 
 
@@ -291,7 +291,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 pickRandomRack
-protected int pickRandomRack(BaseLoadBalancer.Cluster cluster)
+protected int pickRandomRack(BaseLoadBalancer.Cluster cluster)
 
 
 
@@ -300,7 +300,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 pickOtherRandomServer
-protected int pickOtherRandomServer(BaseLoadBalancer.Cluster cluster,
+protected int pickOtherRandomServer(BaseLoadBalancer.Cluster cluster,
 int serverIndex)
 
 
@@ -310,7 +310,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 pickOtherRandomRack
-protected int pickOtherRandomRack(BaseLoadBalancer.Cluster cluster,
+protected int pickOtherRandomRack(BaseLoadBalancer.Cluster cluster,
   int rackIndex)
 
 
@@ -320,7 +320,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 pickRandomRegions
-protected BaseLoadBalancer.Cluster.Action pickRandomRegions(BaseLoadBalancer.Cluster cluster,
+protected BaseLoadBalancer.Cluster.Action pickRandomRegions(BaseLoadBalancer.Cluster cluster,
 int thisServer,
 int otherServer)
 
@@ -331,7 +331,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getAction
-protected BaseLoadBalancer.Cluster.Action getAction(int fromServer,
+protected BaseLoadBalancer.Cluster.Action getAction(int fromServer,
 int fromRegion,
 int toServer,
 int toRegion)
@@ -343,7 +343,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getRandomIterationOrder
-protected List<Integer> getRandomIterationOrder(int length)
+protected List<Integer> getRandomIterationOrder(int length)
 Returns a random iteration order of indexes of an array 
with size length
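That contract reduces to a shuffle of the indexes 0..length-1; roughly:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class RandomIterationOrder {
      static List<Integer> randomOrder(int length) {
        List<Integer> order = new ArrayList<>(length);
        for (int i = 0; i < length; i++) {
          order.add(i); // indexes 0..length-1
        }
        Collections.shuffle(order);
        return order;
      }

      public static void main(String[] args) {
        System.out.println(randomOrder(5)); // e.g. [3, 0, 4, 1, 2]
      }
    }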
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7784a93a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html
--

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html
index a691301..9a8f45d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html
@@ -165,3337 +165,3323 @@
 157import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
 158import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
 159import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-160import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-161import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-162import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-163import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-165import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-166import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
-167import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
-168import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry;
-169import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest;
-170import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse;
-171import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-172import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action;
-173import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
-174import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath;
-175import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
-176import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
-177import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadResponse;
-178import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-179import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Condition;
-180import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-181import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-182import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest;
-183import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetResponse;
-184import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRegionLoadStats;
-185import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest;
-186import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiResponse;
-187import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest;
-188import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateResponse;
-189import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
-190import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
-191import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
-192import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadResponse;
-193import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction;
-194import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult;
-195import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException;
-196import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
-197import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
-198import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-199import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
-200import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair;
-201import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameInt64Pair;
-202import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
-203import 

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
index c9a18a3..c80f6d8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
@@ -2492,2617 +2492,2627 @@
 2484  return;
 2485}
 2486  }
-2487  
errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, "Region "
-2488  + descriptiveName + " is a 
split parent in META, in HDFS, "
-2489  + "and not deployed on any 
region server. This could be transient, "
-2490  + "consider to run the catalog 
janitor first!");
-2491  if (shouldFixSplitParents()) {
-2492setShouldRerun();
-2493resetSplitParent(hbi);
-2494  }
-2495} else if (inMeta && !inHdfs && !isDeployed) {
-2496  
errors.reportError(ERROR_CODE.NOT_IN_HDFS_OR_DEPLOYED, "Region "
-2497  + descriptiveName + " found in 
META, but not in HDFS "
-2498  + "or deployed on any region 
server.");
-2499  if (shouldFixMeta()) {
-2500deleteMetaRegion(hbi);
-2501  }
-2502} else if (inMeta && !inHdfs && isDeployed) {
-2503  
errors.reportError(ERROR_CODE.NOT_IN_HDFS, "Region " + descriptiveName
-2504  + " found in META, but not in 
HDFS, " +
-2505  "and deployed on " + 
Joiner.on(", ").join(hbi.deployedOn));
-2506  // We treat HDFS as ground truth.  
Any information in meta is transient
-2507  // and equivalent data can be 
regenerated.  So, lets unassign and remove
-2508  // these problems from META.
-2509  if (shouldFixAssignments()) {
-2510errors.print("Trying to fix 
unassigned region...");
-2511undeployRegions(hbi);
-2512  }
-2513  if (shouldFixMeta()) {
-2514// wait for it to complete
-2515deleteMetaRegion(hbi);
-2516  }
-2517} else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) {
-2518  
errors.reportError(ERROR_CODE.NOT_DEPLOYED, "Region " + descriptiveName
-2519  + " not deployed on any region 
server.");
-2520  tryAssignmentRepair(hbi, "Trying 
to fix unassigned region...");
-2521} else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) {
-2522  
errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED,
-2523  "Region " + descriptiveName + 
" should not be deployed according " +
-2524  "to META, but is deployed on " 
+ Joiner.on(", ").join(hbi.deployedOn));
-2525  if (shouldFixAssignments()) {
-2526errors.print("Trying to close 
the region " + descriptiveName);
-2527setShouldRerun();
-2528
HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, 
hbi.deployedOn);
-2529  }
-2530} else if (inMeta && inHdfs && isMultiplyDeployed) {
-2531  
errors.reportError(ERROR_CODE.MULTI_DEPLOYED, "Region " + descriptiveName
-2532  + " is listed in hbase:meta on 
region server " + hbi.metaEntry.regionServer
-2533  + " but is multiply assigned 
to region servers " +
-2534  Joiner.on(", 
").join(hbi.deployedOn));
-2535  // If we are trying to fix the 
errors
-2536  if (shouldFixAssignments()) {
-2537errors.print("Trying to fix 
assignment error...");
-2538setShouldRerun();
-2539
HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, 
hbi.deployedOn);
-2540  }
-2541} else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
-2542  
errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, "Region "
-2543  + descriptiveName + " listed 
in hbase:meta on region server " +
-2544  hbi.metaEntry.regionServer + " 
but found on region server " +
-2545  hbi.deployedOn.get(0));
-2546  // If we are trying to fix the 
errors
-2547  if (shouldFixAssignments()) {
-2548errors.print("Trying to fix 
assignment error...");
-2549setShouldRerun();
-2550
HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, 
hbi.deployedOn);
-2551
HBaseFsckRepair.waitUntilAssigned(admin, hbi.getHdfsHRI());
-2552  }
-2553} else {
-2554  
errors.reportError(ERROR_CODE.UNKNOWN, "Region " + descriptiveName +
-2555  " is in an unforeseen state:" 
+
-2556  " inMeta=" + inMeta +
-2557  " inHdfs=" + inHdfs +
-2558  " isDeployed=" + isDeployed 
+
-2559  " isMultiplyDeployed=" + 
isMultiplyDeployed +
-2560  " deploymentMatchesMeta=" + 
deploymentMatchesMeta +
-2561  " shouldBeDeployed=" + 
shouldBeDeployed);
-2562}
-2563  }
-2564
-2565  /**
-2566   * Checks tables integrity. Goes over 
all regions and scans the tables.
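The hunk above is, at heart, a truth table over the flags inMeta, inHdfs, isDeployed, shouldBeDeployed, plus the multiply-deployed and meta-mismatch cases, with each row mapped to an ERROR_CODE and an optional repair. A minimal, self-contained sketch of that decision table (class and method names here are hypothetical; only the flag combinations and verdict names come from the source, whose real logic lives in HBaseFsck):

    // Hypothetical stand-alone model of the decision table in the hunk above.
    public class RegionConsistencySketch {
      enum Verdict {
        NOT_IN_HDFS_OR_DEPLOYED, NOT_IN_HDFS, NOT_DEPLOYED, SHOULD_NOT_BE_DEPLOYED,
        MULTI_DEPLOYED, SERVER_DOES_NOT_MATCH_META, UNKNOWN
      }

      static Verdict classify(boolean inMeta, boolean inHdfs, boolean isDeployed,
          boolean shouldBeDeployed, boolean isMultiplyDeployed, boolean deploymentMatchesMeta) {
        if (inMeta && !inHdfs && !isDeployed) return Verdict.NOT_IN_HDFS_OR_DEPLOYED;
        if (inMeta && !inHdfs && isDeployed) return Verdict.NOT_IN_HDFS;
        if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) return Verdict.NOT_DEPLOYED;
        if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) return Verdict.SHOULD_NOT_BE_DEPLOYED;
        if (inMeta && inHdfs && isMultiplyDeployed) return Verdict.MULTI_DEPLOYED;
        if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) return Verdict.SERVER_DOES_NOT_MATCH_META;
        return Verdict.UNKNOWN; // the source logs all six flags when no row matches
      }

      public static void main(String[] args) {
        // Region in META and deployed, but its files are gone from HDFS:
        System.out.println(classify(true, false, true, true, false, true)); // NOT_IN_HDFS
      }
    }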

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index 0865b8f..596b800 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -30,1795 +30,1790 @@
 022import java.util.ArrayList;
 023import java.util.Collection;
 024import java.util.Collections;
-025import java.util.Comparator;
-026import java.util.HashMap;
-027import java.util.HashSet;
-028import java.util.List;
-029import java.util.Map;
-030import java.util.Set;
-031import java.util.concurrent.CopyOnWriteArrayList;
-032import java.util.concurrent.Future;
-033import java.util.concurrent.TimeUnit;
-034import java.util.concurrent.atomic.AtomicBoolean;
-035import java.util.concurrent.locks.Condition;
-036import java.util.concurrent.locks.ReentrantLock;
-037import java.util.stream.Collectors;
-038
-039import org.apache.commons.logging.Log;
-040import org.apache.commons.logging.LogFactory;
-041import org.apache.hadoop.conf.Configuration;
-042import org.apache.hadoop.hbase.HBaseIOException;
-043import org.apache.hadoop.hbase.HConstants;
-044import org.apache.hadoop.hbase.HRegionInfo;
-045import org.apache.hadoop.hbase.PleaseHoldException;
-046import org.apache.hadoop.hbase.RegionException;
-047import org.apache.hadoop.hbase.RegionStateListener;
-048import org.apache.hadoop.hbase.ServerName;
-049import org.apache.hadoop.hbase.TableName;
-050import org.apache.hadoop.hbase.classification.InterfaceAudience;
-051import org.apache.hadoop.hbase.client.TableState;
-052import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-053import org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer;
-054import org.apache.hadoop.hbase.favored.FavoredNodesManager;
-055import org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-056import org.apache.hadoop.hbase.master.AssignmentListener;
-057import org.apache.hadoop.hbase.master.LoadBalancer;
-058import org.apache.hadoop.hbase.master.MasterServices;
-059import org.apache.hadoop.hbase.master.MetricsAssignmentManager;
-060import org.apache.hadoop.hbase.master.NoSuchProcedureException;
-061import org.apache.hadoop.hbase.master.RegionPlan;
-062import org.apache.hadoop.hbase.master.RegionState;
-063import org.apache.hadoop.hbase.master.RegionState.State;
-064import org.apache.hadoop.hbase.master.ServerListener;
-065import org.apache.hadoop.hbase.master.TableStateManager;
-066import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
-067import org.apache.hadoop.hbase.master.assignment.RegionStates.ServerState;
-068import org.apache.hadoop.hbase.master.assignment.RegionStates.ServerStateNode;
-069// TODO: why are they here?
-070import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
-071import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-072import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-073import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
-074import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
-075import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
-076import org.apache.hadoop.hbase.procedure2.Procedure;
-077import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-078import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-079import org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore;
-080import org.apache.hadoop.hbase.procedure2.util.StringUtils;
-081import org.apache.hadoop.hbase.quotas.QuotaExceededException;
-082import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
-087import org.apache.hadoop.hbase.util.Bytes;
-088import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-089import org.apache.hadoop.hbase.util.Pair;
-090import org.apache.hadoop.hbase.util.Threads;
-091import org.apache.hadoop.hbase.util.VersionInfo;
+025import java.util.HashMap;
+026import java.util.HashSet;
+027import java.util.List;
+028import java.util.Map;
+029import java.util.Set;
+030import java.util.concurrent.CopyOnWriteArrayList;
+031import

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.InlineChore.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.InlineChore.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.InlineChore.html
index afd9ccc..904b921 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.InlineChore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.InlineChore.html
@@ -30,1916 +30,1984 @@
 022import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
 023
 024import java.io.IOException;
-025import java.util.ArrayList;
-026import java.util.Arrays;
-027import java.util.HashSet;
-028import java.util.Iterator;
-029import java.util.List;
-030import java.util.Map;
-031import java.util.Set;
-032import java.util.concurrent.atomic.AtomicBoolean;
-033import java.util.concurrent.atomic.AtomicInteger;
-034import java.util.concurrent.atomic.AtomicLong;
-035import java.util.stream.Collectors;
-036import java.util.stream.Stream;
-037import java.util.concurrent.ConcurrentHashMap;
-038import java.util.concurrent.CopyOnWriteArrayList;
-039import java.util.concurrent.DelayQueue;
-040import java.util.concurrent.TimeUnit;
-041
-042import org.apache.commons.logging.Log;
-043import org.apache.commons.logging.LogFactory;
-044import org.apache.hadoop.conf.Configuration;
-045import org.apache.hadoop.hbase.HConstants;
-046import org.apache.hadoop.hbase.ProcedureInfo;
-047import org.apache.hadoop.hbase.classification.InterfaceAudience;
-048import org.apache.hadoop.hbase.classification.InterfaceStability;
-049import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
-050import org.apache.hadoop.hbase.procedure2.Procedure.LockState;
-051import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-052import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-053import org.apache.hadoop.hbase.procedure2.util.DelayedUtil;
-054import org.apache.hadoop.hbase.procedure2.util.DelayedUtil.DelayedWithTimeout;
-055import org.apache.hadoop.hbase.procedure2.util.StringUtils;
-056import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-057import org.apache.hadoop.hbase.security.User;
-058import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-059import org.apache.hadoop.hbase.util.NonceKey;
-060import org.apache.hadoop.hbase.util.Pair;
-061import org.apache.hadoop.hbase.util.Threads;
-062
-063/**
-064 * Thread Pool that executes the submitted procedures.
-065 * The executor has a ProcedureStore associated.
-066 * Each operation is logged and on restart the pending procedures are resumed.
-067 *
-068 * Unless the Procedure code throws an error (e.g. invalid user input)
-069 * the procedure will complete (at some point in time), On restart the pending
-070 * procedures are resumed and the once failed will be rolledback.
-071 *
-072 * The user can add procedures to the executor via submitProcedure(proc)
-073 * check for the finished state via isFinished(procId)
-074 * and get the result via getResult(procId)
-075 */
-076@InterfaceAudience.Private
-077@InterfaceStability.Evolving
-078public class ProcedureExecutor<TEnvironment> {
-079  private static final Log LOG = LogFactory.getLog(ProcedureExecutor.class);
-080
-081  public static final String CHECK_OWNER_SET_CONF_KEY = "hbase.procedure.check.owner.set";
-082  private static final boolean DEFAULT_CHECK_OWNER_SET = false;
-083
-084  public static final String WORKER_KEEP_ALIVE_TIME_CONF_KEY =
-085      "hbase.procedure.worker.keep.alive.time.msec";
-086  private static final long DEFAULT_WORKER_KEEP_ALIVE_TIME = Long.MAX_VALUE;
-087
-088  Testing testing = null;
-089  public static class Testing {
-090    protected boolean killIfSuspended = false;
-091    protected boolean killBeforeStoreUpdate = false;
-092    protected boolean toggleKillBeforeStoreUpdate = false;
-093
-094    protected boolean shouldKillBeforeStoreUpdate() {
-095      final boolean kill = this.killBeforeStoreUpdate;
-096      if (this.toggleKillBeforeStoreUpdate) {
-097        this.killBeforeStoreUpdate = !kill;
-098        LOG.warn("Toggle KILL before store update to: " + this.killBeforeStoreUpdate);
-099      }
-100      return kill;
-101    }
-102
-103    protected boolean shouldKillBeforeStoreUpdate(final boolean isSuspended) {
-104      return (isSuspended && !killIfSuspended) ? false : shouldKillBeforeStoreUpdate();
-105    }
-106  }
-107
-108  public interface ProcedureExecutorListener {
-109    void procedureLoaded(long procId);
-110    void procedureAdded(long procId);
-111    void procedureFinished(long procId);
-112  }
-113
-114  /**
-115   * Internal cleaner that removes the completed procedure results
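The class javadoc in this hunk names the full lifecycle: submitProcedure(proc) to enqueue, isFinished(procId) to poll, getResult(procId) to fetch, with the ProcedureStore making it restartable. A toy, self-contained analog of that contract (this is not the HBase class; it omits the store and rollback and only makes the submit/poll/fetch flow concrete):

    import java.util.Map;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.atomic.AtomicLong;

    public class MiniProcedureExecutor {
      private final ExecutorService pool = Executors.newFixedThreadPool(2);
      private final Map<Long, Future<String>> results = new ConcurrentHashMap<>();
      private final AtomicLong ids = new AtomicLong();

      public long submitProcedure(Callable<String> proc) {
        long id = ids.incrementAndGet();
        results.put(id, pool.submit(proc)); // the real executor also logs to its ProcedureStore here
        return id;
      }

      public boolean isFinished(long procId) {
        return results.get(procId).isDone();
      }

      public String getResult(long procId) throws Exception {
        return results.get(procId).get();
      }

      public static void main(String[] args) throws Exception {
        MiniProcedureExecutor executor = new MiniProcedureExecutor();
        long procId = executor.submitProcedure(() -> "table created");
        while (!executor.isFinished(procId)) {
          Thread.sleep(10);
        }
        System.out.println(executor.getResult(procId));
        executor.pool.shutdown();
      }
    }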

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.html
index 06b7a03..7dabb5e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.html
@@ -216,519 +216,505 @@
 208    return queueLists.toString();
 209  }
 210
-211  public synchronized void requestRegionsMerge(final Region a,
-212      final Region b, final boolean forcible, long masterSystemTime, User user) {
-213    try {
-214      mergePool.execute(new RegionMergeRequest(a, b, this.server, forcible, masterSystemTime,user));
-215      if (LOG.isDebugEnabled()) {
-216        LOG.debug("Region merge requested for " + a + "," + b + ", forcible="
-217            + forcible + ".  " + this);
+211  public synchronized boolean requestSplit(final Region r) {
+212    // don't split regions that are blocking
+213    if (shouldSplitRegion() && ((HRegion)r).getCompactPriority() >= Store.PRIORITY_USER) {
+214      byte[] midKey = ((HRegion)r).checkSplit();
+215      if (midKey != null) {
+216        requestSplit(r, midKey);
+217        return true;
 218      }
-219    } catch (RejectedExecutionException ree) {
-220      LOG.warn("Could not execute merge for " + a + "," + b + ", forcible="
-221          + forcible, ree);
-222    }
-223  }
-224
-225  public synchronized boolean requestSplit(final Region r) {
-226    // don't split regions that are blocking
-227    if (shouldSplitRegion() && ((HRegion)r).getCompactPriority() >= Store.PRIORITY_USER) {
-228      byte[] midKey = ((HRegion)r).checkSplit();
-229      if (midKey != null) {
-230        requestSplit(r, midKey);
-231        return true;
-232      }
-233    }
-234    return false;
-235  }
-236
-237  public synchronized void requestSplit(final Region r, byte[] midKey) {
-238    requestSplit(r, midKey, null);
-239  }
-240
-241  /*
-242   * The User parameter allows the split thread to assume the correct user identity
-243   */
-244  public synchronized void requestSplit(final Region r, byte[] midKey, User user) {
-245    if (midKey == null) {
-246      LOG.debug("Region " + r.getRegionInfo().getRegionNameAsString() +
-247        " not splittable because midkey=null");
-248      if (((HRegion)r).shouldForceSplit()) {
-249        ((HRegion)r).clearSplit();
-250      }
-251      return;
-252    }
-253    try {
-254      this.splits.execute(new SplitRequest(r, midKey, this.server, user));
-255      if (LOG.isDebugEnabled()) {
-256        LOG.debug("Splitting " + r + ", " + this);
-257      }
-258    } catch (RejectedExecutionException ree) {
-259      LOG.info("Could not execute split for " + r, ree);
-260    }
-261  }
-262
-263  @Override
-264  public synchronized List<CompactionRequest> requestCompaction(final Region r, final String why)
-265      throws IOException {
-266    return requestCompaction(r, why, null);
-267  }
-268
-269  @Override
-270  public synchronized List<CompactionRequest> requestCompaction(final Region r, final String why,
-271      List<Pair<CompactionRequest, Store>> requests) throws IOException {
-272    return requestCompaction(r, why, Store.NO_PRIORITY, requests, null);
-273  }
-274
-275  @Override
-276  public synchronized CompactionRequest requestCompaction(final Region r, final Store s,
-277      final String why, CompactionRequest request) throws IOException {
-278    return requestCompaction(r, s, why, Store.NO_PRIORITY, request, null);
-279  }
-280
-281  @Override
-282  public synchronized List<CompactionRequest> requestCompaction(final Region r, final String why,
-283      int p, List<Pair<CompactionRequest, Store>> requests, User user) throws IOException {
-284    return requestCompactionInternal(r, why, p, requests, true, user);
-285  }
-286
-287  private List<CompactionRequest> requestCompactionInternal(final Region r, final String why,
-288      int p, List<Pair<CompactionRequest, Store>> requests, boolean selectNow, User user)
-289      throws IOException {
-290    // not a special compaction request, so make our own list
-291    List<CompactionRequest> ret = null;
-292    if (requests == null) {
-293      ret = selectNow ? new ArrayList<CompactionRequest>(r.getStores().size()) : null;
-294      for (Store s : r.getStores()) {
-295        CompactionRequest cr = requestCompactionInternal(r, s, why, p, null, selectNow, user);
-296        if (selectNow) ret.add(cr);
-297      }
-298    } else {
-299      Preconditions.checkArgument(selectNow); // only system requests have selectNow == false
-300      ret = new ArrayList<CompactionRequest>(requests.size());
-301      for (Pair<CompactionRequest, Store> pair : requests) {
-302
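requestCompactionInternal above has two paths: with no explicit request list it fans out one request per store, and with an explicit list it insists on selectNow. A hypothetical model of just those two paths, with plain strings standing in for the real Region/Store/CompactionRequest types:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class CompactionFanOutSketch {
      static List<String> requestCompaction(List<String> stores, List<String> requests,
          boolean selectNow) {
        List<String> ret;
        if (requests == null) {
          // not a special compaction request, so make our own list
          ret = selectNow ? new ArrayList<>(stores.size()) : null;
          for (String store : stores) {
            String cr = "compact:" + store;
            if (selectNow) ret.add(cr);
          }
        } else {
          // only system requests have selectNow == false
          if (!selectNow) throw new IllegalArgumentException("explicit requests imply selectNow");
          ret = new ArrayList<>(requests);
        }
        return ret;
      }

      public static void main(String[] args) {
        System.out.println(requestCompaction(Arrays.asList("cf1", "cf2"), null, true));
        // prints [compact:cf1, compact:cf2]
      }
    }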

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScan.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScan.html b/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScan.html
index 273e3e0..23ebe91 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScan.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScan.html
@@ -272,7 +272,7 @@ extends 
 
 Methods inherited from class org.apache.hadoop.hbase.client.Scan
-addColumn, addFamily, createScanFromCursor, getAllowPartialResults, getBatch, getCacheBlocks, getCaching, getFamilies, getFamilyMap, getFilter, getFingerprint, getLimit, getMaxResultSize, getMaxResultsPerColumnFamily, getMaxVersions, getReadType, getRowOffsetPerColumnFamily, getScanMetrics, getStartRow, getStopRow, hasFamilies, hasFilter, includeStartRow, includeStopRow, isAsyncPrefetch, isGetScan, isNeedCursorResult, isRaw, isReversed, isScanMetricsEnabled, isSmall, numFamilies, setACL, setACL, setAllowPartialResults, setAsyncPrefetch, setAttribute, setAuthorizations, setBatch, setCacheBlocks, setCaching, setColumnFamilyTimeRange, setColumnFamilyTimeRange, setConsistency, setFamilyMap, setFilter, setId, setIsolationLevel, setLimit, setLoadColumnFamiliesOnDemand, setMaxResultSize, setMaxResultsPerColumnFamily, setMaxVersions, setMaxVersions, setNeedCursorResult, setOneRowLimit, setRaw, setReadType, setReplicaId, setReversed, setRowOffsetPerColumnFamily, setRowPrefixFilter, setScanMetricsEnabled, setSmall, setStartRow, setStopRow, setTimeRange, setTimeRange, setTimeStamp, toMap, withStartRow, withStartRow, withStopRow, withStopRow
+addColumn, addFamily, createScanFromCursor, getAllowPartialResults, getBatch, getCacheBlocks, getCaching, getFamilies, getFamilyMap, getFilter, getFingerprint, getLimit, getMaxResultSize, getMaxResultsPerColumnFamily, getMaxVersions, getReadType, getRowOffsetPerColumnFamily, getScanMetrics, getStartRow, getStopRow, hasFamilies, hasFilter, includeStartRow, includeStopRow, isAsyncPrefetch, isGetScan, isNeedCursorResult, isRaw, isReversed, isScanMetricsEnabled, isSmall, numFamilies, setACL, setACL, setAllowPartialResults, setAsyncPrefetch, setAttribute, setAuthorizations, setBatch, setCacheBlocks, setCaching, setColumnFamilyTimeRange, setColumnFamilyTimeRange, setConsistency, setFamilyMap, setFilter, setId, setIsolationLevel, setLimit, setLoadColumnFamiliesOnDemand, setMaxResultSize, setMaxResultsPerColumnFamily, setMaxVersions, setMaxVersions, setNeedCursorResult, setOneRowLimit, setPriority, setRaw, setReadType, setReplicaId, setReversed, setRowOffsetPerColumnFamily, setRowPrefixFilter, setScanMetricsEnabled, setSmall, setStartRow, setStopRow, setTimeRange, setTimeRange, setTimeStamp, toMap, withStartRow, withStartRow, withStopRow, withStopRow
 
 
 
@@ -286,7 +286,7 @@ extends 
 
 Methods inherited from class org.apache.hadoop.hbase.client.OperationWithAttributes
-getAttribute, getAttributeSize, getAttributesMap, getId
+getAttribute, getAttributeSize, getAttributesMap, getId, getPriority
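The only substantive change in these two hunks is that InternalScan now also inherits setPriority from Scan and getPriority from OperationWithAttributes. A minimal use of the new accessor pair, assuming an HBase 2.x hbase-client on the classpath (HConstants.HIGH_QOS is an existing priority constant):

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.Scan;

    public class ScanPriorityExample {
      public static void main(String[] args) {
        Scan scan = new Scan();
        scan.setCaching(100);
        scan.setPriority(HConstants.HIGH_QOS); // ask for elevated RPC priority
        System.out.println(scan.getPriority());
      }
    }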
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 3aff28e..0006a5a 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -692,20 +692,20 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.regionserver.StoreScanner.StoreScannerCompactionRace
-org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
 org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
-org.apache.hadoop.hbase.regionserver.RegionOpeningState
-org.apache.hadoop.hbase.regionserver.Region.Operation
-org.apache.hadoop.hbase.regionserver.MemStoreCompactor.Action
-org.apache.hadoop.hbase.regionserver.Region.FlushResult.Result
+org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
 org.apache.hadoop.hbase.regionserver.FlushType
+org.apache.hadoop.hbase.regionserver.RegionOpeningState

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/TableName.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/TableName.html b/devapidocs/src-html/org/apache/hadoop/hbase/TableName.html
index b54153b..655084f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/TableName.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/TableName.html
@@ -149,415 +149,417 @@
 141      throw new IllegalArgumentException("Name is null or empty");
 142    }
 143
-144    int namespaceDelimIndex = com.google.common.primitives.Bytes.lastIndexOf(tableName,
-145        (byte) NAMESPACE_DELIM);
-146    if (namespaceDelimIndex < 0){
-147      isLegalTableQualifierName(tableName);
-148    } else {
-149      isLegalNamespaceName(tableName, 0, namespaceDelimIndex);
-150      isLegalTableQualifierName(tableName, namespaceDelimIndex + 1, tableName.length);
-151    }
-152    return tableName;
-153  }
-154
-155  public static byte [] isLegalTableQualifierName(final byte[] qualifierName) {
-156    isLegalTableQualifierName(qualifierName, 0, qualifierName.length, false);
-157    return qualifierName;
-158  }
-159
-160  public static byte [] isLegalTableQualifierName(final byte[] qualifierName, boolean isSnapshot) {
-161    isLegalTableQualifierName(qualifierName, 0, qualifierName.length, isSnapshot);
-162    return qualifierName;
-163  }
-164
+144    int namespaceDelimIndex =
+145      org.apache.hadoop.hbase.shaded.com.google.common.primitives.Bytes.lastIndexOf(tableName,
+146        (byte) NAMESPACE_DELIM);
+147    if (namespaceDelimIndex < 0){
+148      isLegalTableQualifierName(tableName);
+149    } else {
+150      isLegalNamespaceName(tableName, 0, namespaceDelimIndex);
+151      isLegalTableQualifierName(tableName, namespaceDelimIndex + 1, tableName.length);
+152    }
+153    return tableName;
+154  }
+155
+156  public static byte [] isLegalTableQualifierName(final byte[] qualifierName) {
+157    isLegalTableQualifierName(qualifierName, 0, qualifierName.length, false);
+158    return qualifierName;
+159  }
+160
+161  public static byte [] isLegalTableQualifierName(final byte[] qualifierName, boolean isSnapshot) {
+162    isLegalTableQualifierName(qualifierName, 0, qualifierName.length, isSnapshot);
+163    return qualifierName;
+164  }
 165
-166  /**
-167   * Qualifier names can only contain 'word' characters
-168   * <code>[\p{IsAlphabetic}\p{Digit}]</code> or '_', '.' or '-'.
-169   * The name may not start with '.' or '-'.
-170   *
-171   * @param qualifierName byte array containing the qualifier name
-172   * @param start start index
-173   * @param end end index (exclusive)
-174   */
-175  public static void isLegalTableQualifierName(final byte[] qualifierName,
-176                                               int start,
-177                                               int end) {
-178    isLegalTableQualifierName(qualifierName, start, end, false);
-179  }
-180
-181  public static void isLegalTableQualifierName(final byte[] qualifierName,
-182                                               int start,
-183                                               int end,
-184                                               boolean isSnapshot) {
-185    if(end - start < 1) {
-186      throw new IllegalArgumentException(isSnapshot ? "Snapshot" : "Table" + " qualifier must not be empty");
-187    }
-188    if (qualifierName[start] == '.' || qualifierName[start] == '-') {
-189      throw new IllegalArgumentException("Illegal first character " + qualifierName[start] +
-190          " at 0. " + (isSnapshot ? "Snapshot" : "User-space table") +
-191          " qualifiers can only start with 'alphanumeric " +
-192          "characters' from any language: " +
-193          Bytes.toString(qualifierName, start, end));
-194    }
-195    // Treat the bytes as UTF-8
-196    String qualifierString = new String(
-197        qualifierName, start, (end - start), StandardCharsets.UTF_8);
-198    if (qualifierString.equals(DISALLOWED_TABLE_NAME)) {
-199      // Per https://zookeeper.apache.org/doc/r3.4.10/zookeeperProgrammers.html#ch_zkDataModel
-200      // A znode named "zookeeper" is disallowed by zookeeper.
-201      throw new IllegalArgumentException("Tables may not be named '" + DISALLOWED_TABLE_NAME + "'");
-202    }
-203    for (int i = 0; i < qualifierString.length(); i++) {
-204      // Treat the string as a char-array as some characters may be multi-byte
-205      char c = qualifierString.charAt(i);
-206      // Check for letter, digit, underscore, hyphen, or period, and allowed by ZK.
-207      // ZooKeeper also has limitations, but Character.isAlphabetic omits those all
-208      //   See
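The javadoc in this hunk pins down the naming rule: word characters [\p{IsAlphabetic}\p{Digit}] plus '_', '.' or '-', with no leading '.' or '-', and (per the code) "zookeeper" reserved because of the znode restriction. A hedged restatement of that documented rule as a single regex; this mirrors the rule, not the exact HBase implementation:

    import java.util.regex.Pattern;

    public class QualifierRuleSketch {
      private static final Pattern LEGAL =
          Pattern.compile("^[\\p{IsAlphabetic}\\p{Digit}_][\\p{IsAlphabetic}\\p{Digit}_.\\-]*$");

      static boolean isLegalQualifier(String q) {
        return !q.isEmpty() && !"zookeeper".equals(q) && LEGAL.matcher(q).matches();
      }

      public static void main(String[] args) {
        System.out.println(isLegalQualifier("my_table-1")); // true
        System.out.println(isLegalQualifier(".hidden"));    // false: starts with '.'
        System.out.println(isLegalQualifier("zookeeper"));  // false: reserved znode name
      }
    }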

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html
index e1fbce4..873e17f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html
@@ -1089,497 +1089,498 @@
 1081        }
 1082      }
 1083      Map<String, AtomicLong> actualReadTableLatency = regionSink.getReadLatencyMap();
-1084      for (String tableName : this.configuredReadTableTimeouts.keySet()) {
-1085        if (actualReadTableLatency.containsKey(tableName)) {
-1086          Long actual = actualReadTableLatency.get(tableName).longValue();
-1087          Long configured = this.configuredReadTableTimeouts.get(tableName);
-1088          LOG.info("Read operation for " + tableName + " took " + actual +
-1089            " ms. The configured read timeout was " + configured + " ms.");
-1090          if (actual > configured) {
-1091            LOG.error("Read operation for " + tableName + " exceeded the configured read timeout.");
-1092          }
-1093        } else {
-1094          LOG.error("Read operation for " + tableName + " failed!");
-1095        }
-1096      }
-1097      if (this.writeSniffing) {
-1098        String writeTableStringName = this.writeTableName.getNameAsString();
-1099        long actualWriteLatency = regionSink.getWriteLatency().longValue();
-1100        LOG.info("Write operation for " + writeTableStringName + " took " + actualWriteLatency + " ms. The configured write timeout was " +
-1101          this.configuredWriteTableTimeout + " ms.");
-1102        // Check that the writeTable write operation latency does not exceed the configured timeout.
-1103        if (actualWriteLatency > this.configuredWriteTableTimeout) {
-1104          LOG.error("Write operation for " + writeTableStringName + " exceeded the configured write timeout.");
-1105        }
-1106      }
-1107    } catch (Exception e) {
-1108      LOG.error("Run regionMonitor failed", e);
-1109      this.errorCode = ERROR_EXIT_CODE;
-1110    }
-1111  }
-1112      this.done = true;
-1113    }
-1114
-1115    private String[] generateMonitorTables(String[] monitorTargets) throws IOException {
-1116      String[] returnTables = null;
-1117
-1118      if (this.useRegExp) {
-1119        Pattern pattern = null;
-1120        HTableDescriptor[] tds = null;
-1121        Set<String> tmpTables = new TreeSet<>();
-1122        try {
-1123          if (LOG.isDebugEnabled()) {
-1124            LOG.debug(String.format("reading list of tables"));
-1125          }
-1126          tds = this.admin.listTables(pattern);
-1127          if (tds == null) {
-1128            tds = new HTableDescriptor[0];
-1129          }
-1130          for (String monitorTarget : monitorTargets) {
-1131            pattern = Pattern.compile(monitorTarget);
-1132            for (HTableDescriptor td : tds) {
-1133              if (pattern.matcher(td.getNameAsString()).matches()) {
-1134                tmpTables.add(td.getNameAsString());
-1135              }
-1136            }
-1137          }
-1138        } catch (IOException e) {
-1139          LOG.error("Communicate with admin failed", e);
-1140          throw e;
-1141        }
-1142
-1143        if (tmpTables.size() > 0) {
-1144          returnTables = tmpTables.toArray(new String[tmpTables.size()]);
-1145        } else {
-1146          String msg = "No HTable found, tablePattern:" + Arrays.toString(monitorTargets);
-1147          LOG.error(msg);
-1148          this.errorCode = INIT_ERROR_EXIT_CODE;
-1149          throw new TableNotFoundException(msg);
-1150        }
-1151      } else {
-1152        returnTables = monitorTargets;
-1153      }
-1154
-1155      return returnTables;
-1156    }
-1157
-1158    /*
-1159     * canary entry point to monitor all the tables.
-1160     */
-1161    private List<Future<Void>> sniff(TaskType taskType, RegionStdOutSink regionSink) throws Exception {
-1162      if (LOG.isDebugEnabled()) {
-1163        LOG.debug(String.format("reading list of tables"));
-1164      }
-1165      List<Future<Void>> taskFutures = new LinkedList<>();
-1166      for (HTableDescriptor table : admin.listTables()) {
-1167        if (admin.isTableEnabled(table.getTableName())
-1168            && (!table.getTableName().equals(writeTableName))) {
-1169          AtomicLong readLatency = regionSink.initializeAndGetReadLatencyForTable(table.getNameAsString());
-1170          taskFutures.addAll(Canary.sniff(admin, sink, table, executor, taskType, this.rawScanEnabled, readLatency));
-1171
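generateMonitorTables above treats each monitor target as a regex, matches it against every table name, and de-duplicates the hits. A hypothetical condensation of just that expansion step, with plain strings replacing HTableDescriptors:

    import java.util.Arrays;
    import java.util.List;
    import java.util.Set;
    import java.util.TreeSet;
    import java.util.regex.Pattern;

    public class TargetExpansionSketch {
      static String[] expandTargets(String[] monitorTargets, List<String> allTables) {
        Set<String> matched = new TreeSet<>();
        for (String target : monitorTargets) {
          Pattern pattern = Pattern.compile(target);
          for (String table : allTables) {
            if (pattern.matcher(table).matches()) matched.add(table);
          }
        }
        return matched.toArray(new String[0]);
      }

      public static void main(String[] args) {
        String[] out = expandTargets(new String[] { "web\\..*" },
            Arrays.asList("web.users", "web.events", "internal.audit"));
        System.out.println(Arrays.toString(out)); // [web.events, web.users]
      }
    }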

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html
index feb42ea..4bd98f4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html
@@ -185,4189 +185,4266 @@
 177import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
 178import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
 179import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
-180import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
-181import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
-182import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
-183import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
-184import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-185import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
-186import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
-187import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
-188import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-189import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-190import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-191import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-192import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-193import org.apache.hadoop.hbase.util.Addressing;
-194import org.apache.hadoop.hbase.util.Bytes;
-195import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-196import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-197import org.apache.hadoop.hbase.util.Pair;
-198import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-199import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-200import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-201import org.apache.hadoop.ipc.RemoteException;
-202import org.apache.hadoop.util.StringUtils;
-203import org.apache.zookeeper.KeeperException;
-204
-205import com.google.common.annotations.VisibleForTesting;
-206import com.google.protobuf.Descriptors;
-207import com.google.protobuf.Message;
-208import com.google.protobuf.RpcController;
-209import java.util.stream.Collectors;
-210
-211/**
-212 * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that
-213 * this is an HBase-internal class as defined in
-214 * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
-215 * There are no guarantees for backwards source / binary compatibility and methods or class can
-216 * change or go away without deprecation.
-217 * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead of constructing
-218 * an HBaseAdmin directly.
-219 *
-220 * <p>Connection should be an <i>unmanaged</i> connection obtained via
-221 * {@link ConnectionFactory#createConnection(Configuration)}
-222 *
-223 * @see ConnectionFactory
-224 * @see Connection
-225 * @see Admin
-226 */
-227@InterfaceAudience.Private
-228@InterfaceStability.Evolving
-229public class HBaseAdmin implements Admin {
-230  private static final Log LOG = LogFactory.getLog(HBaseAdmin.class);
-231
-232  private static final String ZK_IDENTIFIER_PREFIX =  "hbase-admin-on-";
+180import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
+181import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
+182import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
+183import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
+184import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
+185import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
+186import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+187import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
+188import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
+189import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
+190import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
+191import
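The javadoc in this hunk is explicit about usage: HBaseAdmin is internal, and callers should reach Admin through an unmanaged Connection. The recommended pattern from that javadoc, assuming hbase-client is on the classpath and a cluster configuration is reachable:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AdminViaConnection {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Unmanaged connection, per the javadoc; both resources close here.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          System.out.println("tables: " + admin.listTableNames().length);
        }
      }
    }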

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
index 20c973a..5951505 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
@@ -35,646 +35,644 @@
 027import java.util.ArrayList;
 028import java.util.List;
 029import java.util.Optional;
-030import java.util.Random;
-031import java.util.concurrent.ExecutionException;
-032import java.util.concurrent.atomic.AtomicInteger;
-033import java.util.stream.Collectors;
-034
-035import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-036import org.apache.hadoop.hbase.HConstants;
-037import org.apache.hadoop.hbase.HRegionInfo;
-038import org.apache.hadoop.hbase.HRegionLocation;
-039import org.apache.hadoop.hbase.ServerName;
-040import org.apache.hadoop.hbase.TableName;
-041import org.apache.hadoop.hbase.master.HMaster;
-042import org.apache.hadoop.hbase.master.NoSuchProcedureException;
-043import org.apache.hadoop.hbase.master.RegionState;
-044import org.apache.hadoop.hbase.master.ServerManager;
-045import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-046import org.apache.hadoop.hbase.master.assignment.RegionStates;
-047import org.apache.hadoop.hbase.regionserver.HRegionServer;
-048import org.apache.hadoop.hbase.regionserver.Region;
-049import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-050import org.apache.hadoop.hbase.testclassification.ClientTests;
-051import org.apache.hadoop.hbase.testclassification.LargeTests;
-052import org.apache.hadoop.hbase.util.Bytes;
-053import org.apache.hadoop.hbase.util.JVMClusterUtil;
-054import org.apache.hadoop.hbase.util.Threads;
-055import org.junit.Assert;
-056import org.junit.Ignore;
-057import org.junit.Test;
-058import org.junit.experimental.categories.Category;
-059import org.junit.runner.RunWith;
-060import org.junit.runners.Parameterized;
-061
-062/**
-063 * Class to test asynchronous region admin operations.
-064 */
-065@RunWith(Parameterized.class)
-066@Category({ LargeTests.class, ClientTests.class })
-067public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
-068
-069  public static Random RANDOM = new Random(System.currentTimeMillis());
-070
-071  @Test
-072  public void testCloseRegion() throws Exception {
-073    createTableWithDefaultConf(tableName);
-074
-075    HRegionInfo info = null;
-076    HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(tableName);
-077    List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
-078    for (HRegionInfo regionInfo : onlineRegions) {
-079      if (!regionInfo.getTable().isSystemTable()) {
-080        info = regionInfo;
-081        boolean closed = admin.closeRegion(regionInfo.getRegionName(),
-082          Optional.of(rs.getServerName())).get();
-083        assertTrue(closed);
-084      }
-085    }
-086    boolean isInList = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info);
-087    long timeout = System.currentTimeMillis() + 1;
-088    while ((System.currentTimeMillis() < timeout) && (isInList)) {
-089      Thread.sleep(100);
-090      isInList = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()).contains(info);
-091    }
+030import java.util.concurrent.ExecutionException;
+031import java.util.concurrent.atomic.AtomicInteger;
+032import java.util.stream.Collectors;
+033
+034import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
+035import org.apache.hadoop.hbase.HConstants;
+036import org.apache.hadoop.hbase.HRegionInfo;
+037import org.apache.hadoop.hbase.HRegionLocation;
+038import org.apache.hadoop.hbase.ServerName;
+039import org.apache.hadoop.hbase.TableName;
+040import org.apache.hadoop.hbase.master.HMaster;
+041import org.apache.hadoop.hbase.master.NoSuchProcedureException;
+042import org.apache.hadoop.hbase.master.RegionState;
+043import org.apache.hadoop.hbase.master.ServerManager;
+044import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
+045import org.apache.hadoop.hbase.master.assignment.RegionStates;
+046import org.apache.hadoop.hbase.regionserver.HRegionServer;
+047import org.apache.hadoop.hbase.regionserver.Region;
+048import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+049import org.apache.hadoop.hbase.testclassification.ClientTests;
+050import org.apache.hadoop.hbase.testclassification.LargeTests;
+051import org.apache.hadoop.hbase.util.Bytes;
+052import org.apache.hadoop.hbase.util.JVMClusterUtil;
+053import org.apache.hadoop.hbase.util.Threads;
+054import org.junit.Assert;
+055import
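testCloseRegion above hand-rolls a poll-until-timeout loop around getOnlineRegions. The same pattern as a small reusable helper (illustrative; not part of the test class):

    import java.util.concurrent.TimeUnit;
    import java.util.function.BooleanSupplier;

    public final class WaitUtil {
      static boolean waitFor(long timeoutMs, long intervalMs, BooleanSupplier done)
          throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          if (done.getAsBoolean()) return true;
          TimeUnit.MILLISECONDS.sleep(intervalMs);
        }
        return done.getAsBoolean(); // one last check at the deadline
      }

      public static void main(String[] args) throws InterruptedException {
        long start = System.currentTimeMillis();
        boolean ok = waitFor(500, 100, () -> System.currentTimeMillis() - start > 250);
        System.out.println(ok); // true, after roughly 300 ms
      }
    }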

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
index 75db22d..99a09f9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
@@ -37,2710 +37,2816 @@
 029import java.util.List;
 030import java.util.Map;
 031import java.util.Optional;
-032import java.util.concurrent.CompletableFuture;
-033import java.util.concurrent.TimeUnit;
-034import java.util.concurrent.atomic.AtomicReference;
-035import java.util.function.BiConsumer;
-036import java.util.regex.Pattern;
-037import java.util.stream.Collectors;
-038
-039import com.google.common.annotations.VisibleForTesting;
-040
-041import io.netty.util.Timeout;
-042import io.netty.util.TimerTask;
-043
-044import java.util.stream.Stream;
-045
-046import org.apache.commons.io.IOUtils;
-047import org.apache.commons.logging.Log;
-048import org.apache.commons.logging.LogFactory;
-049import org.apache.hadoop.hbase.ClusterStatus;
-050import org.apache.hadoop.hbase.HRegionInfo;
-051import org.apache.hadoop.hbase.HRegionLocation;
-052import org.apache.hadoop.hbase.MetaTableAccessor;
-053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import org.apache.hadoop.hbase.NotServingRegionException;
-055import org.apache.hadoop.hbase.ProcedureInfo;
-056import org.apache.hadoop.hbase.RegionLoad;
-057import org.apache.hadoop.hbase.RegionLocations;
-058import org.apache.hadoop.hbase.ServerName;
-059import org.apache.hadoop.hbase.NamespaceDescriptor;
-060import org.apache.hadoop.hbase.HConstants;
-061import org.apache.hadoop.hbase.TableExistsException;
-062import org.apache.hadoop.hbase.TableName;
-063import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-064import org.apache.hadoop.hbase.TableNotDisabledException;
-065import org.apache.hadoop.hbase.TableNotEnabledException;
-066import org.apache.hadoop.hbase.TableNotFoundException;
-067import org.apache.hadoop.hbase.UnknownRegionException;
-068import org.apache.hadoop.hbase.classification.InterfaceAudience;
-069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import org.apache.hadoop.hbase.client.Scan.ReadType;
-072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import org.apache.hadoop.hbase.client.replication.TableCFs;
-074import org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import org.apache.hadoop.hbase.replication.ReplicationException;
-080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.html
index bca52d7..9eea520 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.html
@@ -28,70 +28,69 @@
 020import java.io.Closeable;
 021import java.io.IOException;
 022import java.nio.ByteBuffer;
-023import java.nio.channels.CompletionHandler;
-024import java.util.concurrent.CompletableFuture;
-025
-026import org.apache.hadoop.hbase.classification.InterfaceAudience;
-027import org.apache.hadoop.hbase.util.CancelableProgressable;
-028import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-029
-030/**
-031 * Interface for asynchronous filesystem output stream.
-032 */
-033@InterfaceAudience.Private
-034public interface AsyncFSOutput extends Closeable {
-035
-036  /**
-037   * Just call write(b, 0, b.length).
-038   * @see #write(byte[], int, int)
-039   */
-040  void write(byte[] b);
-041
-042  /**
-043   * Copy the data into the buffer. Note that you need to call {@link #flush(boolean)} to flush the
-044   * buffer manually.
-045   */
-046  void write(byte[] b, int off, int len);
-047
-048  /**
-049   * Write an int to the buffer.
-050   */
-051  void writeInt(int i);
-052
-053  /**
-054   * Copy the data in the given {@code bb} into the buffer.
-055   */
-056  void write(ByteBuffer bb);
-057
-058  /**
-059   * Return the current size of buffered data.
-060   */
-061  int buffered();
-062
-063  /**
-064   * Return current pipeline. Empty array if no pipeline.
-065   */
-066  DatanodeInfo[] getPipeline();
-067
-068  /**
-069   * Flush the buffer out.
-070   * @param sync persistent the data to device
-071   * @return A CompletableFuture that hold the acked length after flushing.
-072   */
-073  CompletableFuture<Long> flush(boolean sync);
-074
-075  /**
-076   * The close method when error occurred.
-077   */
-078  void recoverAndClose(CancelableProgressable reporter) throws IOException;
-079
-080  /**
-081   * Close the file. You should call {@link #recoverAndClose(CancelableProgressable)} if this method
-082   * throws an exception.
-083   */
-084  @Override
-085  void close() throws IOException;
-086}
+023import java.util.concurrent.CompletableFuture;
+024
+025import org.apache.hadoop.hbase.classification.InterfaceAudience;
+026import org.apache.hadoop.hbase.util.CancelableProgressable;
+027import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+028
+029/**
+030 * Interface for asynchronous filesystem output stream.
+031 */
+032@InterfaceAudience.Private
+033public interface AsyncFSOutput extends Closeable {
+034
+035  /**
+036   * Just call write(b, 0, b.length).
+037   * @see #write(byte[], int, int)
+038   */
+039  void write(byte[] b);
+040
+041  /**
+042   * Copy the data into the buffer. Note that you need to call {@link #flush(boolean)} to flush the
+043   * buffer manually.
+044   */
+045  void write(byte[] b, int off, int len);
+046
+047  /**
+048   * Write an int to the buffer.
+049   */
+050  void writeInt(int i);
+051
+052  /**
+053   * Copy the data in the given {@code bb} into the buffer.
+054   */
+055  void write(ByteBuffer bb);
+056
+057  /**
+058   * Return the current size of buffered data.
+059   */
+060  int buffered();
+061
+062  /**
+063   * Return current pipeline. Empty array if no pipeline.
+064   */
+065  DatanodeInfo[] getPipeline();
+066
+067  /**
+068   * Flush the buffer out.
+069   * @param sync persistent the data to device
+070   * @return A CompletableFuture that hold the acked length after flushing.
+071   */
+072  CompletableFuture<Long> flush(boolean sync);
+073
+074  /**
+075   * The close method when error occurred.
+076   */
+077  void recoverAndClose(CancelableProgressable reporter) throws IOException;
+078
+079  /**
+080   * Close the file. You should call {@link #recoverAndClose(CancelableProgressable)} if this method
+081   * throws an exception.
+082   */
+083  @Override
+084  void close() throws IOException;
+085}
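The interface javadoc above fixes the contract: the write methods only buffer, and flush(sync) pushes the buffer out and completes with the acked length. A hedged usage sketch (constructing the AsyncFSOutput instance is elided; that is AsyncFSOutputHelper's job, as the next diff shows):

    import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutput;

    public class AsyncFSOutputUsage {
      static void writeRecord(AsyncFSOutput out, byte[] record) throws Exception {
        out.write(record);                  // buffered only; nothing is on the wire yet
        long acked = out.flush(true).get(); // sync=true: persist to device
        System.out.println("acked length after flush: " + acked);
      }
    }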
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.html
index e6485b5..5fb4dd3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.html
@@ -28,153 +28,154 @@
 020import

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
index 16c0042..71844ce 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
@@ -126,2499 +126,2543 @@
 118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
 119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
 120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
-138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
-139import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
-140import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
-141import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-142import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-143import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-144import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-145import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-146import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-147import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
-148import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
-149import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
-150import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse;
-151import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
-152import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
-153import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
-154import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
-155import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
-156import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
-157import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
-158import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
-159import
-159import 

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
index 7816844..f2e7625 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -464,11 +464,29 @@ implements
+CompletableFuture<Boolean> isCatalogJanitorOn()
+    Query on the catalog janitor state.
+CompletableFuture<Boolean> isCleanerChoreOn()
+    Query the current state of the cleaner chore.
 CompletableFuture<Boolean> isMasterInMaintenanceMode()
     Check whether master is in maintenance mode
+CompletableFuture<Boolean> isNormalizerOn()
+    Query the current state of the region normalizer
 CompletableFuture<Boolean> isProcedureFinished(String signature, String instance,
@@ -476,13 +494,13 @@ implements
     Check the current state of the specified procedure.
 CompletableFuture<Boolean> isSnapshotFinished(SnapshotDescription snapshot)
     Check the current state of the passed snapshot.
-
+
 

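The rows added above give AsyncHBaseAdmin non-blocking variants of the chore and normalizer queries. A minimal sketch of driving all three together, assuming the methods are exposed on the AsyncAdmin interface as the summary suggests (the helper class itself is hypothetical):

import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.hbase.client.AsyncAdmin;

final class ChoreStateLogger {
  // Fire all three queries concurrently, then report once they all complete.
  static CompletableFuture<Void> logChoreState(AsyncAdmin admin) {
    CompletableFuture<Boolean> janitor = admin.isCatalogJanitorOn();
    CompletableFuture<Boolean> cleaner = admin.isCleanerChoreOn();
    CompletableFuture<Boolean> normalizer = admin.isNormalizerOn();
    return CompletableFuture.allOf(janitor, cleaner, normalizer)
        .thenRun(() -> System.out.printf("janitor=%s cleaner=%s normalizer=%s%n",
            janitor.join(), cleaner.join(), normalizer.join()));
  }
}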
[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterServices.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterServices.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterServices.html
index 912d069..e50b37b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterServices.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterServices.html
@@ -521,7 +521,11 @@
 513   * @return {@link LockManager} to lock namespaces/tables/regions.
 514   */
 515  LockManager getLockManager();
-516}
+516
+517  public String getRegionServerVersion(final ServerName sn);
+518
+519  public void checkIfShouldMoveSystemRegionAsync();
+520}
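A hedged sketch of a caller for the two methods added to MasterServices above; the MasterServices instance would come from the running master, and the version-string format is not specified here, so the comparison is illustrative only:

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.MasterServices;

final class VersionCheck {
  // Naive lexicographic comparison, for illustration only; a real check
  // would parse the dotted version components.
  static boolean isAtLeast(MasterServices services, ServerName sn, String minVersion) {
    String version = services.getRegionServerVersion(sn);
    return version != null && version.compareTo(minVersion) >= 0;
  }
}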
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
index 65900b3..4de2702 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
@@ -1214,13 +1214,13 @@
 1206  /**
 1207   * Creates a list of possible destinations for a region. It contains the online servers, but not
 1208   *  the draining or dying servers.
-1209   *  @param serverToExclude can be null if there is no server to exclude
+1209   *  @param serversToExclude can be null if there is no server to exclude
 1210   */
-1211  public List<ServerName> createDestinationServersList(final ServerName serverToExclude){
+1211  public List<ServerName> createDestinationServersList(final List<ServerName> serversToExclude){
 1212    final List<ServerName> destServers = getOnlineServersList();
 1213
-1214    if (serverToExclude != null){
-1215      destServers.remove(serverToExclude);
+1214    if (serversToExclude != null){
+1215      destServers.removeAll(serversToExclude);
 1216    }
 1217
 1218    // Loop through the draining server list and remove them from the server list
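The hunk above changes the exclusion parameter from a single ServerName to a list. A small sketch of adapting a call site, assuming a ServerManager from the running master (the wrapper class is hypothetical):

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.ServerManager;

final class DestinationExample {
  static List<ServerName> destinationsAvoiding(ServerManager serverManager,
      ServerName crashed, ServerName draining) {
    // Before: serverManager.createDestinationServersList(crashed);
    // After: any number of servers can be excluded in one call.
    return serverManager.createDestinationServersList(Arrays.asList(crashed, draining));
  }
}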



[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/client/class-use/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
new file mode 100644
index 000..6236ecd
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
@@ -0,0 +1,175 @@
+Uses of Class org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer (Apache HBase 3.0.0-SNAPSHOT API)
+
+Packages that use RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer
+
+Package: org.apache.hadoop.hbase.client
+Description: Provides HBase Client
+
+Uses of RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer in org.apache.hadoop.hbase.client
+
+Subclasses of RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer in org.apache.hadoop.hbase.client
+
+Modifier and Type    Class and Description
+private class        RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer
+private class        RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer
+private class        RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer
+
+Copyright © 2007-2017 The Apache Software Foundation. All rights reserved.
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/client/class-use/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
new file mode 100644
index 000..07f8d5f
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
@@ -0,0 +1,235 @@
+Uses of Class org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.ProcedureBiConsumer (Apache HBase 3.0.0-SNAPSHOT API)
+
+Packages that use RawAsyncHBaseAdmin.ProcedureBiConsumer
+
+Package: org.apache.hadoop.hbase.client
+Description: Provides HBase Client
+
+Uses of RawAsyncHBaseAdmin.ProcedureBiConsumer in org.apache.hadoop.hbase.client
+
+Subclasses of RawAsyncHBaseAdmin.ProcedureBiConsumer in org.apache.hadoop.hbase.client
+
+Modifier and Type    Class and Description
+private class        RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
+
+private class        RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer
+

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
index c895448..545d4da 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
@@ -1294,425 +1294,426 @@
 1286  }
 1287
 1288  // We normalize locality to be a score between 0 and 1.0 representing how good it
-1289  // is compared to how good it could be
-1290  locality /= bestLocality;
-1291    }
-1292
-1293    @Override
-1294    protected void regionMoved(int region, int oldServer, int newServer) {
-1295      int oldEntity = type == LocalityType.SERVER ? oldServer : cluster.serverIndexToRackIndex[oldServer];
-1296      int newEntity = type == LocalityType.SERVER ? newServer : cluster.serverIndexToRackIndex[newServer];
-1297      if (this.services == null) {
-1298        return;
-1299      }
-1300      double localityDelta = getWeightedLocality(region, newEntity) - getWeightedLocality(region, oldEntity);
-1301      double normalizedDelta = localityDelta / bestLocality;
-1302      locality += normalizedDelta;
-1303    }
-1304
-1305    @Override
-1306    double cost() {
-1307      return 1 - locality;
-1308    }
-1309
-1310    private int getMostLocalEntityForRegion(int region) {
-1311      return cluster.getOrComputeRegionsToMostLocalEntities(type)[region];
-1312    }
-1313
-1314    private double getWeightedLocality(int region, int entity) {
-1315      return cluster.getOrComputeWeightedLocality(region, entity, type);
-1316    }
-1317
-1318  }
-1319
-1320  static class ServerLocalityCostFunction extends LocalityBasedCostFunction {
-1321
-1322    private static final String LOCALITY_COST_KEY = "hbase.master.balancer.stochastic.localityCost";
-1323    private static final float DEFAULT_LOCALITY_COST = 25;
-1324
-1325    ServerLocalityCostFunction(Configuration conf, MasterServices srv) {
-1326      super(
-1327          conf,
-1328          srv,
-1329          LocalityType.SERVER,
-1330          LOCALITY_COST_KEY,
-1331          DEFAULT_LOCALITY_COST
-1332      );
-1333    }
-1334
-1335    @Override
-1336    int regionIndexToEntityIndex(int region) {
-1337      return cluster.regionIndexToServerIndex[region];
-1338    }
-1339  }
-1340
-1341  static class RackLocalityCostFunction extends LocalityBasedCostFunction {
-1342
-1343    private static final String RACK_LOCALITY_COST_KEY = "hbase.master.balancer.stochastic.rackLocalityCost";
-1344    private static final float DEFAULT_RACK_LOCALITY_COST = 15;
-1345
-1346    public RackLocalityCostFunction(Configuration conf, MasterServices services) {
-1347      super(
-1348          conf,
-1349          services,
-1350          LocalityType.RACK,
-1351          RACK_LOCALITY_COST_KEY,
-1352          DEFAULT_RACK_LOCALITY_COST
-1353      );
-1354    }
-1355
-1356    @Override
-1357    int regionIndexToEntityIndex(int region) {
-1358      return cluster.getRackForRegion(region);
-1359    }
-1360  }
-1361
-1362  /**
-1363   * Base class the allows writing costs functions from rolling average of some
-1364   * number from RegionLoad.
-1365   */
-1366  abstract static class CostFromRegionLoadFunction extends CostFunction {
-1367
-1368    private ClusterStatus clusterStatus = null;
-1369    private Map<String, Deque<BalancerRegionLoad>> loads = null;
-1370    private double[] stats = null;
-1371
-1372    CostFromRegionLoadFunction(Configuration conf) {
-1373      super(conf);
-1374    }
-1375
-1376    void setClusterStatus(ClusterStatus status) {
-1377      this.clusterStatus = status;
-1378    }
-1379
-1380    void setLoads(Map<String, Deque<BalancerRegionLoad>> l) {
-1381      this.loads = l;
-1382    }
-1383
-1384    @Override
-1385    double cost() {
-1386      if (clusterStatus == null || loads == null) {
-1387        return 0;
-1388      }
-1389
-1390      if (stats == null || stats.length != cluster.numServers) {
-1391        stats = new double[cluster.numServers];
-1392      }
-1393
-1394      for (int i = 0; i < stats.length; i++) {
-1395        // Cost this server has from RegionLoad
-1396        long cost = 0;
-1397
-1398        // for every region on this server get the rl
-1399        for (int regionIndex : cluster.regionsPerServer[i]) {
-1400          Collection<BalancerRegionLoad> regionLoadList = cluster.regionLoads[regionIndex];
-1401
-1402          // Now if we found a region load get the type of cost that was requested.
-1403          if (regionLoadList != 

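The visible code keeps a normalized locality score and applies per-move deltas so that cost() is a constant-time read. A standalone sketch of that pattern, with illustrative names rather than the balancer's real data structures:

final class LocalityScore {
  private double locality;           // running locality, normalized to [0, 1]
  private final double bestLocality; // best achievable weighted locality

  LocalityScore(double initialWeightedLocality, double bestLocality) {
    this.bestLocality = bestLocality;
    this.locality = initialWeightedLocality / bestLocality;
  }

  // Apply the delta for one simulated region move, as regionMoved() does above.
  void regionMoved(double oldWeightedLocality, double newWeightedLocality) {
    locality += (newWeightedLocality - oldWeightedLocality) / bestLocality;
  }

  // 0 = perfect locality, 1 = worst; matches "return 1 - locality" above.
  double cost() {
    return 1 - locality;
  }
}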
[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html
index 01496d6..dc12c09 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html
@@ -48,2406 +48,2267 @@
 040
 041import io.netty.util.Timeout;
 042import io.netty.util.TimerTask;
-043import java.util.stream.Stream;
-044import org.apache.commons.io.IOUtils;
-045import org.apache.commons.logging.Log;
-046import 
org.apache.commons.logging.LogFactory;
-047import 
org.apache.hadoop.hbase.HRegionInfo;
-048import 
org.apache.hadoop.hbase.HRegionLocation;
-049import 
org.apache.hadoop.hbase.MetaTableAccessor;
-050import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-051import 
org.apache.hadoop.hbase.NotServingRegionException;
-052import 
org.apache.hadoop.hbase.ProcedureInfo;
-053import 
org.apache.hadoop.hbase.RegionLocations;
-054import 
org.apache.hadoop.hbase.ServerName;
-055import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-056import 
org.apache.hadoop.hbase.HConstants;
-057import 
org.apache.hadoop.hbase.TableExistsException;
-058import 
org.apache.hadoop.hbase.TableName;
-059import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-060import 
org.apache.hadoop.hbase.TableNotDisabledException;
-061import 
org.apache.hadoop.hbase.TableNotEnabledException;
-062import 
org.apache.hadoop.hbase.TableNotFoundException;
-063import 
org.apache.hadoop.hbase.UnknownRegionException;
-064import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-065import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-066import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-067import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-068import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-069import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-070import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-071import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-072import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-073import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-074import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-075import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-076import 
org.apache.hadoop.hbase.replication.ReplicationException;
-077import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-078import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-079import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-080import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-081import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-082import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-101import 

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
index 6de986f..c895448 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
@@ -26,1592 +26,1693 @@
 018package 
org.apache.hadoop.hbase.master.balancer;
 019
 020import java.util.ArrayDeque;
-021import java.util.Arrays;
-022import java.util.Collection;
-023import java.util.Deque;
-024import java.util.HashMap;
-025import java.util.LinkedList;
-026import java.util.List;
-027import java.util.Map;
-028import java.util.Map.Entry;
-029import java.util.Random;
-030
-031import org.apache.commons.logging.Log;
-032import 
org.apache.commons.logging.LogFactory;
-033import 
org.apache.hadoop.conf.Configuration;
-034import 
org.apache.hadoop.hbase.ClusterStatus;
-035import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-036import 
org.apache.hadoop.hbase.HConstants;
-037import 
org.apache.hadoop.hbase.HRegionInfo;
-038import 
org.apache.hadoop.hbase.RegionLoad;
-039import 
org.apache.hadoop.hbase.ServerLoad;
-040import 
org.apache.hadoop.hbase.ServerName;
-041import 
org.apache.hadoop.hbase.TableName;
-042import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-043import 
org.apache.hadoop.hbase.master.MasterServices;
-044import 
org.apache.hadoop.hbase.master.RegionPlan;
-045import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
-046import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
-047import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
-048import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
-049import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-050import 
org.apache.hadoop.hbase.util.Bytes;
-051import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-052
-053import com.google.common.collect.Lists;
-054
-055/**
-056 * <p>This is a best effort load balancer. Given a Cost function F(C) => x It will
-057 * randomly try and mutate the cluster to Cprime. If F(Cprime) < F(C) then the
-058 * new cluster state becomes the plan. It includes costs functions to compute the cost of:</p>
-059 * <ul>
-060 * <li>Region Load</li>
-061 * <li>Table Load</li>
-062 * <li>Data Locality</li>
-063 * <li>Memstore Sizes</li>
-064 * <li>Storefile Sizes</li>
-065 * </ul>
-066 *
-067 *
-068 * <p>Every cost function returns a number between 0 and 1 inclusive; where 0 is the lowest cost
-069 * best solution, and 1 is the highest possible cost and the worst solution.  The computed costs are
-070 * scaled by their respective multipliers:</p>
+021import java.util.ArrayList;
+022import java.util.Arrays;
+023import java.util.Collection;
+024import java.util.Collections;
+025import java.util.Deque;
+026import java.util.HashMap;
+027import java.util.LinkedList;
+028import java.util.List;
+029import java.util.Map;
+030import java.util.Map.Entry;
+031import java.util.Random;
+032
+033import org.apache.commons.logging.Log;
+034import 
org.apache.commons.logging.LogFactory;
+035import 
org.apache.hadoop.conf.Configuration;
+036import 
org.apache.hadoop.hbase.ClusterStatus;
+037import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
+038import 
org.apache.hadoop.hbase.HConstants;
+039import 
org.apache.hadoop.hbase.HRegionInfo;
+040import 
org.apache.hadoop.hbase.RegionLoad;
+041import 
org.apache.hadoop.hbase.ServerLoad;
+042import 
org.apache.hadoop.hbase.ServerName;
+043import 
org.apache.hadoop.hbase.TableName;
+044import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+045import 
org.apache.hadoop.hbase.master.MasterServices;
+046import 
org.apache.hadoop.hbase.master.RegionPlan;
+047import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
+048import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
+049import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
+050import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
+051import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
+052import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
+053import 
org.apache.hadoop.hbase.util.Bytes;
+054import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+055
+056import 

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshot.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshot.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshot.html
index 3a57bbd..b408e5f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshot.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshot.html
@@ -30,182 +30,184 @@
 022import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 023import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 024import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
-025
-026/**
-027 * A point-in-time view of a space quota 
on a table.
-028 */
-029@InterfaceAudience.Private
-030public class SpaceQuotaSnapshot {
-031  private static final SpaceQuotaSnapshot 
NO_SUCH_SNAPSHOT = new SpaceQuotaSnapshot(
-032  SpaceQuotaStatus.notInViolation(), 
0, Long.MAX_VALUE);
-033  private final SpaceQuotaStatus 
quotaStatus;
-034  private final long usage;
-035  private final long limit;
-036
-037  /**
-038   * Encapsulates the state of a quota on 
a table. The quota may or may not be in violation.
-039   * If the quota is not in violation, 
the violation may be null. If the quota is in violation,
-040   * there is guaranteed to be a non-null 
violation policy.
-041   */
-042  @InterfaceAudience.Private
-043  public static class SpaceQuotaStatus 
{
-044private static final SpaceQuotaStatus 
NOT_IN_VIOLATION = new SpaceQuotaStatus(null, false);
-045final SpaceViolationPolicy policy;
-046final boolean inViolation;
-047
-048/**
-049 * Constructs a {@code 
SpaceQuotaSnapshot} which is in violation of the provided {@code policy}.
-050 *
-051 * Use {@link #notInViolation()} to 
obtain an instance of this class for the cases when the
-052 * quota is not in violation.
-053 *
-054 * @param policy The non-null policy 
being violated.
-055 */
-056public 
SpaceQuotaStatus(SpaceViolationPolicy policy) {
-057  // If the caller is instantiating a 
status, the policy must be non-null
-058  this 
(Objects.requireNonNull(policy), true);
-059}
-060
-061private 
SpaceQuotaStatus(SpaceViolationPolicy policy, boolean inViolation) {
-062  this.policy = policy;
-063  this.inViolation = inViolation;
-064}
-065
-066/**
-067 * Returns the violation policy, 
which may be null. It is guaranteed to be non-null if
-068 * {@link #isInViolation()} is {@code 
true}, but may be null otherwise.
-069 */
-070public SpaceViolationPolicy 
getPolicy() {
-071  return policy;
-072}
-073
-074/**
-075 * @return {@code true} if the quota 
is being violated, {@code false} otherwise.
-076 */
-077public boolean isInViolation() {
-078  return inViolation;
-079}
-080
-081/**
-082 * Returns a singleton referring to a 
quota which is not in violation.
-083 */
-084public static SpaceQuotaStatus 
notInViolation() {
-085  return NOT_IN_VIOLATION;
-086}
-087
-088@Override
-089public int hashCode() {
-090  return new 
HashCodeBuilder().append(policy == null ? 0 : policy.hashCode())
-091  
.append(inViolation).toHashCode();
-092}
-093
-094@Override
-095public boolean equals(Object o) {
-096  if (o instanceof SpaceQuotaStatus) 
{
-097SpaceQuotaStatus other = 
(SpaceQuotaStatus) o;
-098        return Objects.equals(policy, other.policy) && inViolation == other.inViolation;
-099  }
-100  return false;
-101}
-102
-103@Override
-104public String toString() {
-105  StringBuilder sb = new 
StringBuilder(getClass().getSimpleName());
-106  
sb.append("[policy=").append(policy);
-107  sb.append(", 
inViolation=").append(inViolation).append("]");
-108  return sb.toString();
-109}
-110
-111public static 
QuotaProtos.SpaceQuotaStatus toProto(SpaceQuotaStatus status) {
-112  
QuotaProtos.SpaceQuotaStatus.Builder builder = 
QuotaProtos.SpaceQuotaStatus.newBuilder();
-113  
builder.setInViolation(status.inViolation);
-114  if (status.isInViolation()) {
-115
builder.setViolationPolicy(ProtobufUtil.toProtoViolationPolicy(status.getPolicy()));
-116  }
-117  return builder.build();
-118}
-119
-120public static SpaceQuotaStatus 
toStatus(QuotaProtos.SpaceQuotaStatus proto) {
-121  if (proto.getInViolation()) {
-122return new 
SpaceQuotaStatus(ProtobufUtil.toViolationPolicy(proto.getViolationPolicy()));
-123  } else {
-124return NOT_IN_VIOLATION;
-125  }
-126}
-127  }
-128
-129  public 
SpaceQuotaSnapshot(SpaceQuotaStatus quotaStatus, long usage, long limit) {
-130this.quotaStatus = 
Objects.requireNonNull(quotaStatus);
-131this.usage = usage;
-132this.limit 

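A short sketch of building the snapshots defined above, one in violation and one not; the usage/limit numbers are made up, and getQuotaStatus() is assumed to be the usual getter on the (truncated) remainder of the class:

import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;

final class SnapshotExample {
  public static void main(String[] args) {
    // A quota that is over its limit and rejecting inserts.
    SpaceQuotaStatus violating = new SpaceQuotaStatus(SpaceViolationPolicy.NO_INSERTS);
    SpaceQuotaSnapshot over = new SpaceQuotaSnapshot(violating, 2_000_000L, 1_000_000L);

    // A quota with plenty of headroom; uses the shared singleton status.
    SpaceQuotaSnapshot ok =
        new SpaceQuotaSnapshot(SpaceQuotaStatus.notInViolation(), 10L, 1_000_000L);

    System.out.println(over.getQuotaStatus().isInViolation()); // true
    System.out.println(ok.getQuotaStatus().isInViolation());   // false
  }
}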
[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html
 
b/devapidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html
index 698e78d..a5aadc9 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":9,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":42,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":9,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":42,"i61":42,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":9,"i73":10,"i74":10,"i75":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":9,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":42,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":9,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":42,"i56":42,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":9,"i67":10,"i68":10,"i69":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -112,17 +112,13 @@ var activeTableTab = "activeTableTab";
 Comparable<TableDescriptorBuilder.ModifyableTableDescriptor>, TableDescriptor
 
-Direct Known Subclasses:
-ImmutableHTableDescriptor.UnmodifyableTableDescriptor
-
-
 Enclosing class:
 TableDescriptorBuilder
 
 @InterfaceAudience.Private
-public static class TableDescriptorBuilder.ModifyableTableDescriptor
+public static class TableDescriptorBuilder.ModifyableTableDescriptor
 extends Object
 implements TableDescriptor, Comparable<TableDescriptorBuilder.ModifyableTableDescriptor>
 TODO: make this private after removing the HTableDescriptor
@@ -151,9 +147,9 @@ implements 
-private Map<byte[],HColumnDescriptor>
+private Map<byte[],ColumnFamilyDescriptor>
 families
-Maps column family name to the respective HColumnDescriptors
+Maps column family name to the respective FamilyDescriptors
 
@@ -167,6 +163,13 @@ implements 
+Fields inherited from interface org.apache.hadoop.hbase.client.TableDescriptor
+COMPARATOR
 
@@ -182,25 +185,29 @@ implements Constructor and Description
 
-protected 
-ModifyableTableDescriptor(TableDescriptor desc)
-Construct a table descriptor by cloning the descriptor passed as a
- parameter.
-
+private 
+ModifyableTableDescriptor(TableDescriptor desc)
 
-private 
+
 ModifyableTableDescriptor(TableName name)
 Construct a table descriptor specifying a TableName object
 
-
+private 
 ModifyableTableDescriptor(TableName name,
- Collection<HColumnDescriptor> families,
+ Collection<ColumnFamilyDescriptor> families,
  Map<Bytes,Bytes> values,
  Map<Bytes,
[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/devapidocs/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html 
b/devapidocs/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html
index e97089d..b20e058 100644
--- a/devapidocs/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html
+++ b/devapidocs/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html
@@ -4,7 +4,7 @@
 
 
 
-FullTableBackupClient (Apache HBase 2.0.0-SNAPSHOT API)
+FullTableBackupClient (Apache HBase 3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
 

Methods inherited from class org.apache.hadoop.hbase.backup.impl.TableBackupClient

-addManifest, beginBackup, cleanupAndRestoreBackupSystem, cleanupDistCpLog, cleanupExportSnapshotLog, cleanupTargetDir, completeBackup, deleteBackupTableSnapshot, deleteSnapshots, failBackup, failStageIf, getMessage, getTestStage, init, obtainBackupMetaDataStr,

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6ad4f21a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
index 324ef65..e8620bd 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class HStore
+public class HStore
 extends Object
 implements Store
 A Store holds a column family in a Region.  Its a memstore and a set of zero
@@ -270,7 +270,7 @@ implements family
 
-(package private) List<StoreFile>
+(package private) List<StoreFile>
 filesCompacting
 
@@ -432,7 +432,7 @@ implements 
 private void
-addToCompactingFiles(Collection<StoreFile> filesToAdd)
+addToCompactingFiles(Collection<StoreFile> filesToAdd)
 Adds the files to compacting files.
 
@@ -455,7 +455,7 @@ implements 
 private void
-bulkLoadHFile(StoreFile sf)
+bulkLoadHFile(StoreFile sf)
 
 void
@@ -471,10 +471,10 @@ implements 
 private void
-clearCompactedfiles(List<StoreFile> filesToRemove)
+clearCompactedfiles(List<StoreFile> filesToRemove)
 
-com.google.common.collect.ImmutableCollection<StoreFile>
+com.google.common.collect.ImmutableCollection<StoreFile>
 close()
 Close all the readers We don't need to worry about subsequent requests because the Region
  holds a write lock that will prevent any more reads or writes.
@@ -487,20 +487,20 @@ implements 
-private StoreFile
+private StoreFile
 commitFile(org.apache.hadoop.fs.Path path,
   long logCacheFlushId,
   MonitoredTask status)
 
-List<StoreFile>
+List<StoreFile>
 compact(CompactionContext compaction,
 ThroughputController throughputController)
 Compact the StoreFiles.
 
-List<StoreFile>
+List<StoreFile>
 compact(CompactionContext compaction,
 ThroughputController throughputController,
 User user)
@@ -513,7 +513,7 @@ implements 
 protected void
-completeCompaction(Collection<StoreFile> compactedFiles)
+completeCompaction(Collection<StoreFile> compactedFiles)
 It works by processing a compaction that's been written to disk.
 
@@ -550,11 +550,11 @@ implements 
-private StoreFile
+private StoreFile
 createStoreFileAndReader(org.apache.hadoop.fs.Path p)
 
-private StoreFile
+private StoreFile
 createStoreFileAndReader(StoreFileInfo info)
 
@@ -803,7 +803,7 @@ implements 
 List<KeyValueScanner>
-getScanners(List<StoreFile> files,
+getScanners(List<StoreFile> files,
 boolean cacheBlocks,
 boolean usePread,
 boolean isCompaction,
@@ -857,7 +857,7 @@ implements 
-Collection<StoreFile>
+Collection<StoreFile>

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
index 6414009..2ad3a12 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
@@ -25,42 +25,42 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.hfile;
 019
-020import 
com.google.common.annotations.VisibleForTesting;
-021import 
com.google.common.base.Preconditions;
-022
-023import java.io.DataInputStream;
-024import java.io.DataOutput;
-025import java.io.DataOutputStream;
-026import java.io.IOException;
-027import java.io.InputStream;
-028import java.nio.ByteBuffer;
-029import 
java.util.concurrent.atomic.AtomicReference;
-030
-031import org.apache.commons.logging.Log;
-032import 
org.apache.commons.logging.LogFactory;
-033import 
org.apache.hadoop.fs.FSDataInputStream;
-034import 
org.apache.hadoop.fs.FSDataOutputStream;
-035import org.apache.hadoop.fs.Path;
-036import org.apache.hadoop.hbase.Cell;
-037import 
org.apache.hadoop.hbase.HConstants;
-038import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-039import 
org.apache.hadoop.hbase.fs.HFileSystem;
-040import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream;
-041import 
org.apache.hadoop.hbase.io.ByteBuffInputStream;
-042import 
org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
-043import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-044import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-045import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
-046import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
-047import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
-048import 
org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
-049import 
org.apache.hadoop.hbase.nio.ByteBuff;
-050import 
org.apache.hadoop.hbase.nio.MultiByteBuff;
-051import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-052import 
org.apache.hadoop.hbase.util.Bytes;
-053import 
org.apache.hadoop.hbase.util.ChecksumType;
-054import 
org.apache.hadoop.hbase.util.ClassSize;
-055import org.apache.hadoop.io.IOUtils;
+020import java.io.DataInputStream;
+021import java.io.DataOutput;
+022import java.io.DataOutputStream;
+023import java.io.IOException;
+024import java.io.InputStream;
+025import java.nio.ByteBuffer;
+026import 
java.util.concurrent.atomic.AtomicReference;
+027
+028import org.apache.commons.logging.Log;
+029import 
org.apache.commons.logging.LogFactory;
+030import 
org.apache.hadoop.fs.FSDataInputStream;
+031import 
org.apache.hadoop.fs.FSDataOutputStream;
+032import org.apache.hadoop.fs.Path;
+033import org.apache.hadoop.hbase.Cell;
+034import 
org.apache.hadoop.hbase.HConstants;
+035import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+036import 
org.apache.hadoop.hbase.fs.HFileSystem;
+037import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream;
+038import 
org.apache.hadoop.hbase.io.ByteBuffInputStream;
+039import 
org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
+040import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
+041import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+042import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
+043import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
+044import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
+045import 
org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
+046import 
org.apache.hadoop.hbase.nio.ByteBuff;
+047import 
org.apache.hadoop.hbase.nio.MultiByteBuff;
+048import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
+049import 
org.apache.hadoop.hbase.util.Bytes;
+050import 
org.apache.hadoop.hbase.util.ChecksumType;
+051import 
org.apache.hadoop.hbase.util.ClassSize;
+052import org.apache.hadoop.io.IOUtils;
+053
+054import 
com.google.common.annotations.VisibleForTesting;
+055import 
com.google.common.base.Preconditions;
 056
 057/**
 058 * Reads {@link HFile} version 2 blocks 
to HFiles and via {@link Cacheable} Interface to caches.
@@ -443,1645 +443,1656 @@
 435return nextBlockOnDiskSize;
 436  }
 437
-438  public BlockType getBlockType() {
-439return blockType;
-440  }
-441
-442  /** @return get data block encoding id 
that was used to encode this block */
-443  public short getDataBlockEncodingId() 
{
-444if (blockType != 
BlockType.ENCODED_DATA) {
-445  throw new 
IllegalArgumentException("Querying encoder ID of a block " +
-446  "of type other than " + 
BlockType.ENCODED_DATA + ": " + blockType);
-447}
-448return buf.getShort(headerSize());
-449  }
-450
-451  /**

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
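Per the accessors above, only ENCODED_DATA blocks carry a data-block-encoding id, and asking any other block type throws IllegalArgumentException. A guarded probe might look like this; DataBlockEncoding.getEncodingById is assumed from the encoding package:

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;

final class EncodingProbe {
  // Check the block type first so getDataBlockEncodingId() cannot throw.
  static DataBlockEncoding encodingOf(HFileBlock block) {
    if (block.getBlockType() != BlockType.ENCODED_DATA) {
      return DataBlockEncoding.NONE;
    }
    return DataBlockEncoding.getEncodingById(block.getDataBlockEncodingId());
  }
}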

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6dd31117/devapidocs/org/apache/hadoop/hbase/master/MasterWalManager.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterWalManager.html 
b/devapidocs/org/apache/hadoop/hbase/master/MasterWalManager.html
index 74f06db..d0fa862 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterWalManager.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterWalManager.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class MasterWalManager
+public class MasterWalManager
 extends Object
 This class abstracts a bunch of operations the HMaster needs
  when splitting log files e.g. finding log files, dirs etc.
@@ -243,67 +243,56 @@ extends Object
 getLogDirs(Set<ServerName> serverNames)
 
-org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode
-getLogRecoveryMode()
-
-
 (package private) org.apache.hadoop.fs.Path
 getOldLogDir()
 Get the directory where old logs go
 
-
+
 (package private) SplitLogManager
 getSplitLogManager()
 
-
+
 void
 prepareLogReplay(ServerName serverName,
 Set<HRegionInfo> regions)
 Mark regions in recovering state when distributedLogReplay are set true
 
-
+
 (package private) void
 removeStaleRecoveringRegionsFromZK(Set<ServerName> failedServers)
 Wrapper function on SplitLogManager.removeStaleRecoveringRegions(Set)
 
-
-void
-setLogRecoveryMode()
-The function is used in SSH to set recovery mode based on configuration after all outstanding
- log split tasks drained.
-
-
-
+
 void
 splitLog(ServerName serverName)
 
-
+
 void
 splitLog(Set<ServerName> serverNames)
 
-
+
 void
 splitLog(Set<ServerName> serverNames,
 org.apache.hadoop.fs.PathFilter filter)
 This method is the base split method that splits WAL files matching a filter.
 
-
+
 void
 splitMetaLog(ServerName serverName)
 Specialized method to handle the splitting for meta WAL
 
-
+
 void
 splitMetaLog(Set<ServerName> serverNames)
 Specialized method to handle the splitting for meta WAL
 
-
+
 void
 stop()
 
@@ -335,7 +324,7 @@ extends Object
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
@@ -344,7 +333,7 @@ extends Object
 
 META_FILTER
-static final org.apache.hadoop.fs.PathFilter META_FILTER
+static final org.apache.hadoop.fs.PathFilter META_FILTER
 
@@ -353,7 +342,7 @@ extends Object
 
 NON_META_FILTER
-static final org.apache.hadoop.fs.PathFilter NON_META_FILTER
+static final org.apache.hadoop.fs.PathFilter NON_META_FILTER
 
@@ -362,7 +351,7 @@ extends Object
 
 metricsMasterFilesystem
-private final MetricsMasterFileSystem metricsMasterFilesystem
+private final MetricsMasterFileSystem metricsMasterFilesystem
 
@@ -371,7 +360,7 @@ extends Object
 
 services
-private final MasterServices services
+private final MasterServices services
 
@@ -380,7 +369,7 @@ extends Object
 
 conf
-private final org.apache.hadoop.conf.Configuration conf
+private final org.apache.hadoop.conf.Configuration conf
 
 
@@ -389,7 +378,7 @@ extends Object
 

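A sketch of driving the split methods summarized above for one crashed server, meta WAL first; the MasterWalManager instance is assumed to come from the running HMaster, and both calls are assumed to throw IOException:

import java.io.IOException;

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.MasterWalManager;

final class SplitExample {
  static void splitAllFor(MasterWalManager walManager, ServerName crashed) throws IOException {
    walManager.splitMetaLog(crashed); // meta WAL first (matched by META_FILTER)
    walManager.splitLog(crashed);     // then the remaining user-region WALs
  }
}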
[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
index 73d01b1..93ff559 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":9,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":9,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":9,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":9,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
 
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":9,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":9};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":9,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":9,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":9,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":9,"i107":10,"i108":10,"i109"
 
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":9,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.LimitedPrivate(value="Tools")
-public class HMaster
+public class HMaster
 extends HRegionServer
 implements MasterServices
 HMaster is the "master server" for HBase. An HBase cluster 
has one active
@@ -388,45 +388,53 @@ implements quotaManager
 
 
+private QuotaObserverChore
+quotaObserverChore
+
+
 private RegionNormalizerTracker
 regionNormalizerTracker
 
-
+
 (package private) RegionServerTracker
 regionServerTracker
 
-
+
 private ReplicationManager
 replicationManager
 
-
+
 private ReplicationMetaCleaner
 replicationMetaCleaner
 
-
+
 private ReplicationZKNodeCleanerChore
 replicationZKNodeCleanerChore
 
-
+
 (package private) MemoryBoundedLogMessageBuffer
 rsFatals
 
-
+
 private ProcedureEvent
 serverCrashProcessingEnabled
 
-
+
 private ServerManager
 serverManager
 
-
+
 (package private) boolean
 serviceStarted
 
-
+
 (package private) SnapshotManager
 snapshotManager
 
+
+private SpaceQuotaSnapshotNotifier
+spaceQuotaSnapshotNotifier
+
 
 private SplitOrMergeTracker
 splitOrMergeTracker
@@ -449,7 +457,7 @@ implements HRegionServer
-cacheConfig,
 cacheFlusher,
 CLOSE,
 clusterConnection,
 clusterStatusTracker,
 compactSplitThread,
 conf,
 configurationManager,
 csm, DEFAULT_REGION_LOCK_AWAIT_TIME_SEC,
 fs,
 fsOk,
 hMemManager,
 infoServer,
 initLatch,

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/export_control.html
--
diff --git a/export_control.html b/export_control.html
index 31adc62..5ca6a7f 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Export Control
@@ -336,7 +336,7 @@ for more details.
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-18
+  Last Published: 
2017-05-19
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/hbase-annotations/checkstyle.html
--
diff --git a/hbase-annotations/checkstyle.html 
b/hbase-annotations/checkstyle.html
index ef4fba9..8ad631b 100644
--- a/hbase-annotations/checkstyle.html
+++ b/hbase-annotations/checkstyle.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-18
+Last Published: 2017-05-19
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/hbase-annotations/dependencies.html
--
diff --git a/hbase-annotations/dependencies.html 
b/hbase-annotations/dependencies.html
index d0561ac..1f6ea53 100644
--- a/hbase-annotations/dependencies.html
+++ b/hbase-annotations/dependencies.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-18
+Last Published: 2017-05-19
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/hbase-annotations/dependency-convergence.html
--
diff --git a/hbase-annotations/dependency-convergence.html 
b/hbase-annotations/dependency-convergence.html
index 2537239..0c3a246 100644
--- a/hbase-annotations/dependency-convergence.html
+++ b/hbase-annotations/dependency-convergence.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-18
+Last Published: 2017-05-19
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/hbase-annotations/dependency-info.html
--
diff --git a/hbase-annotations/dependency-info.html 
b/hbase-annotations/dependency-info.html
index 8d3812a..1a1cdc5 100644
--- a/hbase-annotations/dependency-info.html
+++ b/hbase-annotations/dependency-info.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-18
+Last Published: 2017-05-19
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/hbase-annotations/dependency-management.html
--
diff --git a/hbase-annotations/dependency-management.html 
b/hbase-annotations/dependency-management.html
index 0ecd14c..01305b4 100644
--- a/hbase-annotations/dependency-management.html
+++ b/hbase-annotations/dependency-management.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
   

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b4cf63f/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
index de81692..75f0173 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
@@ -1529,510 +1529,540 @@
 1521  throws IOException, 
RestoreSnapshotException;
 1522
 1523  /**
-1524   * Create a new table by cloning the 
snapshot content.
-1525   *
-1526   * @param snapshotName name of the 
snapshot to be cloned
-1527   * @param tableName name of the table 
where the snapshot will be restored
-1528   * @throws IOException if a remote or 
network exception occurs
-1529   * @throws TableExistsException if 
table to be created already exists
-1530   * @throws RestoreSnapshotException if 
snapshot failed to be cloned
-1531   * @throws IllegalArgumentException if 
the specified table does not have a valid name
-1532   */
-1533  void cloneSnapshot(final byte[] 
snapshotName, final TableName tableName)
-1534  throws IOException, 
TableExistsException, RestoreSnapshotException;
-1535
-1536  /**
-1537   * Create a new table by cloning the 
snapshot content.
-1538   *
-1539   * @param snapshotName name of the 
snapshot to be cloned
-1540   * @param tableName name of the table 
where the snapshot will be restored
-1541   * @throws IOException if a remote or 
network exception occurs
-1542   * @throws TableExistsException if 
table to be created already exists
-1543   * @throws RestoreSnapshotException if 
snapshot failed to be cloned
-1544   * @throws IllegalArgumentException if 
the specified table does not have a valid name
-1545   */
-1546  void cloneSnapshot(final String 
snapshotName, final TableName tableName)
-1547  throws IOException, 
TableExistsException, RestoreSnapshotException;
-1548
-1549  /**
-1550   * Create a new table by cloning the 
snapshot content, but does not block
-1551   * and wait for it to be completely cloned.
-1552   * You can use Future.get(long, 
TimeUnit) to wait on the operation to complete.
-1553   * It may throw ExecutionException if 
there was an error while executing the operation
-1554   * or TimeoutException in case the 
wait timeout was not long enough to allow the
-1555   * operation to complete.
-1556   *
-1557   * @param snapshotName name of the 
snapshot to be cloned
-1558   * @param tableName name of the table 
where the snapshot will be restored
-1559   * @throws IOException if a remote or 
network exception occurs
-1560   * @throws TableExistsException if 
table to be cloned already exists
-1561   * @return the result of the async 
clone snapshot. You can use Future.get(long, TimeUnit)
-1562   *to wait on the operation to 
complete.
-1563   */
-1564  Future<Void> cloneSnapshotAsync(final String snapshotName, final TableName tableName)
-1565  throws IOException, 
TableExistsException;
-1566
-1567  /**
-1568   * Execute a distributed procedure on 
a cluster.
-1569   *
-1570   * @param signature A distributed 
procedure is uniquely identified by its signature (default the
-1571   * root ZK node name of the 
procedure).
-1572   * @param instance The instance name 
of the procedure. For some procedures, this parameter is
-1573   * optional.
-1574   * @param props Property/Value pairs 
of properties passed to the procedure
-1575   * @throws IOException
-1576   */
-1577  void execProcedure(String signature, String instance, Map<String, String> props)
-1578  throws IOException;
-1579
-1580  /**
-1581   * Execute a distributed procedure on 
a cluster.
-1582   *
-1583   * @param signature A distributed 
procedure is uniquely identified by its signature (default the
-1584   * root ZK node name of the 
procedure).
-1585   * @param instance The instance name 
of the procedure. For some procedures, this parameter is
-1586   * optional.
-1587   * @param props Property/Value pairs 
of properties passed to the procedure
-1588   * @return data returned after 
procedure execution. null if no return data.
-1589   * @throws IOException
-1590   */
-1591  byte[] execProcedureWithRet(String signature, String instance, Map<String, String> props)
-1592  throws IOException;
-1593
-1594  /**
-1595   * Check the current state of the specified procedure. There are three possible states: <ol>
-1596   * <li>running - returns <tt>false</tt></li> <li>finished - returns <tt>true</tt></li>
-1597   * <li>finished with error - throws the exception that caused the procedure to fail</li> </ol>
-1598   *
-1599   * @param signature The signature that 
uniquely identifies a procedure
-1600   * @param instance The instance name 
of the procedure
-1601   * @param props Property/Value pairs 
of properties passed to the procedure
-1602   * @return true if the specified 
procedure is finished successfully, 
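A minimal usage sketch of the cloneSnapshot contract documented above (not part of this patch; the snapshot and table names are hypothetical placeholders, and the snapshot is assumed to already exist):

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CloneSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocking variant: returns only after the table is fully cloned.
      admin.cloneSnapshot("mySnapshot", TableName.valueOf("clonedTable"));
      // Async variant: the Future lets the caller bound the wait, throwing
      // TimeoutException if the deadline is too short, as documented above.
      Future<Void> f =
          admin.cloneSnapshotAsync("mySnapshot", TableName.valueOf("clonedTable2"));
      f.get(5, TimeUnit.MINUTES);
    }
  }
}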

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f55ebeaa/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.ConnectionManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.ConnectionManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.ConnectionManager.html
index 7f61b54..35e8890 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.ConnectionManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.ConnectionManager.html
@@ -23,1363 +23,690 @@
 015 * See the License for the specific 
language governing permissions and
 016 * limitations under the License.
 017 */
-018
-019package org.apache.hadoop.hbase.ipc;
-020
-021import java.io.IOException;
-022import java.io.InputStream;
-023import java.net.BindException;
-024import java.net.InetAddress;
-025import java.net.InetSocketAddress;
-026import java.net.ServerSocket;
-027import java.net.Socket;
-028import java.net.SocketException;
-029import java.net.UnknownHostException;
-030import java.nio.ByteBuffer;
-031import 
java.nio.channels.CancelledKeyException;
-032import 
java.nio.channels.ClosedChannelException;
-033import 
java.nio.channels.GatheringByteChannel;
-034import 
java.nio.channels.ReadableByteChannel;
-035import java.nio.channels.SelectionKey;
-036import java.nio.channels.Selector;
-037import 
java.nio.channels.ServerSocketChannel;
-038import java.nio.channels.SocketChannel;
-039import java.util.ArrayList;
-040import java.util.Arrays;
-041import java.util.Collections;
-042import java.util.Iterator;
-043import java.util.List;
-044import java.util.Set;
-045import java.util.Timer;
-046import java.util.TimerTask;
-047import 
java.util.concurrent.ConcurrentHashMap;
-048import 
java.util.concurrent.ConcurrentLinkedDeque;
-049import 
java.util.concurrent.ExecutorService;
-050import java.util.concurrent.Executors;
-051import 
java.util.concurrent.LinkedBlockingQueue;
-052import 
java.util.concurrent.atomic.AtomicInteger;
-053import 
java.util.concurrent.atomic.LongAdder;
-054import java.util.concurrent.locks.Lock;
-055import 
java.util.concurrent.locks.ReentrantLock;
-056
-057import 
org.apache.hadoop.conf.Configuration;
-058import 
org.apache.hadoop.hbase.CellScanner;
-059import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-060import 
org.apache.hadoop.hbase.HBaseIOException;
-061import 
org.apache.hadoop.hbase.HConstants;
-062import org.apache.hadoop.hbase.Server;
-063import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-064import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-065import 
org.apache.hadoop.hbase.client.VersionInfoUtil;
-066import 
org.apache.hadoop.hbase.exceptions.RequestTooBigException;
-067import 
org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
-068import 
org.apache.hadoop.hbase.nio.ByteBuff;
-069import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-070import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-071import 
org.apache.hadoop.hbase.security.AuthMethod;
-072import 
org.apache.hadoop.hbase.security.HBasePolicyProvider;
-073import 
org.apache.hadoop.hbase.security.SaslStatus;
-074import 
org.apache.hadoop.hbase.security.SaslUtil;
-075import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
-076import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream;
-077import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
-078import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
-079import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-080import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-081import 
org.apache.hadoop.hbase.util.Bytes;
-082import 
org.apache.hadoop.hbase.util.Pair;
-083import 
org.apache.hadoop.hbase.util.Threads;
-084import org.apache.hadoop.io.IOUtils;
-085import 
org.apache.hadoop.io.IntWritable;
-086import 
org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
-087import 
org.apache.hadoop.util.StringUtils;
-088import org.apache.htrace.TraceInfo;
-089
-090import 
com.google.common.util.concurrent.ThreadFactoryBuilder;
+018package org.apache.hadoop.hbase.ipc;
+019
+020import java.io.IOException;
+021import java.net.BindException;
+022import java.net.InetSocketAddress;
+023import java.net.ServerSocket;
+024import java.net.SocketException;
+025import java.net.UnknownHostException;
+026import 
java.nio.channels.CancelledKeyException;
+027import 
java.nio.channels.GatheringByteChannel;
+028import java.nio.channels.SelectionKey;
+029import java.nio.channels.Selector;
+030import 
java.nio.channels.ServerSocketChannel;
+031import java.nio.channels.SocketChannel;
+032import java.util.Collections;
+033import java.util.Iterator;
+034import java.util.List;
+035import java.util.Set;
+036import java.util.Timer;
+037import java.util.TimerTask;

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e0a5167/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
index 7e37ca0..79c65e3 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
@@ -70,1527 +70,1525 @@
 062import com.google.common.collect.Sets;
 063
 064/**
-065 * The base class for load balancers. It 
provides the the functions used to by
-066 * {@link 
org.apache.hadoop.hbase.master.AssignmentManager} to assign regions
-067 * in the edge cases. It doesn't provide 
an implementation of the
-068 * actual balancing algorithm.
-069 *
-070 */
-071public abstract class BaseLoadBalancer 
implements LoadBalancer {
-072  protected static final int 
MIN_SERVER_BALANCE = 2;
-073  private volatile boolean stopped = 
false;
+065 * The base class for load balancers. It 
provides functions used by
+066 * {@link 
org.apache.hadoop.hbase.master.AssignmentManager} to assign regions in the edge 
cases.
+067 * It doesn't provide an implementation 
of the actual balancing algorithm.
+068 */
+069public abstract class BaseLoadBalancer 
implements LoadBalancer {
+070  protected static final int 
MIN_SERVER_BALANCE = 2;
+071  private volatile boolean stopped = 
false;
+072
+073  private static final 
List<HRegionInfo> EMPTY_REGION_LIST = new ArrayList<>(0);
 074
-075  private static final 
List<HRegionInfo> EMPTY_REGION_LIST = new ArrayList<>(0);
-076
-077  static final 
Predicate<ServerLoad> IDLE_SERVER_PREDICATOR
-078= load -> 
load.getNumberOfRegions() == 0;
+075  static final 
Predicate<ServerLoad> IDLE_SERVER_PREDICATOR
+076= load -> 
load.getNumberOfRegions() == 0;
+077
+078  protected final RegionLocationFinder 
regionFinder = new RegionLocationFinder();
 079
-080  protected final RegionLocationFinder 
regionFinder = new RegionLocationFinder();
-081
-082  private static class DefaultRackManager 
extends RackManager {
-083@Override
-084public String getRack(ServerName 
server) {
-085  return UNKNOWN_RACK;
-086}
-087  }
-088
-089  /**
-090   * The constructor that uses the basic 
MetricsBalancer
-091   */
-092  protected BaseLoadBalancer() {
-093metricsBalancer = new 
MetricsBalancer();
-094  }
-095
-096  /**
-097   * This Constructor accepts an instance 
of MetricsBalancer,
-098   * which will be used instead of 
creating a new one
-099   */
-100  protected 
BaseLoadBalancer(MetricsBalancer metricsBalancer) {
-101this.metricsBalancer = 
(metricsBalancer != null) ? metricsBalancer : new MetricsBalancer();
-102  }
-103
-104  /**
-105   * An efficient array based 
implementation similar to ClusterState for keeping
-106   * the status of the cluster in terms 
of region assignment and distribution.
-107   * LoadBalancers such as StochasticLoadBalancer use this Cluster object because
-108   * hundreds of thousands of hashmap 
manipulations are very costly, which is why this
-109   * class uses mostly indexes and 
arrays.
-110   *
-111   * Cluster tracks a list of unassigned 
regions, region assignments, and the server
-112   * topology in terms of server names, 
hostnames and racks.
-113   */
-114  protected static class Cluster {
-115ServerName[] servers;
-116String[] hosts; // ServerName 
uniquely identifies a region server. multiple RS can run on the same host
-117String[] racks;
-118boolean multiServersPerHost = false; 
// whether or not any host has more than one server
-119
-120ArrayList<String> tables;
-121HRegionInfo[] regions;
-122Deque<BalancerRegionLoad>[] 
regionLoads;
-123private RegionLocationFinder 
regionFinder;
+080  private static class DefaultRackManager 
extends RackManager {
+081@Override
+082public String getRack(ServerName 
server) {
+083  return UNKNOWN_RACK;
+084}
+085  }
+086
+087  /**
+088   * The constructor that uses the basic 
MetricsBalancer
+089   */
+090  protected BaseLoadBalancer() {
+091metricsBalancer = new 
MetricsBalancer();
+092  }
+093
+094  /**
+095   * This Constructor accepts an instance 
of MetricsBalancer,
+096   * which will be used instead of 
creating a new one
+097   */
+098  protected 
BaseLoadBalancer(MetricsBalancer metricsBalancer) {
+099this.metricsBalancer = 
(metricsBalancer != null) ? metricsBalancer : new MetricsBalancer();
+100  }
+101
+102  /**
+103   * An efficient array based 
implementation similar to ClusterState for keeping
+104   * the status of the cluster in terms 
of region assignment and distribution.
+105   * LoadBalancers such as StochasticLoadBalancer use this Cluster object because
+106   * hundreds of thousands of hashmap 
manipulations are very 
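An illustrative-only sketch of the indexing idea the Cluster Javadoc above describes: entities are interned to dense int indexes once, so the balancer can work on primitive arrays instead of hash maps. The class and field names below are invented, not HBase code.

import java.util.HashMap;
import java.util.Map;

class IndexedCluster {
  final String[] servers;               // server index -> server name
  final int[] regionToServer;           // region index -> server index
  final Map<String, Integer> serverIndex = new HashMap<>();

  IndexedCluster(String[] servers, int numRegions) {
    this.servers = servers;
    this.regionToServer = new int[numRegions];
    for (int i = 0; i < servers.length; i++) {
      serverIndex.put(servers[i], i);   // hash lookups happen once, at build time
    }
  }

  // Moving a region during balancing is a single primitive array write,
  // avoiding the per-move hashmap churn the Javadoc warns about.
  void move(int region, int toServer) {
    regionToServer[region] = toServer;
  }
}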

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/709b8fcc/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html
index f2c44db..6cf2fc8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html
@@ -2581,7 +2581,7 @@
 2573try {
 2574  // Restore snapshot
 2575  get(
-2576
internalRestoreSnapshotAsync(snapshotName, tableName, false),
+2576
internalRestoreSnapshotAsync(snapshotName, tableName),
 2577syncWaitTimeout,
 2578TimeUnit.MILLISECONDS);
 2579} catch (IOException e) {
@@ -2590,7 +2590,7 @@
 2582  if (takeFailSafeSnapshot) {
 2583try {
 2584  get(
-2585
internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName, false),
+2585
internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName),
 2586syncWaitTimeout,
 2587TimeUnit.MILLISECONDS);
 2588  String msg = "Restore 
snapshot=" + snapshotName +
@@ -2633,7 +2633,7 @@
 2625  throw new 
TableNotDisabledException(tableName);
 2626}
 2627
-2628return 
internalRestoreSnapshotAsync(snapshotName, tableName, false);
+2628return 
internalRestoreSnapshotAsync(snapshotName, tableName);
 2629  }
 2630
 2631  @Override
@@ -2643,1621 +2643,1614 @@
 2635  }
 2636
 2637  @Override
-2638  public void cloneSnapshot(String 
snapshotName, TableName tableName, boolean restoreAcl)
+2638  public void cloneSnapshot(final String 
snapshotName, final TableName tableName)
 2639  throws IOException, 
TableExistsException, RestoreSnapshotException {
 2640if (tableExists(tableName)) {
 2641  throw new 
TableExistsException(tableName);
 2642}
 2643get(
-2644  
internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl),
+2644  
internalRestoreSnapshotAsync(snapshotName, tableName),
 2645  Integer.MAX_VALUE,
 2646  TimeUnit.MILLISECONDS);
 2647  }
 2648
 2649  @Override
-2650  public void cloneSnapshot(final String 
snapshotName, final TableName tableName)
-2651  throws IOException, 
TableExistsException, RestoreSnapshotException {
-2652cloneSnapshot(snapshotName, 
tableName, false);
-2653  }
-2654
-2655  @Override
-2656  public Future<Void> 
cloneSnapshotAsync(final String snapshotName, final TableName tableName)
-2657  throws IOException, 
TableExistsException {
-2658if (tableExists(tableName)) {
-2659  throw new 
TableExistsException(tableName);
-2660}
-2661return 
internalRestoreSnapshotAsync(snapshotName, tableName, false);
-2662  }
-2663
-2664  @Override
-2665  public byte[] 
execProcedureWithRet(String signature, String instance, Map<String, String> props)
-2666  throws IOException {
-2667ProcedureDescription desc = 
ProtobufUtil.buildProcedureDescription(signature, instance, props);
-2668final ExecProcedureRequest request 
=
-2669
ExecProcedureRequest.newBuilder().setProcedure(desc).build();
-2670// run the procedure on the master
-2671ExecProcedureResponse response = 
executeCallable(
-2672  new 
MasterCallable<ExecProcedureResponse>(getConnection(), 
getRpcControllerFactory()) {
-2673@Override
-2674protected ExecProcedureResponse 
rpcCall() throws Exception {
-2675  return 
master.execProcedureWithRet(getRpcController(), request);
-2676}
-2677  });
-2678
-2679return response.hasReturnData() ? 
response.getReturnData().toByteArray() : null;
-2680  }
-2681
-2682  @Override
-2683  public void execProcedure(String 
signature, String instance, Map<String, String> props)
-2684  throws IOException {
-2685ProcedureDescription desc = 
ProtobufUtil.buildProcedureDescription(signature, instance, props);
-2686final ExecProcedureRequest request 
=
-2687
ExecProcedureRequest.newBuilder().setProcedure(desc).build();
-2688// run the procedure on the master
-2689ExecProcedureResponse response = 
executeCallable(new MasterCallable<ExecProcedureResponse>(
-2690getConnection(), 
getRpcControllerFactory()) {
-2691  @Override
-2692  protected ExecProcedureResponse 
rpcCall() throws Exception {
-2693return 
master.execProcedure(getRpcController(), request);
-2694  }
-2695});
-2696
-2697long start = 
EnvironmentEdgeManager.currentTime();
-2698long max = 
response.getExpectedTimeout();
-2699long maxPauseTime = max / 
this.numRetries;
-2700int tries = 0;
-2701LOG.debug("Waiting a max of " + max 
+ " ms for procedure '" +
-2702signature + " : " + instance + 
"'' to complete. (max " + maxPauseTime + " ms per 

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/export_control.html
--
diff --git a/export_control.html b/export_control.html
index 43aeaf4..87fbc95 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Export Control
@@ -336,7 +336,7 @@ for more details.
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-10
+  Last Published: 
2017-05-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/hbase-annotations/checkstyle.html
--
diff --git a/hbase-annotations/checkstyle.html 
b/hbase-annotations/checkstyle.html
index 77c752a..1479b91 100644
--- a/hbase-annotations/checkstyle.html
+++ b/hbase-annotations/checkstyle.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-10
+Last Published: 2017-05-11
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/hbase-annotations/dependencies.html
--
diff --git a/hbase-annotations/dependencies.html 
b/hbase-annotations/dependencies.html
index c96c509..14f2f78 100644
--- a/hbase-annotations/dependencies.html
+++ b/hbase-annotations/dependencies.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-10
+Last Published: 2017-05-11
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/hbase-annotations/dependency-convergence.html
--
diff --git a/hbase-annotations/dependency-convergence.html 
b/hbase-annotations/dependency-convergence.html
index 020ee4b..dd3c8ab 100644
--- a/hbase-annotations/dependency-convergence.html
+++ b/hbase-annotations/dependency-convergence.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-10
+Last Published: 2017-05-11
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/hbase-annotations/dependency-info.html
--
diff --git a/hbase-annotations/dependency-info.html 
b/hbase-annotations/dependency-info.html
index aa01886..390900b 100644
--- a/hbase-annotations/dependency-info.html
+++ b/hbase-annotations/dependency-info.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-10
+Last Published: 2017-05-11
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/hbase-annotations/dependency-management.html
--
diff --git a/hbase-annotations/dependency-management.html 
b/hbase-annotations/dependency-management.html
index 02d0520..2736e70 100644
--- a/hbase-annotations/dependency-management.html
+++ b/hbase-annotations/dependency-management.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
   

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dd7176bf/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
index 8b22aa1..f2c44db 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
@@ -100,4135 +100,4164 @@
 092import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-137import 

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ef4c5a9/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.Connection.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.Connection.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.Connection.html
index 2987e7b..7f61b54 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.Connection.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.Connection.html
@@ -135,1305 +135,1251 @@
 127  private Listener listener = null;
 128  protected Responder responder = null;
 129
-130  /**
-131   * Datastructure that holds all 
necessary to a method invocation and then afterward, carries
-132   * the result.
-133   */
-134  @InterfaceStability.Evolving
-135  public class Call extends 
RpcServer.Call {
-136
-137protected Responder responder;
+130  /** Listens on the socket. Creates jobs 
for the handler threads*/
+131  private class Listener extends Thread 
{
+132
+133private ServerSocketChannel 
acceptChannel = null; //the accept channel
+134private Selector selector = null; 
//the selector that we use for the server
+135private Reader[] readers = null;
+136private int currentReader = 0;
+137private final int 
readerPendingConnectionQueueLength;
 138
-139
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
-140justification="Can't figure why 
this complaint is happening... see below")
-141Call(int id, final BlockingService 
service, final MethodDescriptor md,
-142RequestHeader header, Message 
param, CellScanner cellScanner,
-143RpcServer.Connection connection, 
long size, TraceInfo tinfo,
-144final InetAddress remoteAddress, 
int timeout, CallCleanup reqCleanup,
-145Responder responder) {
-146  super(id, service, md, header, 
param, cellScanner, connection, size,
-147  tinfo, remoteAddress, timeout, 
reqCleanup);
-148  this.responder = responder;
-149}
+139private ExecutorService readPool;
+140
+141public Listener(final String name) 
throws IOException {
+142  super(name);
+143  // The backlog of requests that we 
will have the serversocket carry.
+144  int backlogLength = 
conf.getInt("hbase.ipc.server.listen.queue.size", 128);
+145  readerPendingConnectionQueueLength 
=
+146  
conf.getInt("hbase.ipc.server.read.connection-queue.size", 100);
+147  // Create a new server socket and 
set to non blocking mode
+148  acceptChannel = 
ServerSocketChannel.open();
+149  
acceptChannel.configureBlocking(false);
 150
-151/**
-152 * Call is done. Execution happened 
and we returned results to client. It is now safe to
-153 * cleanup.
-154 */
-155
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
-156justification="Presume the lock 
on processing request held by caller is protection enough")
-157@Override
-158void done() {
-159  super.done();
-160  this.getConnection().decRpcCount(); 
// Say that we're done with this call.
-161}
-162
-163@Override
-164public long disconnectSince() {
-165  if 
(!getConnection().isConnectionOpen()) {
-166return System.currentTimeMillis() 
- timestamp;
-167  } else {
-168return -1L;
-169  }
-170}
-171
-172@Override
-173public synchronized void 
sendResponseIfReady() throws IOException {
-174  // set param null to reduce memory 
pressure
-175  this.param = null;
-176  this.responder.doRespond(this);
+151  // Bind the server socket to the 
binding address (can be different from the default interface)
+152  bind(acceptChannel.socket(), 
bindAddress, backlogLength);
+153  port = 
acceptChannel.socket().getLocalPort(); //Could be an ephemeral port
+154  address = 
(InetSocketAddress)acceptChannel.socket().getLocalSocketAddress();
+155  // create a selector;
+156  selector = Selector.open();
+157
+158  readers = new 
Reader[readThreads];
+159  // Why this executor thing? Why not 
like hadoop just start up all the threads? I suppose it
+160  // has an advantage in that it is 
easy to shutdown the pool.
+161  readPool = 
Executors.newFixedThreadPool(readThreads,
+162new 
ThreadFactoryBuilder().setNameFormat(
+163  
"RpcServer.reader=%d,bindAddress=" + bindAddress.getHostName() +
+164  ",port=" + 
port).setDaemon(true)
+165
.setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
+166  for (int i = 0; i < readThreads; 
++i) {
+167Reader reader = new Reader();
+168readers[i] = reader;
+169readPool.execute(reader);
+170  }
+171  LOG.info(getName() + ": started " + 
readThreads + " reader(s) listening on port=" + port);
+172
+173  // Register accepts 
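For orientation, a self-contained sketch of the acceptor/reader split the Listener constructor above sets up: one non-blocking accept channel plus a small fixed pool of reader threads. Everything here is simplified and the names are invented; the real readers each run their own Selector loop.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.ServerSocketChannel;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class AcceptorSketch {
  public static void main(String[] args) throws IOException {
    int readThreads = 3;                        // cf. the readThreads field above
    ServerSocketChannel accept = ServerSocketChannel.open();
    accept.configureBlocking(false);            // the accept loop polls a Selector
    accept.socket().bind(new InetSocketAddress(0), 128);  // 128 = listen backlog

    // A fixed pool rather than bare threads: the HBase comment above notes
    // this mainly makes shutting the readers down easy.
    ExecutorService readPool = Executors.newFixedThreadPool(readThreads);
    for (int i = 0; i < readThreads; i++) {
      final int id = i;
      readPool.execute(() -> System.out.println("reader-" + id + " started"));
    }
    readPool.shutdown();
    accept.close();
  }
}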

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/31df4674/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
index f3f7a46..8750fa2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
@@ -56,2015 +56,2125 @@
 048import 
org.apache.hadoop.hbase.MetaTableAccessor;
 049import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
 050import 
org.apache.hadoop.hbase.NotServingRegionException;
-051import 
org.apache.hadoop.hbase.RegionLocations;
-052import 
org.apache.hadoop.hbase.ServerName;
-053import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-054import 
org.apache.hadoop.hbase.HConstants;
-055import 
org.apache.hadoop.hbase.TableExistsException;
-056import 
org.apache.hadoop.hbase.TableName;
-057import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-058import 
org.apache.hadoop.hbase.TableNotDisabledException;
-059import 
org.apache.hadoop.hbase.TableNotFoundException;
-060import 
org.apache.hadoop.hbase.UnknownRegionException;
-061import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-062import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-063import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-064import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-065import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-066import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-067import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-068import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-069import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-070import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-071import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-072import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-073import 
org.apache.hadoop.hbase.replication.ReplicationException;
-074import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-075import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-076import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-077import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-078import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-079import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-080import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-081import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-082import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-102import 

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6f2e75f2/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableOperator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableOperator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableOperator.html
index 6c52543..f3f7a46 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableOperator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableOperator.html
@@ -31,1797 +31,2040 @@
 023import java.util.ArrayList;
 024import java.util.Arrays;
 025import java.util.Collection;
-026import java.util.HashMap;
-027import java.util.LinkedList;
-028import java.util.List;
-029import java.util.Map;
-030import java.util.Optional;
-031import 
java.util.concurrent.CompletableFuture;
-032import java.util.concurrent.TimeUnit;
-033import 
java.util.concurrent.atomic.AtomicReference;
-034import java.util.function.BiConsumer;
-035import java.util.regex.Pattern;
-036import java.util.stream.Collectors;
-037
-038import 
com.google.common.annotations.VisibleForTesting;
-039
-040import io.netty.util.Timeout;
-041import io.netty.util.TimerTask;
-042import org.apache.commons.logging.Log;
-043import 
org.apache.commons.logging.LogFactory;
-044import 
org.apache.hadoop.hbase.HColumnDescriptor;
-045import 
org.apache.hadoop.hbase.HRegionInfo;
-046import 
org.apache.hadoop.hbase.HRegionLocation;
-047import 
org.apache.hadoop.hbase.MetaTableAccessor;
-048import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-049import 
org.apache.hadoop.hbase.NotServingRegionException;
-050import 
org.apache.hadoop.hbase.RegionLocations;
-051import 
org.apache.hadoop.hbase.ServerName;
-052import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-053import 
org.apache.hadoop.hbase.HConstants;
-054import 
org.apache.hadoop.hbase.TableExistsException;
-055import 
org.apache.hadoop.hbase.TableName;
-056import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-057import 
org.apache.hadoop.hbase.TableNotFoundException;
-058import 
org.apache.hadoop.hbase.UnknownRegionException;
-059import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-060import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-061import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-062import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-063import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-064import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-065import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-066import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-067import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-068import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-069import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-070import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-071import 
org.apache.hadoop.hbase.replication.ReplicationException;
-072import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-073import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-074import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-075import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-076import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-077import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-078import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-079import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-080import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-081import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-082import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/efd0601e/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
index d2ce8ca..8216665 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
@@ -115,7 +115,7 @@ var activeTableTab = "activeTableTab";
 
 @InterfaceAudience.Private
  @InterfaceStability.Evolving
-public class AsyncHBaseAdmin
+public class AsyncHBaseAdmin
 extends Object
 implements AsyncAdmin
 The implementation of AsyncAdmin.
@@ -342,7 +342,7 @@ implements 
-private CompletableFuture<HTableDescriptor[]>
+private CompletableFuture<TableDescriptor[]>
 batchTableOperations(Pattern pattern,
 AsyncHBaseAdmin.TableOperator operator,
 String operationType)
@@ -414,20 +414,20 @@ implements 
 CompletableFuture<Void>
-createTable(HTableDescriptor desc)
+createTable(TableDescriptor desc)
 Creates a new table.

 CompletableFuture<Void>
-createTable(HTableDescriptor desc,
+createTable(TableDescriptor desc,
 byte[][] splitKeys)
 Creates a new table with an initial set of empty regions defined by the specified split keys.

 CompletableFuture<Void>
-createTable(HTableDescriptor desc,
+createTable(TableDescriptor desc,
 byte[] startKey,
 byte[] endKey,
 int numRegions)
@@ -454,13 +454,13 @@ implements 
-CompletableFuture<HTableDescriptor[]>
+CompletableFuture<TableDescriptor[]>
 deleteTables(Pattern pattern)
 Delete tables matching the passed in pattern and wait on completion.

-CompletableFuture<HTableDescriptor[]>
+CompletableFuture<TableDescriptor[]>
 deleteTables(String regex)
 Deletes tables matching the passed in pattern and wait on completion.
 
@@ -478,13 +478,13 @@ implements 
-CompletableFuture<HTableDescriptor[]>
+CompletableFuture<TableDescriptor[]>
 disableTables(Pattern pattern)
 Disable tables matching the passed in pattern.
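The signature changes above track the move from the mutable HTableDescriptor to the immutable TableDescriptor. A hedged sketch of building one with the builder API that HBase 2.0 eventually ships (builder names may differ at this exact revision):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class TableDescriptorExample {
  // Builds the immutable descriptor that AsyncAdmin.createTable(desc) now
  // expects; createTable returns CompletableFuture<Void> per the table above.
  public static TableDescriptor demoTable() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build();
  }
}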
 
 
 

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/10601a30/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html
index be839b7..72853dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html
@@ -45,1639 +45,1784 @@
 037
 038import 
com.google.common.annotations.VisibleForTesting;
 039
-040import org.apache.commons.logging.Log;
-041import 
org.apache.commons.logging.LogFactory;
-042import 
org.apache.hadoop.hbase.HColumnDescriptor;
-043import 
org.apache.hadoop.hbase.HRegionInfo;
-044import 
org.apache.hadoop.hbase.HRegionLocation;
-045import 
org.apache.hadoop.hbase.HTableDescriptor;
-046import 
org.apache.hadoop.hbase.MetaTableAccessor;
-047import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-048import 
org.apache.hadoop.hbase.NotServingRegionException;
-049import 
org.apache.hadoop.hbase.RegionLocations;
-050import 
org.apache.hadoop.hbase.ServerName;
-051import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-052import 
org.apache.hadoop.hbase.HConstants;
-053import 
org.apache.hadoop.hbase.TableName;
-054import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-055import 
org.apache.hadoop.hbase.TableNotFoundException;
-056import 
org.apache.hadoop.hbase.UnknownRegionException;
-057import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-058import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-059import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-060import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-061import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-062import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-063import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-064import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-065import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-066import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-067import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-068import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-069import 
org.apache.hadoop.hbase.replication.ReplicationException;
-070import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-071import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-072import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-073import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-074import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-075import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-076import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-077import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-078import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-079import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-080import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-081import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-082import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-096import 

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/662ea7dc/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
index ac4a9b3..be839b7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
@@ -30,212 +30,212 @@
 022import java.io.IOException;
 023import java.util.ArrayList;
 024import java.util.Arrays;
-025import java.util.LinkedList;
-026import java.util.List;
-027import java.util.Optional;
-028import java.util.concurrent.CompletableFuture;
-029import java.util.concurrent.TimeUnit;
-030import java.util.concurrent.atomic.AtomicReference;
-031import java.util.function.BiConsumer;
-032import java.util.regex.Pattern;
-033
-034import com.google.common.annotations.VisibleForTesting;
-035import org.apache.commons.logging.Log;
-036import org.apache.commons.logging.LogFactory;
-037import org.apache.hadoop.hbase.HColumnDescriptor;
-038import org.apache.hadoop.hbase.HRegionInfo;
-039import org.apache.hadoop.hbase.HRegionLocation;
-040import org.apache.hadoop.hbase.HTableDescriptor;
-041import org.apache.hadoop.hbase.MetaTableAccessor;
-042import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-043import org.apache.hadoop.hbase.NotServingRegionException;
-044import org.apache.hadoop.hbase.RegionLocations;
-045import org.apache.hadoop.hbase.ServerName;
-046import org.apache.hadoop.hbase.NamespaceDescriptor;
-047import org.apache.hadoop.hbase.HConstants;
-048import org.apache.hadoop.hbase.TableName;
-049import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-050import org.apache.hadoop.hbase.TableNotFoundException;
-051import org.apache.hadoop.hbase.UnknownRegionException;
-052import org.apache.hadoop.hbase.classification.InterfaceAudience;
-053import org.apache.hadoop.hbase.classification.InterfaceStability;
-054import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-055import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-056import org.apache.hadoop.hbase.client.Scan.ReadType;
-057import org.apache.hadoop.hbase.exceptions.DeserializationException;
-058import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-059import org.apache.hadoop.hbase.quotas.QuotaFilter;
-060import org.apache.hadoop.hbase.quotas.QuotaSettings;
-061import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-062import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-063import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-064import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-065import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-066import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-067import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-068import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-069import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-070import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-071import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-072import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-073import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-074import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-075import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-076import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-077import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-078import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-079import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-080import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-081import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-082import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-085import 

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b4bae59/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileReader.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileReader.html b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileReader.html
index 5c737d3..b6d92d0 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileReader.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileReader.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":42,"i19":42,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":42,"i18":42,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class StoreFileReader
+public class StoreFileReader
 extends java.lang.Object
 Reader for a StoreFile.
 
@@ -144,45 +144,45 @@ extends java.lang.Object
 bulkLoadResult

-private boolean
-compactedAway

 protected BloomFilter
 deleteFamilyBloomFilter

 private long
 deleteFamilyCnt

 protected BloomFilter
 generalBloomFilter

 private byte[]
 lastBloomKey

 private KeyValue.KeyOnlyKeyValue
 lastBloomKeyOnlyKV

 private static org.apache.commons.logging.Log
 LOG

 private HFile.Reader
 reader

 private java.util.concurrent.atomic.AtomicInteger
 refCount

 protected long
 sequenceID

+private boolean
+shared

 private boolean
 skipResetSeqId
@@ -203,27 +203,43 @@ extends java.lang.Object

 Constructors

-Constructor and Description
+Modifier
+Constructor and Description

-StoreFileReader()
+(package private)
+StoreFileReader()
 ONLY USE DEFAULT CONSTRUCTOR FOR UNIT TESTS

-StoreFileReader(org.apache.hadoop.fs.FileSystem fs,
+StoreFileReader(org.apache.hadoop.fs.FileSystem fs,
    org.apache.hadoop.fs.Path path,
    CacheConfig cacheConf,
+   boolean primaryReplicaStoreFile,
+   java.util.concurrent.atomic.AtomicInteger refCount,
+   boolean shared,
    org.apache.hadoop.conf.Configuration conf)

-StoreFileReader(org.apache.hadoop.fs.FileSystem fs,
+StoreFileReader(org.apache.hadoop.fs.FileSystem fs,
    org.apache.hadoop.fs.Path path,
    FSDataInputStreamWrapper in,
    long size,
    CacheConfig cacheConf,
+   boolean primaryReplicaStoreFile,
+   java.util.concurrent.atomic.AtomicInteger refCount,
+   boolean shared,
    org.apache.hadoop.conf.Configuration conf)

+private
+StoreFileReader(HFile.Reader reader,
+   java.util.concurrent.atomic.AtomicInteger refCount,
+   boolean shared)
 
 
 
@@ -251,10 +267,7 @@ extends java.lang.Object

 (package private) void
-decrementRefCount()
-Decrement the ref count associated with the reader whenever a scanner
- associated with the reader is closed
+copyFields(StoreFileReader reader)

 (package private) void
@@ -316,10 +329,6 @@ extends java.lang.Object
 getMaxTimestamp()
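
The hunks above show StoreFileReader's lifecycle moving from an internal decrementRefCount() to an AtomicInteger refCount injected through the constructors, together with a shared flag. As a rough illustration of that pattern (not the HBase implementation; RefCountedReader and its method names are made up for this sketch), a reader frees its underlying resource only when the last scanner closes and the reader is not shared:

import java.util.concurrent.atomic.AtomicInteger;

// Minimal sketch of constructor-injected reference counting. The same counter
// can be handed to several readers of one file so they share a single count.
class RefCountedReader {
  private final AtomicInteger refCount; // scanners currently using the reader
  private final boolean shared;         // true: an external owner closes the file

  RefCountedReader(AtomicInteger refCount, boolean shared) {
    this.refCount = refCount;
    this.shared = shared;
  }

  void onScannerOpened() {
    refCount.incrementAndGet();
  }

  void onScannerClosed() {
    // Last user gone and nobody else owns the file: release it ourselves.
    if (refCount.decrementAndGet() == 0 && !shared) {
      release();
    }
  }

  private void release() {
    // close streams, drop cached blocks, etc.
  }
}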
 
 

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2fcc2ae0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.StatisticsThread.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.StatisticsThread.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.StatisticsThread.html
new file mode 100644
index 0000000..092287e
--- /dev/null
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.StatisticsThread.html
@@ -0,0 +1,337 @@
+[HTML preamble of the generated "Source code" page; markup stripped by the archive]
+001/**
+002 * Copyright The Apache Software Foundation
+003 *
+004 * Licensed to the Apache Software Foundation (ASF) under one or more
+005 * contributor license agreements. See the NOTICE file distributed with this
+006 * work for additional information regarding copyright ownership. The ASF
+007 * licenses this file to you under the Apache License, Version 2.0 (the
+008 * "License"); you may not use this file except in compliance with the License.
+009 * You may obtain a copy of the License at
+010 *
+011 * http://www.apache.org/licenses/LICENSE-2.0
+012 *
+013 * Unless required by applicable law or agreed to in writing, software
+014 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+015 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+016 * License for the specific language governing permissions and limitations
+017 * under the License.
+018 */
+019package org.apache.hadoop.hbase.regionserver;
+020
+021import java.util.concurrent.BlockingQueue;
+022import java.util.concurrent.Executors;
+023import java.util.concurrent.LinkedBlockingQueue;
+024import java.util.concurrent.ScheduledExecutorService;
+025import java.util.concurrent.TimeUnit;
+026import java.util.concurrent.atomic.AtomicLong;
+027
+028import org.apache.commons.logging.Log;
+029import org.apache.commons.logging.LogFactory;
+030import org.apache.hadoop.hbase.classification.InterfaceAudience;
+031import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.HeapMemoryTuneObserver;
+032import org.apache.hadoop.util.StringUtils;
+033
+034import com.google.common.annotations.VisibleForTesting;
+035import com.google.common.util.concurrent.ThreadFactoryBuilder;
+036
+037/**
+038 * A pool of {@link Chunk} instances.
+039 *
+040 * MemStoreChunkPool caches a number of retired chunks for reuse; this can
+041 * reduce the bytes allocated while writing and thereby ease garbage
+042 * collection on the JVM.
+043 *
+044 * The pool instance is globally unique and can be obtained through
+045 * {@link MemStoreChunkPool#initialize(long, float, float, int, boolean)}.
+046 *
+047 * {@link MemStoreChunkPool#getChunk()} is called when a MemStoreLAB allocates
+048 * bytes, and {@link MemStoreChunkPool#putbackChunks(BlockingQueue)} is called
+049 * when a MemStore clears its snapshot for a flush.
+050 */
+051@SuppressWarnings("javadoc")
+052@InterfaceAudience.Private
+053public class MemStoreChunkPool implements HeapMemoryTuneObserver {
+054  private static final Log LOG = LogFactory.getLog(MemStoreChunkPool.class);
+055
+056  // Static reference to the MemStoreChunkPool
+057  static MemStoreChunkPool GLOBAL_INSTANCE;
+058  /** Boolean whether we have disabled the memstore chunk pool entirely. */
+059  static boolean chunkPoolDisabled = false;
+060
+061  private int maxCount;
+062
+063  // A queue of reclaimed chunks
+064  private final BlockingQueue<Chunk> reclaimedChunks;
+065  private final int chunkSize;
+066  private final float poolSizePercentage;
+067
+068  /** Statistics thread schedule pool */
+069  private final ScheduledExecutorService scheduleThreadPool;
+070  /** Statistics thread */
+071  private static final int statThreadPeriod = 60 * 5;
+072  private final AtomicLong chunkCount = new AtomicLong();
+073  private final AtomicLong reusedChunkCount = new AtomicLong();
+074  private final boolean offheap;
+075
+076  MemStoreChunkPool(int chunkSize, int maxCount, int initialCount, float poolSizePercentage,
+077      boolean offheap) {
+078    this.maxCount = maxCount;
+079    this.chunkSize = chunkSize;
+080    this.poolSizePercentage = poolSizePercentage;
+081    this.offheap = offheap;
+082    this.reclaimedChunks = new LinkedBlockingQueue<>();
+083    for (int i = 0; i < initialCount; i++) {
+084      Chunk chunk = this.offheap ? new OffheapChunk(chunkSize) : new OnheapChunk(chunkSize);
+085      chunk.init();
+086      reclaimedChunks.add(chunk);
+087    }
+088    chunkCount.set(initialCount);
+089    final String n = Thread.currentThread().getName();
+090    scheduleThreadPool = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
+091        .setNameFormat(n + "-MemStoreChunkPool Statistics").setDaemon(true).build());
+092

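To make the javadoc's getChunk()/putbackChunks() cycle concrete, here is a minimal, self-contained sketch of the same reuse idea. It is not the HBase code: SimpleChunkPool is a made-up name, a plain byte[] stands in for Chunk, and the statistics thread and heap-tuning hooks are omitted.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Reuse cycle: getChunk() prefers a retired chunk and only allocates when the
// pool is empty; putbackChunk() retires a chunk, silently dropping it once the
// bounded queue (maxCount) is full so the pool never grows without limit.
class SimpleChunkPool {
  private final BlockingQueue<byte[]> reclaimedChunks;
  private final int chunkSize;

  SimpleChunkPool(int chunkSize, int maxCount) {
    this.chunkSize = chunkSize;
    this.reclaimedChunks = new LinkedBlockingQueue<>(maxCount);
  }

  byte[] getChunk() {
    byte[] chunk = reclaimedChunks.poll(); // reuse a retired chunk if any
    return chunk != null ? chunk : new byte[chunkSize];
  }

  void putbackChunk(byte[] chunk) {
    reclaimedChunks.offer(chunk); // no-op (chunk becomes garbage) when full
  }
}

A writer would call getChunk() each time its current chunk fills up and return the whole batch with putbackChunk() when the memstore snapshot is flushed, so steady-state writes allocate almost nothing new.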