[28/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/org/apache/hadoop/hbase/thrift2/client/class-use/ThriftAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/thrift2/client/class-use/ThriftAdmin.html b/devapidocs/org/apache/hadoop/hbase/thrift2/client/class-use/ThriftAdmin.html
new file mode 100644
index 000..74f2523
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/thrift2/client/class-use/ThriftAdmin.html
@@ -0,0 +1,125 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+Uses of Class org.apache.hadoop.hbase.thrift2.client.ThriftAdmin (Apache HBase 3.0.0-SNAPSHOT API)
+[standard Javadoc navigation header: Overview, Package, Class, Use, Tree, Deprecated, Index, Help, Prev, Next, Frames, No Frames, All Classes]
+Uses of Class org.apache.hadoop.hbase.thrift2.client.ThriftAdmin
+No usage of org.apache.hadoop.hbase.thrift2.client.ThriftAdmin
+[standard Javadoc navigation footer]
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/org/apache/hadoop/hbase/thrift2/client/class-use/ThriftClientBuilder.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/thrift2/client/class-use/ThriftClientBuilder.html b/devapidocs/org/apache/hadoop/hbase/thrift2/client/class-use/ThriftClientBuilder.html
new file mode 100644
index 000..0827189
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/thrift2/client/class-use/ThriftClientBuilder.html
@@ -0,0 +1,199 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+Uses of Class org.apache.hadoop.hbase.thrift2.client.ThriftClientBuilder (Apache HBase 3.0.0-SNAPSHOT API)
+[standard Javadoc navigation header]
+Uses of Class org.apache.hadoop.hbase.thrift2.client.ThriftClientBuilder
+
+Packages that use ThriftClientBuilder
+Package: org.apache.hadoop.hbase.thrift2.client
+
+Uses of ThriftClientBuilder in org.apache.hadoop.hbase.thrift2.client
+
+Subclasses of ThriftClientBuilder in org.apache.hadoop.hbase.thrift2.client
+static class  ThriftConnection.DefaultThriftClientBuilder
+              the default thrift client builder.
+static class  ThriftConnection.HTTPThriftClientBuilder
+              the default thrift http client builder.
+
+Fields in org.apache.hadoop.hbase.thrift2.client declared as ThriftClientBuilder
+private ThriftClientBuilder  ThriftConnection.clientBuilder
+
+Methods in org.apache.hadoop.hbase.thrift2.client that return ThriftClientBuilder
+ThriftClientBuilder  ThriftConnection.getClientBuilder()
+
+[standard Javadoc navigation footer]
+Copyright © 2007–2019 The Apache Software
[28/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
index ab022a4..c3d54f6 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
@@ -413,51 +413,58 @@

 default void
+MasterObserver.postIsRpcThrottleEnabled(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    boolean rpcThrottleEnabled)
+Called after getting if is rpc throttle enabled.
+
+default void
 MasterObserver.postListDecommissionedRegionServers(ObserverContext<MasterCoprocessorEnvironment> ctx)
 Called after list decommissioned region servers.

 default void
 MasterObserver.postListNamespaceDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
     List<NamespaceDescriptor> descriptors)
 Called after a listNamespaceDescriptors request has been processed.

 default void
 MasterObserver.postListReplicationPeers(ObserverContext<MasterCoprocessorEnvironment> ctx,
     String regex)
 Called after list replication peers.

 default void
 MasterObserver.postListRSGroups(ObserverContext<MasterCoprocessorEnvironment> ctx)
 Called after listing region server group information.

 default void
 MasterObserver.postListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
     SnapshotDescription snapshot)
 Called after listSnapshots request has been processed.

 default void
 MasterObserver.postLockHeartbeat(ObserverContext<MasterCoprocessorEnvironment> ctx)
 Called after heartbeat to a lock.

 default void
 MasterObserver.postMergeRegions(ObserverContext<MasterCoprocessorEnvironment> c,
     RegionInfo[] regionsToMerge)
 called after merge regions request.

 default void
 MasterObserver.postMergeRegionsCommitAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
     RegionInfo[] regionsToMerge,
@@ -465,7 +472,7 @@
 This will be called after META step as part of regions merge transaction.

 default void
 MasterObserver.postModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
     NamespaceDescriptor currentNsDescriptor)
@@ -474,7 +481,7 @@

 default void
 MasterObserver.postModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
     NamespaceDescriptor oldNsDescriptor,
@@ -482,7 +489,7 @@
 Called after the modifyNamespace operation has been requested.

 default void
 MasterObserver.postModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
     TableName tableName,
@@ -492,7 +499,7 @@

 default void
 MasterObserver.postModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
     TableName tableName,
@@ -501,7 +508,7 @@
 Called after the modifyTable operation has been requested.

 default void
 MasterObserver.postMove(ObserverContext<MasterCoprocessorEnvironment> ctx,
     RegionInfo region,
@@ -510,7 +517,7 @@
 Called after the region move has been requested.

 default void
 MasterObserver.postMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
     Set<Address> servers,
@@ -518,7 +525,7 @@
 Called after servers are moved to target region server group

 default void
 MasterObserver.postMoveServersAndTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
     Set<Address> servers,
@@ -527,7 +534,7 @@
 Called after servers are moved to target region server group

 default void
 MasterObserver.postMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
     Set<TableName> tables,
@@ -535,7 +542,7 @@
 Called after servers are moved to target region server group

 default void
 MasterObserver.postRecommissionRegionServer(ObserverContext<MasterCoprocessorEnvironment> ctx,
     ServerName server,
@@ -543,35 +550,35 @@
 Called after recommission region server.

 default void
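For orientation while reading this hook table: a MasterObserver is installed as a master coprocessor, and each post* method above is an optional default-method override. A minimal, hypothetical observer using one hook from the table (the class name and the println are invented; the signature follows the table, assuming the usual HBase coprocessor imports):

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hypothetical example observer; not part of the diff above.
public class MergeLoggingObserver implements MasterCoprocessor, MasterObserver {
  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this); // expose this class as the observer
  }

  @Override
  public void postMergeRegions(ObserverContext<MasterCoprocessorEnvironment> c,
      RegionInfo[] regionsToMerge) throws IOException {
    // Runs after a merge-regions request, per the hook's contract in the table.
    System.out.println("Merged " + regionsToMerge.length + " regions");
  }
}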
[28/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.ServerRequest.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.ServerRequest.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.ServerRequest.html
index cd0ff28..4f9947f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.ServerRequest.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.ServerRequest.html
@@ -31,161 +31,161 @@
 023import static org.apache.hadoop.hbase.client.ConnectionUtils.resetController;
 024import static org.apache.hadoop.hbase.client.ConnectionUtils.translateException;
 025import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
-026
-027import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
-028
-029import java.io.IOException;
-030import java.util.ArrayList;
-031import java.util.Collections;
-032import java.util.HashMap;
-033import java.util.IdentityHashMap;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Optional;
-037import java.util.concurrent.CompletableFuture;
-038import java.util.concurrent.ConcurrentHashMap;
-039import java.util.concurrent.ConcurrentLinkedQueue;
-040import java.util.concurrent.ConcurrentMap;
-041import java.util.concurrent.ConcurrentSkipListMap;
-042import java.util.concurrent.TimeUnit;
-043import java.util.function.Supplier;
-044import java.util.stream.Collectors;
-045import java.util.stream.Stream;
-046
-047import org.apache.hadoop.hbase.CellScannable;
-048import org.apache.hadoop.hbase.DoNotRetryIOException;
-049import org.apache.hadoop.hbase.HRegionLocation;
-050import org.apache.hadoop.hbase.ServerName;
-051import org.apache.hadoop.hbase.TableName;
-052import org.apache.yetus.audience.InterfaceAudience;
-053import org.slf4j.Logger;
-054import org.slf4j.LoggerFactory;
-055import org.apache.hadoop.hbase.client.MultiResponse.RegionResult;
-056import org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext;
-057import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-058import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-059import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-060import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-061import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-062import org.apache.hadoop.hbase.util.Bytes;
-063import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-064
-065/**
-066 * Retry caller for batch.
-067 * <p>
-068 * Notice that, the {@link #operationTimeoutNs} is the total time limit now which is the same with
-069 * other single operations
-070 * <p>
-071 * And the {@link #maxAttempts} is a limit for each single operation in the batch logically. In the
-072 * implementation, we will record a {@code tries} parameter for each operation group, and if it is
-073 * split to several groups when retrying, the sub groups will inherit the {@code tries}. You can
-074 * imagine that the whole retrying process is a tree, and the {@link #maxAttempts} is the limit of
-075 * the depth of the tree.
-076 */
-077@InterfaceAudience.Private
-078class AsyncBatchRpcRetryingCaller<T> {
-079
-080  private static final Logger LOG = LoggerFactory.getLogger(AsyncBatchRpcRetryingCaller.class);
-081
-082  private final HashedWheelTimer retryTimer;
-083
-084  private final AsyncConnectionImpl conn;
-085
-086  private final TableName tableName;
-087
-088  private final List<Action> actions;
-089
-090  private final List<CompletableFuture<T>> futures;
-091
-092  private final IdentityHashMap<Action, CompletableFuture<T>> action2Future;
-093
-094  private final IdentityHashMap<Action, List<ThrowableWithExtraContext>> action2Errors;
-095
-096  private final long pauseNs;
-097
-098  private final int maxAttempts;
-099
-100  private final long operationTimeoutNs;
-101
-102  private final long rpcTimeoutNs;
-103
-104  private final int startLogErrorsCnt;
-105
-106  private final long startNs;
-107
-108  // we can not use HRegionLocation as the map key because the hashCode and equals method of
-109  // HRegionLocation only consider serverName.
-110  private static final class RegionRequest {
-111
-112    public final HRegionLocation loc;
-113
-114    public final ConcurrentLinkedQueue<Action> actions = new ConcurrentLinkedQueue<>();
-115
-116    public RegionRequest(HRegionLocation loc) {
-117      this.loc = loc;
-118    }
-119  }
-120
-121  private static final class ServerRequest {
-122
-123    public final ConcurrentMap<byte[], RegionRequest> actionsByRegion =
-124        new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
-125
-126    public void addAction(HRegionLocation loc, Action action) {
-127 
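The class comment in the removed block above is the interesting part: operationTimeoutNs caps the whole batch, while maxAttempts bounds the depth of a tree of retry groups, because a failed group whose regions have moved is re-split and each sub-group inherits its parent's tries counter. A toy illustration of that inheritance (Group and splitByRegion are invented stand-ins, not HBase types):

import java.util.List;

// Toy sketch of "maxAttempts as retry-tree depth"; not the HBase implementation.
final class RetryTreeSketch {

  static final int MAX_ATTEMPTS = 3;

  record Group(List<String> actions, int tries) {}

  static void retry(Group group) {
    if (group.tries() > MAX_ATTEMPTS) {
      System.out.println("giving up on " + group.actions());
      return; // depth limit of the retry tree reached
    }
    System.out.println("attempt " + group.tries() + " for " + group.actions());
    // Pretend everything failed and regions moved, so the group splits;
    // each sub-group inherits the parent's tries counter, incremented once.
    for (List<String> sub : splitByRegion(group.actions())) {
      retry(new Group(sub, group.tries() + 1));
    }
  }

  static List<List<String>> splitByRegion(List<String> actions) {
    return actions.size() <= 1 ? List.of() // single action: nothing left to split
        : List.of(actions.subList(0, 1), actions.subList(1, actions.size()));
  }

  public static void main(String[] args) {
    retry(new Group(List.of("put-a", "put-b", "get-c"), 1));
  }
}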
[28/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html
index c4e8c8b..aa58108 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html
@@ -82,602 +82,613 @@
 074  public static final String USER_COPROCESSORS_ENABLED_CONF_KEY =
 075    "hbase.coprocessor.user.enabled";
 076  public static final boolean DEFAULT_USER_COPROCESSORS_ENABLED = true;
-077
-078  private static final Logger LOG = LoggerFactory.getLogger(CoprocessorHost.class);
-079  protected Abortable abortable;
-080  /** Ordered set of loaded coprocessors with lock */
-081  protected final SortedList<E> coprocEnvironments =
-082      new SortedList<>(new EnvironmentPriorityComparator());
-083  protected Configuration conf;
-084  // unique file prefix to use for local copies of jars when classloading
-085  protected String pathPrefix;
-086  protected AtomicInteger loadSequence = new AtomicInteger();
-087
-088  public CoprocessorHost(Abortable abortable) {
-089    this.abortable = abortable;
-090    this.pathPrefix = UUID.randomUUID().toString();
-091  }
-092
-093  /**
-094   * Not to be confused with the per-object _coprocessors_ (above),
-095   * coprocessorNames is static and stores the set of all coprocessors ever
-096   * loaded by any thread in this JVM. It is strictly additive: coprocessors are
-097   * added to coprocessorNames, by checkAndLoadInstance() but are never removed, since
-098   * the intention is to preserve a history of all loaded coprocessors for
-099   * diagnosis in case of server crash (HBASE-4014).
-100   */
-101  private static Set<String> coprocessorNames =
-102      Collections.synchronizedSet(new HashSet<String>());
-103
-104  public static Set<String> getLoadedCoprocessors() {
-105    synchronized (coprocessorNames) {
-106      return new HashSet<>(coprocessorNames);
-107    }
-108  }
-109
-110  /**
-111   * Used to create a parameter to the HServerLoad constructor so that
-112   * HServerLoad can provide information about the coprocessors loaded by this
-113   * regionserver.
-114   * (HBASE-4070: Improve region server metrics to report loaded coprocessors
-115   * to master).
-116   */
-117  public Set<String> getCoprocessors() {
-118    Set<String> returnValue = new TreeSet<>();
-119    for (E e: coprocEnvironments) {
-120      returnValue.add(e.getInstance().getClass().getSimpleName());
-121    }
-122    return returnValue;
-123  }
-124
-125  /**
-126   * Load system coprocessors once only. Read the class names from configuration.
-127   * Called by constructor.
-128   */
-129  protected void loadSystemCoprocessors(Configuration conf, String confKey) {
-130    boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY,
-131      DEFAULT_COPROCESSORS_ENABLED);
-132    if (!coprocessorsEnabled) {
-133      return;
-134    }
-135
-136    Class<?> implClass;
-137
-138    // load default coprocessors from configure file
-139    String[] defaultCPClasses = conf.getStrings(confKey);
-140    if (defaultCPClasses == null || defaultCPClasses.length == 0)
-141      return;
-142
-143    int priority = Coprocessor.PRIORITY_SYSTEM;
-144    for (String className : defaultCPClasses) {
-145      className = className.trim();
-146      if (findCoprocessor(className) != null) {
-147        // If already loaded will just continue
-148        LOG.warn("Attempted duplicate loading of " + className + "; skipped");
-149        continue;
-150      }
-151      ClassLoader cl = this.getClass().getClassLoader();
-152      Thread.currentThread().setContextClassLoader(cl);
-153      try {
-154        implClass = cl.loadClass(className);
-155        // Add coprocessors as we go to guard against case where a coprocessor is specified twice
-156        // in the configuration
-157        E env = checkAndLoadInstance(implClass, priority, conf);
-158        if (env != null) {
-159          this.coprocEnvironments.add(env);
-160          LOG.info("System coprocessor {} loaded, priority={}.", className, priority);
-161          ++priority;
-162        }
-163      } catch (Throwable t) {
-164        // We always abort if system coprocessors cannot be loaded
-165        abortServer(className, t);
-166      }
-167    }
-168  }
-169
-170  /**
-171   * Load a coprocessor implementation into the host
-172   * @param path path to implementation jar
-173   * @param className the main class name
-174   * @param priority chaining priority
-175   * @param conf configuration for coprocessor
-176   * 
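As context for loadSystemCoprocessors() above: the class names it reads come from a comma-separated configuration key, guarded by the enabled flags whose constants appear at the top of the hunk. A hedged sketch of supplying them (the hbase.coprocessor.* keys are the standard HBase ones; org.example.MyObserver is hypothetical):

import org.apache.hadoop.conf.Configuration;

// Illustrative only: how the class names read by loadSystemCoprocessors()
// are typically supplied via configuration.
public class CoprocessorConfExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Comma-separated, loaded in order; each gets an increasing system priority.
    conf.set("hbase.coprocessor.region.classes",
        "org.example.MyObserver,org.example.MyOtherObserver");
    conf.setBoolean("hbase.coprocessor.enabled", true); // COPROCESSORS_ENABLED_CONF_KEY
    System.out.println(String.join("\n", conf.getStrings("hbase.coprocessor.region.classes")));
  }
}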
[28/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncConnectionImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncConnectionImpl.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncConnectionImpl.html
index b3df06f..1c79b4b 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncConnectionImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncConnectionImpl.html
@@ -325,6 +325,6 @@



-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncHBaseAdmin.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncHBaseAdmin.html
index 2918e13..63d211c 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncHBaseAdmin.html
@@ -120,6 +120,6 @@



-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.Callable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.Callable.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.Callable.html
index e71ca45..f94dbcc 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.Callable.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.Callable.html
@@ -197,6 +197,6 @@



-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.html
index f1e9b9e..2a3c6e0 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.html
@@ -162,6 +162,6 @@



-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMetaRegionLocator.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMetaRegionLocator.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMetaRegionLocator.html
index 144878f..08b6048 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMetaRegionLocator.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMetaRegionLocator.html
@@ -162,6 +162,6 @@



-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncNonMetaRegionLocator.LocateRequest.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncNonMetaRegionLocator.LocateRequest.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncNonMetaRegionLocator.LocateRequest.html
index c82494f..77d960b 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncNonMetaRegionLocator.LocateRequest.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncNonMetaRegionLocator.LocateRequest.html
@@ -220,6 +220,6 @@



-Copyright © 2007–2018
[28/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
index f88bae9..6871468 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":9,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":42,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":42,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":42,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":42,"i72":9};
+var methods = 
{"i0":10,"i1":10,"i2":9,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":42,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":42,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":42,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":42,"i73":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -115,7 +115,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class RegionCoprocessorHost
+public class RegionCoprocessorHost
 extends CoprocessorHost<RegionCoprocessor, RegionCoprocessorEnvironment>
 Implements the coprocessor environment and runtime support for coprocessors
  loaded within a Region.
@@ -289,20 +289,25 @@ extends
     Result result)

+List<Pair<Cell, Cell>>
+postAppendBeforeWAL(Mutation mutation,
+    List<Pair<Cell, Cell>> cellPairs)
+
 void
 postBatchMutate(MiniBatchOperationInProgress<Mutation> miniBatchOp)

 void
 postBatchMutateIndispensably(MiniBatchOperationInProgress<Mutation> miniBatchOp,
     boolean success)

 void
 postBulkLoadHFile(List<Pair<byte[], String>> familyPaths,
     Map<byte[], List<org.apache.hadoop.fs.Path>> map)

 boolean
 postCheckAndDelete(byte[] row,
     byte[] family,
@@ -312,7 +317,7 @@
     Delete delete,
     boolean result)

 boolean
 postCheckAndPut(byte[] row,
     byte[] family,
@@ -322,23 +327,23 @@
     Put put,
     boolean result)

 void
 postClose(boolean abortRequested)
 Invoked after a region is closed

 void
 postCloseRegionOperation(Region.Operation op)

 void
 postCommitStoreFile(byte[] family,
     org.apache.hadoop.fs.Path srcPath,
     org.apache.hadoop.fs.Path dstPath)

 void
 postCompact(HStore store,
     HStoreFile resultFile,
@@ -348,7 +353,7 @@
 Called after the store compaction has completed.

 void
 postCompactSelection(HStore store,
     List<HStoreFile> selected,
@@ -359,31 +364,31 @@

 void
 postDelete(Delete delete,
     WALEdit edit,
     Durability durability)

 void
 postEndpointInvocation(com.google.protobuf.Service service,
     String
[28/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
index 6e007de..e258dca 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":9,"i43":9,"i44":9,"i45":9,"i46":9,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":9,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":9,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":10
 
,"i110":9,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":41,"i118":41,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":9,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":42,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10,"i173":10,"i174":10,"i175":10,"i176":10,"i177":9,"i178":10,"i179":10,"i180":10,"i181":10,"i182":10,"i183":10,"i184":10,"i185":10,"i186":9,"i187":10,"i188":10,"i189":9,"i190":9,"i191":9,"i192":9,"i193":9,"i194":9,"i195":9,"i196":9,"i197":9,"i198":9,"i199":10,"i200":10,"i201":10,"i202":10,"i203":10,"i204":10,"i205":10,"i206":10,"i207":10,"i208":9,"i209":10,"i210":10,"
 
i211":10,"i212":10,"i213":10,"i214":10,"i215":10,"i216":10,"i217":10,"i218":10,"i219":10,"i220":10,"i221":10,"i222":10,"i223":10,"i224":10,"i225":10,"i226":10,"i227":10,"i228":10,"i229":10,"i230":10,"i231":10,"i232":10,"i233":10,"i234":10,"i235":10,"i236":10,"i237":10,"i238":10,"i239":10,"i240":9,"i241":9,"i242":10,"i243":10,"i244":10,"i245":10,"i246":10,"i247":10,"i248":10,"i249":10,"i250":10,"i251":10,"i252":10,"i253":10,"i254":9,"i255":10,"i256":10,"i257":10,"i258":10,"i259":10,"i260":10,"i261":10,"i262":9,"i263":10,"i264":10,"i265":10,"i266":10,"i267":10,"i268":9,"i269":10,"i270":10,"i271":10,"i272":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":9,"i43":9,"i44":9,"i45":9,"i46":9,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":9,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":9,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":10
 

[28/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
index ea05301..26a93dd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
@@ -269,3590 +269,3574 @@
 261   */
 262  protected ClusterConnection clusterConnection;
 263
-264  /*
-265   * Long-living meta table locator, which is created when the server is started and stopped
-266   * when server shuts down. References to this locator shall be used to perform according
-267   * operations in EventHandlers. Primary reason for this decision is to make it mockable
-268   * for tests.
-269   */
-270  protected MetaTableLocator metaTableLocator;
-271
-272  /**
-273   * Go here to get table descriptors.
-274   */
-275  protected TableDescriptors tableDescriptors;
-276
-277  // Replication services. If no replication, this handler will be null.
-278  protected ReplicationSourceService replicationSourceHandler;
-279  protected ReplicationSinkService replicationSinkHandler;
-280
-281  // Compactions
-282  public CompactSplit compactSplitThread;
-283
-284  /**
-285   * Map of regions currently being served by this region server. Key is the
-286   * encoded region name.  All access should be synchronized.
-287   */
-288  protected final Map<String, HRegion> onlineRegions = new ConcurrentHashMap<>();
-289
-290  /**
-291   * Map of encoded region names to the DataNode locations they should be hosted on
-292   * We store the value as InetSocketAddress since this is used only in HDFS
-293   * API (create() that takes favored nodes as hints for placing file blocks).
-294   * We could have used ServerName here as the value class, but we'd need to
-295   * convert it to InetSocketAddress at some point before the HDFS API call, and
-296   * it seems a bit weird to store ServerName since ServerName refers to RegionServers
-297   * and here we really mean DataNode locations.
-298   */
-299  protected final Map<String, InetSocketAddress[]> regionFavoredNodesMap =
-300      new ConcurrentHashMap<>();
-301
-302  // Leases
-303  protected Leases leases;
+264  /**
+265   * Go here to get table descriptors.
+266   */
+267  protected TableDescriptors tableDescriptors;
+268
+269  // Replication services. If no replication, this handler will be null.
+270  protected ReplicationSourceService replicationSourceHandler;
+271  protected ReplicationSinkService replicationSinkHandler;
+272
+273  // Compactions
+274  public CompactSplit compactSplitThread;
+275
+276  /**
+277   * Map of regions currently being served by this region server. Key is the
+278   * encoded region name.  All access should be synchronized.
+279   */
+280  protected final Map<String, HRegion> onlineRegions = new ConcurrentHashMap<>();
+281
+282  /**
+283   * Map of encoded region names to the DataNode locations they should be hosted on
+284   * We store the value as InetSocketAddress since this is used only in HDFS
+285   * API (create() that takes favored nodes as hints for placing file blocks).
+286   * We could have used ServerName here as the value class, but we'd need to
+287   * convert it to InetSocketAddress at some point before the HDFS API call, and
+288   * it seems a bit weird to store ServerName since ServerName refers to RegionServers
+289   * and here we really mean DataNode locations.
+290   */
+291  protected final Map<String, InetSocketAddress[]> regionFavoredNodesMap =
+292      new ConcurrentHashMap<>();
+293
+294  // Leases
+295  protected Leases leases;
+296
+297  // Instance of the hbase executor executorService.
+298  protected ExecutorService executorService;
+299
+300  // If false, the file system has become unavailable
+301  protected volatile boolean fsOk;
+302  protected HFileSystem fs;
+303  protected HFileSystem walFs;
 304
-305  // Instance of the hbase executor executorService.
-306  protected ExecutorService executorService;
-307
-308  // If false, the file system has become unavailable
-309  protected volatile boolean fsOk;
-310  protected HFileSystem fs;
-311  protected HFileSystem walFs;
-312
-313  // Set when a report to the master comes back with a message asking us to
-314  // shutdown. Also set by call to stop when debugging or running unit tests
-315  // of HRegionServer in isolation.
-316  private volatile boolean stopped = false;
-317
-318  // Go down hard. Used if file system becomes unavailable and also in
-319  // debugging and unit tests.
-320  private volatile boolean abortRequested;
-321  public static final 
[28/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/master/assignment/ServerState.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/assignment/ServerState.html b/devapidocs/org/apache/hadoop/hbase/master/assignment/ServerState.html
index 5267fdd..b2da9e3 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/ServerState.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/ServerState.html
@@ -140,26 +140,31 @@ extends java.lang.Enum
 Enum Constant and Description

+CRASHED
+Indicate that the server has crashed, i.e., we have already scheduled a SCP for it.
+
 OFFLINE
 WAL splitting done.

 ONLINE
 Initial state.

 SPLITTING
 Server expired/crashed.

 SPLITTING_META
 Only server which carries meta can have this state.

 SPLITTING_META_DONE
 Indicate that the meta splitting is done.

@@ -231,13 +236,23 @@ the order they are declared.
 Initial state. Available.

+CRASHED
+public static final ServerState CRASHED
+Indicate that the server has crashed, i.e., we have already scheduled a SCP for it.
+
 SPLITTING_META
-public static final ServerState SPLITTING_META
+public static final ServerState SPLITTING_META
 Only server which carries meta can have this state. We will split wal for meta and then
  assign meta first before splitting other wals.

@@ -248,7 +263,7 @@
 SPLITTING_META_DONE
-public static final ServerState SPLITTING_META_DONE
+public static final ServerState SPLITTING_META_DONE
 Indicate that the meta splitting is done. We need this state so that the UnassignProcedure
  for meta can safely quit. See the comments in UnassignProcedure.remoteCallFailed for more
  details.

@@ -260,7 +275,7 @@
 SPLITTING
-public static final ServerState SPLITTING
+public static final ServerState SPLITTING
 Server expired/crashed. Currently undergoing WAL splitting.

@@ -270,7 +285,7 @@
 OFFLINE
-public static final ServerState OFFLINE
+public static final ServerState OFFLINE
 WAL splitting done. This state will be used to tell the UnassignProcedure that it can safely
  quit. See the comments in UnassignProcedure.remoteCallFailed for more details.

@@ -289,7 +304,7 @@
 values
-public static ServerState[] values()
+public static ServerState[] values()
 Returns an array containing the constants of this enum type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -309,7 +324,7 @@ for (ServerState c : ServerState.values())

 valueOf
-public static ServerState valueOf(String name)
+public static ServerState valueOf(String name)
 Returns the enum constant of this type with the specified name.
 The string must match exactly an identifier used to declare an
 enum constant in this type.  (Extraneous whitespace characters are 
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/master/assignment/ServerStateNode.ServerReportEvent.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/assignment/ServerStateNode.ServerReportEvent.html b/devapidocs/org/apache/hadoop/hbase/master/assignment/ServerStateNode.ServerReportEvent.html
deleted file mode 100644
index f32c924..000
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/ServerStateNode.ServerReportEvent.html
+++ /dev/null
@@ -1,254 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-ServerStateNode.ServerReportEvent (Apache HBase 3.0.0-SNAPSHOT API)
-[standard Javadoc navigation header and summary/detail links]
-org.apache.hadoop.hbase.master.assignment
-Class ServerStateNode.ServerReportEvent
[28/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/devapidocs/org/apache/hadoop/hbase/security/access/package-summary.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/security/access/package-summary.html b/devapidocs/org/apache/hadoop/hbase/security/access/package-summary.html
index 85862c4..f46c7df 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/package-summary.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/package-summary.html
@@ -138,6 +138,18 @@

+AuthManager
+Performs authorization checks for a given user's assigned permissions.
+
+AuthManager.PermissionCache<T extends Permission>
+Cache of permissions, it is thread safe.
+
 AuthResult
 Represents the result of an authorization check for logging and error
@@ -155,6 +167,18 @@

+GlobalPermission
+Represents an authorization for access whole cluster.
+
+NamespacePermission
+Represents an authorization for access for the given namespace.
+
 Permission
 Base permissions instance representing the ability to perform a given set
@@ -175,16 +199,6 @@

-TableAuthManager
-Performs authorization checks for a given user's assigned permissions
-
-TableAuthManager.PermissionCache<T extends Permission>
-
 TablePermission
 Represents an authorization for access for the given actions, optionally
@@ -195,8 +209,7 @@

 UserPermission
-Represents an authorization for access over the given table, column family
- plus qualifier, for the given user.
+UserPermission consists of a user name and a permission.

@@ -229,6 +242,10 @@
 Permission.Action

+Permission.Scope
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
index f594c43..b6b603e 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
@@ -86,6 +86,8 @@
 org.apache.hadoop.hbase.security.access.AccessController (implements org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService.Interface, org.apache.hadoop.hbase.coprocessor.BulkLoadObserver, org.apache.hadoop.hbase.coprocessor.EndpointObserver, org.apache.hadoop.hbase.coprocessor.MasterCoprocessor, org.apache.hadoop.hbase.coprocessor.MasterObserver, org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, org.apache.hadoop.hbase.coprocessor.RegionObserver, org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor, org.apache.hadoop.hbase.coprocessor.RegionServerObserver)
 org.apache.hadoop.hbase.security.access.AccessControlLists
 org.apache.hadoop.hbase.security.access.AccessControlUtil
+org.apache.hadoop.hbase.security.access.AuthManager (implements java.io.Closeable)
+org.apache.hadoop.hbase.security.access.AuthManager.PermissionCache<T>
 org.apache.hadoop.hbase.security.access.AuthResult
 org.apache.hadoop.hbase.security.access.AuthResult.Params
 org.apache.hadoop.hbase.security.access.CoprocessorWhitelistMasterObserver (implements org.apache.hadoop.hbase.coprocessor.MasterCoprocessor, org.apache.hadoop.hbase.coprocessor.MasterObserver)
@@ -104,22 +106,19 @@

 org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil
-org.apache.hadoop.hbase.security.access.TableAuthManager (implements java.io.Closeable)
-org.apache.hadoop.hbase.security.access.TableAuthManager.PermissionCache<T>
 org.apache.hadoop.hbase.security.User

 org.apache.hadoop.hbase.security.access.AccessChecker.InputUser

+org.apache.hadoop.hbase.security.access.UserPermission
 org.apache.hadoop.io.VersionedWritable (implements org.apache.hadoop.io.Writable)

 org.apache.hadoop.hbase.security.access.Permission

-org.apache.hadoop.hbase.security.access.TablePermission
-
-org.apache.hadoop.hbase.security.access.UserPermission
-
-
+org.apache.hadoop.hbase.security.access.GlobalPermission
+org.apache.hadoop.hbase.security.access.NamespacePermission
+org.apache.hadoop.hbase.security.access.TablePermission

@@ -143,8 +142,9 @@
 java.lang.Enum<E> (implements java.lang.Comparable<T>, 
[28/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.

2018-10-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
index 0af8acd..c5f21ac 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
@@ -645,1615 +645,1597 @@
 637        proc.afterReplay(getEnvironment());
 638      }
 639    });
-640
-641    // 4. Push the procedures to the timeout executor
-642    waitingTimeoutList.forEach(proc -> {
-643      proc.afterReplay(getEnvironment());
-644      timeoutExecutor.add(proc);
-645    });
-646    // 5. restore locks
-647    restoreLocks();
-648    // 6. Push the procedure to the scheduler
-649    failedList.forEach(scheduler::addBack);
-650    runnableList.forEach(p -> {
-651      p.afterReplay(getEnvironment());
-652      if (!p.hasParent()) {
-653        sendProcedureLoadedNotification(p.getProcId());
-654      }
-655      // If the procedure holds the lock, put the procedure in front
-656      // If its parent holds the lock, put the procedure in front
-657      // TODO. Is that possible that its ancestor holds the lock?
-658      // For now, the deepest procedure hierarchy is:
-659      // ModifyTableProcedure -> ReopenTableProcedure ->
-660      // MoveTableProcedure -> Unassign/AssignProcedure
-661      // But ModifyTableProcedure and ReopenTableProcedure won't hold the lock
-662      // So, check parent lock is enough(a tricky case is resovled by HBASE-21384).
-663      // If some one change or add new procedures making 'grandpa' procedure
-664      // holds the lock, but parent procedure don't hold the lock, there will
-665      // be a problem here. We have to check one procedure's ancestors.
-666      // And we need to change LockAndQueue.hasParentLock(Procedure<?> proc) method
-667      // to check all ancestors too.
-668      if (p.isLockedWhenLoading() || (p.hasParent() && procedures
-669          .get(p.getParentProcId()).isLockedWhenLoading())) {
-670        scheduler.addFront(p, false);
-671      } else {
-672        // if it was not, it can wait.
-673        scheduler.addBack(p, false);
-674      }
-675    });
-676    // After all procedures put into the queue, signal the worker threads.
-677    // Otherwise, there is a race condition. See HBASE-21364.
-678    scheduler.signalAll();
-679  }
+640    // 4. restore locks
+641    restoreLocks();
+642
+643    // 5. Push the procedures to the timeout executor
+644    waitingTimeoutList.forEach(proc -> {
+645      proc.afterReplay(getEnvironment());
+646      timeoutExecutor.add(proc);
+647    });
+648
+649    // 6. Push the procedure to the scheduler
+650    failedList.forEach(scheduler::addBack);
+651    runnableList.forEach(p -> {
+652      p.afterReplay(getEnvironment());
+653      if (!p.hasParent()) {
+654        sendProcedureLoadedNotification(p.getProcId());
+655      }
+656      scheduler.addBack(p);
+657    });
+658    // After all procedures put into the queue, signal the worker threads.
+659    // Otherwise, there is a race condition. See HBASE-21364.
+660    scheduler.signalAll();
+661  }
+662
+663  /**
+664   * Initialize the procedure executor, but do not start workers. We will start them later.
+665   * <p/>
+666   * It calls ProcedureStore.recoverLease() and ProcedureStore.load() to recover the lease, and
+667   * ensure a single executor, and start the procedure replay to resume and recover the previous
+668   * pending and in-progress procedures.
+669   * @param numThreads number of threads available for procedure execution.
+670   * @param abortOnCorruption true if you want to abort your service in case a corrupted procedure
+671   *          is found on replay. otherwise false.
+672   */
+673  public void init(int numThreads, boolean abortOnCorruption) throws IOException {
+674    // We have numThreads executor + one timer thread used for timing out
+675    // procedures and triggering periodic procedures.
+676    this.corePoolSize = numThreads;
+677    this.maxPoolSize = 10 * numThreads;
+678    LOG.info("Starting {} core workers (bigger of cpus/4 or 16) with max (burst) worker count={}",
+679        corePoolSize, maxPoolSize);
 680
-681  /**
-682   * Initialize the procedure executor, but do not start workers. We will start them later.
-683   * <p/>
-684   * It calls ProcedureStore.recoverLease() and ProcedureStore.load() to recover the lease, and
-685   * ensure a single executor, and start the procedure replay to resume and recover the previous
-686   * pending and 
[28/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
index c7d99b2..9d1542c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
@@ -382,1357 +382,1365 @@
 374    for (int i = 0; i < this.curFunctionCosts.length; i++) {
 375      curFunctionCosts[i] = tempFunctionCosts[i];
 376    }
-377    LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost + ", functionCost="
-378        + functionCost());
+377    double initCost = currentCost;
+378    double newCost = currentCost;
 379
-380    double initCost = currentCost;
-381    double newCost = currentCost;
-382
-383    long computedMaxSteps;
-384    if (runMaxSteps) {
-385      computedMaxSteps = Math.max(this.maxSteps,
-386          ((long)cluster.numRegions * (long)this.stepsPerRegion * (long)cluster.numServers));
-387    } else {
-388      computedMaxSteps = Math.min(this.maxSteps,
-389          ((long)cluster.numRegions * (long)this.stepsPerRegion * (long)cluster.numServers));
-390    }
-391    // Perform a stochastic walk to see if we can get a good fit.
-392    long step;
-393
-394    for (step = 0; step < computedMaxSteps; step++) {
-395      Cluster.Action action = nextAction(cluster);
-396
-397      if (action.type == Type.NULL) {
-398        continue;
-399      }
-400
-401      cluster.doAction(action);
-402      updateCostsWithAction(cluster, action);
-403
-404      newCost = computeCost(cluster, currentCost);
-405
-406      // Should this be kept?
-407      if (newCost < currentCost) {
-408        currentCost = newCost;
-409
-410        // save for JMX
-411        curOverallCost = currentCost;
-412        for (int i = 0; i < this.curFunctionCosts.length; i++) {
-413          curFunctionCosts[i] = tempFunctionCosts[i];
-414        }
-415      } else {
-416        // Put things back the way they were before.
-417        // TODO: undo by remembering old values
-418        Action undoAction = action.undoAction();
-419        cluster.doAction(undoAction);
-420        updateCostsWithAction(cluster, undoAction);
-421      }
-422
-423      if (EnvironmentEdgeManager.currentTime() - startTime >
-424          maxRunningTime) {
-425        break;
-426      }
-427    }
-428    long endTime = EnvironmentEdgeManager.currentTime();
-429
-430    metricsBalancer.balanceCluster(endTime - startTime);
-431
-432    // update costs metrics
-433    updateStochasticCosts(tableName, curOverallCost, curFunctionCosts);
-434    if (initCost > currentCost) {
-435      plans = createRegionPlans(cluster);
-436      LOG.info("Finished computing new load balance plan. Computation took {}" +
-437        " to try {} different iterations.  Found a solution that moves " +
-438        "{} regions; Going from a computed cost of {}" +
-439        " to a new cost of {}", java.time.Duration.ofMillis(endTime - startTime),
-440        step, plans.size(), initCost, currentCost);
-441      return plans;
-442    }
-443    LOG.info("Could not find a better load balance plan.  Tried {} different configurations in " +
-444      "{}, and did not find anything with a computed cost less than {}", step,
-445      java.time.Duration.ofMillis(endTime - startTime), initCost);
-446    return null;
-447  }
-448
-449  /**
-450   * update costs to JMX
-451   */
-452  private void updateStochasticCosts(TableName tableName, Double overall, Double[] subCosts) {
-453    if (tableName == null) return;
-454
-455    // check if the metricsBalancer is MetricsStochasticBalancer before casting
-456    if (metricsBalancer instanceof MetricsStochasticBalancer) {
-457      MetricsStochasticBalancer balancer = (MetricsStochasticBalancer) metricsBalancer;
-458      // overall cost
-459      balancer.updateStochasticCost(tableName.getNameAsString(),
-460        "Overall", "Overall cost", overall);
-461
-462      // each cost function
-463      for (int i = 0; i < costFunctions.length; i++) {
-464        CostFunction costFunction = costFunctions[i];
-465        String costFunctionName = costFunction.getClass().getSimpleName();
-466        Double costPercent = (overall == 0) ? 0 : (subCosts[i] / overall);
-467        // TODO: cost function may need a specific description
-468        balancer.updateStochasticCost(tableName.getNameAsString(), costFunctionName,
-469          "The percent of " + costFunctionName, costPercent);
-470      }
-471    }
-472  
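The removed balancer body above is a plain accept-if-better stochastic walk: propose a random action, apply it, keep it if the computed cost drops, otherwise undo it, and stop on the step or time budget. A toy version with an invented variance cost function (HBase's real cost is a weighted sum of many cost functions):

import java.util.Random;

// Toy accept-if-better walk; the variance cost is an illustrative stand-in.
public class StochasticWalkSketch {
  public static void main(String[] args) {
    Random rnd = new Random(42);
    int[] regionsPerServer = {12, 1, 3, 8}; // imbalanced cluster
    double cost = cost(regionsPerServer);
    for (int step = 0; step < 10_000; step++) {
      int from = rnd.nextInt(regionsPerServer.length);
      int to = rnd.nextInt(regionsPerServer.length);
      if (from == to || regionsPerServer[from] == 0) {
        continue; // the NULL-action case: nothing to do this step
      }
      regionsPerServer[from]--; // apply the candidate move
      regionsPerServer[to]++;
      double newCost = cost(regionsPerServer);
      if (newCost < cost) {
        cost = newCost; // keep the improvement
      } else {
        regionsPerServer[from]++; // undo, like action.undoAction() above
        regionsPerServer[to]--;
      }
    }
    System.out.println(java.util.Arrays.toString(regionsPerServer) + " cost=" + cost);
  }

  static double cost(int[] counts) {
    double mean = 0;
    for (int c : counts) mean += c;
    mean /= counts.length;
    double var = 0;
    for (int c : counts) var += (c - mean) * (c - mean);
    return var;
  }
}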
[28/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.html
index a281f12..d4b3978 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.html
@@ -25,594 +25,119 @@
  */
 package org.apache.hadoop.hbase.procedure2.store.wal;

-import java.io.IOException;
-import org.apache.hadoop.hbase.procedure2.Procedure;
-import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
-import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-
-/**
- * We keep an in-memory map of the procedures sorted by replay order. (see the details in the
- * beginning of {@link ProcedureWALFormatReader}).
- *
- * <pre>
- *  procedureMap = | A |   | E |   | C |   |   |   |   | G |   |   |
- *                       D       B
- *  replayOrderHead = C -> B -> E -> D -> A -> G
- *
- *  We also have a lazy grouping by "root procedure", and a list of
- *  unlinked procedures. If after reading all the WALs we have unlinked
- *  procedures it means that we had a missing WAL or a corruption.
- *  rootHead = A -> D -> G
- *             B    E
- *             C
- *  unlinkFromLinkList = None
- * </pre>
- */
-class WALProcedureMap {
-
-  private static final Logger LOG = LoggerFactory.getLogger(WALProcedureMap.class);
-
-  private static class Entry {
-    // For bucketed linked lists in hash-table.
-    private Entry hashNext;
-    // child head
-    private Entry childHead;
-    // double-link for rootHead or childHead
-    private Entry linkNext;
-    private Entry linkPrev;
-    // replay double-linked-list
-    private Entry replayNext;
-    private Entry replayPrev;
-    // procedure-infos
-    private Procedure<?> procedure;
-    private ProcedureProtos.Procedure proto;
-    private boolean ready = false;
-
-    public Entry(Entry hashNext) {
-      this.hashNext = hashNext;
-    }
-
-    public long getProcId() {
-      return proto.getProcId();
-    }
-
-    public long getParentId() {
-      return proto.getParentId();
-    }
-
-    public boolean hasParent() {
-      return proto.hasParentId();
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+
+/**
+ * This class is used to track the active procedures when loading procedures from a proc wal file.
+ * <p/>
+ * We will read proc wal files from new to old, but when reading a proc wal file, we will still
+ * read from top to bottom, so there are two groups of methods for this class.
+ * <p/>
+ * The first group is {@link #add(ProcedureProtos.Procedure)} and {@link #remove(long)}. It is used
+ * when reading a proc wal file. In these methods, for the same procedure, typically the one that
+ * comes later should win; please see the comment for
+ * {@link #isIncreasing(ProcedureProtos.Procedure, ProcedureProtos.Procedure)} for the exceptions.
+ * <p/>
+ * The second group is {@link #merge(WALProcedureMap)}. We will have a global
+ * {@link WALProcedureMap} to hold the global active procedures, and a local {@link WALProcedureMap}
+ * to hold the active procedures for the current proc wal file. When we finish reading a proc
+ * wal file, we will merge the local one into the global one, by calling the
+ * {@link #merge(WALProcedureMap)} method of the global one and passing the local one in. In this
+ * method, for the same procedure, the one that comes earlier wins, as we read the proc wal files
+ * from new to old (the reverse order).
+ */
+@InterfaceAudience.Private
+class WALProcedureMap {
+
+  private static final Logger LOG = LoggerFactory.getLogger(WALProcedureMap.class);
+
+  private final Map<Long, ProcedureProtos.Procedure> procMap = new HashMap<>();
+
+  private long minModifiedProcId = Long.MAX_VALUE;
+
+  private long maxModifiedProcId = Long.MIN_VALUE;
+
+  private void trackProcId(long procId) {
+    minModifiedProcId =
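The two conflict rules in the class comment are easy to get backwards, so a tiny sketch may help: within one wal file the record read later for a proc id overwrites the earlier one, while merging a per-file map into the global map keeps the entry that is already there, because it came from a newer file. Stub types only; the real class stores ProcedureProtos.Procedure messages:

    import java.util.HashMap;
    import java.util.Map;

    class ProcMapMergeSketch {
      record Proc(long procId, long lastUpdate) {}

      private final Map<Long, Proc> procMap = new HashMap<>();

      // Rule 1 (within one wal file, read top to bottom): the later record wins.
      void add(Proc p) {
        procMap.put(p.procId(), p);
      }

      // Rule 2 (across wal files, read new to old): on merge, the entry already
      // present in the global map came from a newer file, so it wins.
      void merge(ProcMapMergeSketch local) {
        local.procMap.forEach(this.procMap::putIfAbsent);
      }
    }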
[28/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html
index d89e994..b6e5205 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-abstract static class StochasticLoadBalancer.CostFromRegionLoadFunction
+abstract static class StochasticLoadBalancer.CostFromRegionLoadFunction
 extends StochasticLoadBalancer.CostFunction
 Base class that allows writing cost functions from a rolling average of some
 number from RegionLoad.
@@ -250,7 +250,7 @@ extends

 clusterStatus
-private ClusterMetrics clusterStatus
+private ClusterMetrics clusterStatus

@@ -259,7 +259,7 @@ extends

 loads
-private Map<String, Deque<BalancerRegionLoad>> loads
+private Map<String, Deque<BalancerRegionLoad>> loads

@@ -268,7 +268,7 @@ extends

 stats
-private double[] stats
+private double[] stats

@@ -285,7 +285,7 @@ extends

 CostFromRegionLoadFunction
-CostFromRegionLoadFunction(org.apache.hadoop.conf.Configuration conf)
+CostFromRegionLoadFunction(org.apache.hadoop.conf.Configuration conf)

@@ -302,7 +302,7 @@ extends

 setClusterMetrics
-void setClusterMetrics(ClusterMetrics status)
+void setClusterMetrics(ClusterMetrics status)

@@ -311,7 +311,7 @@ extends

 setLoads
-void setLoads(Map<String, Deque<BalancerRegionLoad>> l)
+void setLoads(Map<String, Deque<BalancerRegionLoad>> l)

@@ -320,7 +320,7 @@ extends

 cost
-double cost()
+double cost()

 Specified by:
 cost in class StochasticLoadBalancer.CostFunction
@@ -333,7 +333,7 @@ extends

 getRegionLoadCost
-protected double getRegionLoadCost(Collection<BalancerRegionLoad> regionLoadList)
+protected double getRegionLoadCost(Collection<BalancerRegionLoad> regionLoadList)

@@ -342,7 +342,7 @@ extends

 getCostFromRl
-protected abstract double getCostFromRl(BalancerRegionLoad rl)
+protected abstract double getCostFromRl(BalancerRegionLoad rl)
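getCostFromRl(BalancerRegionLoad) is the single hook a concrete cost function must supply; getRegionLoadCost then averages that number over the retained window of load samples. A self-contained sketch of that division of labor, with stand-in types rather than the HBase classes:

    import java.util.Collection;

    abstract class CostFromLoadSketch {
      interface RegionLoad { long readRequestsCount(); } // stand-in sample type

      // Mirrors getRegionLoadCost: average the per-sample number over the window.
      protected double regionLoadCost(Collection<? extends RegionLoad> window) {
        return window.stream().mapToDouble(this::costFromRl).average().orElse(0.0);
      }

      // Mirrors getCostFromRl: extract one number from a single load sample.
      protected abstract double costFromRl(RegionLoad rl);
    }

    class ReadRequestCostSketch extends CostFromLoadSketch {
      @Override
      protected double costFromRl(RegionLoad rl) {
        return rl.readRequestsCount();
      }
    }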
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
index 1318a28..3722d00 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-abstract static class StochasticLoadBalancer.CostFunction
+abstract static class 

[28/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/index-all.html
--
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index 45ef29f..424a9eb 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -887,14 +887,14 @@
 
 admin - Static variable in class org.apache.hadoop.hbase.client.replication.TestReplicationAdminWithTwoDifferentZKClusters

+admin - Variable in class org.apache.hadoop.hbase.client.RestoreSnapshotFromClientTestBase
+
 admin - Variable in class org.apache.hadoop.hbase.client.TestAdmin1

 admin - Variable in class org.apache.hadoop.hbase.client.TestAdmin2

 admin - Variable in class org.apache.hadoop.hbase.client.TestAsyncAdminBase

-admin - Variable in class org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClient
-
 admin - Variable in class org.apache.hadoop.hbase.client.TestServerLoadDurability

 admin - Variable in class org.apache.hadoop.hbase.client.TestSnapshotCloneIndependence
@@ -4465,7 +4465,17 @@

 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestMobCloneSnapshotFromClientNormal

-CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClient
+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientAfterSplittingRegions
+
+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientAfterTruncate
+
+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientClone
+
+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientGetCompactionState
+
+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientSchemaChange
+
+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientSimple

 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestMobSnapshotCloneIndependence

@@ -4511,7 +4521,17 @@

 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestReplicaWithCluster

-CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClient
+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClientAfterSplittingRegions
+
+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClientAfterTruncate
+
+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClientClone
+
+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClientGetCompactionState
+
+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClientSchemaChange
+
+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClientSimple

 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClientWithRegionReplicas

@@ -5449,6 +5469,8 @@

 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.procedure.TestZKProcedureControllers

+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.procedure2.store.TestBitSetNode
+
 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.procedure2.store.TestProcedureStoreTracker

 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.procedure2.store.wal.TestForceUpdateProcedure
@@ -9287,6 +9309,8 @@

 countRows(Table) - Method in class org.apache.hadoop.hbase.client.CloneSnapshotFromClientTestBase

+countRows(Table, byte[]...) - Method in class org.apache.hadoop.hbase.client.RestoreSnapshotFromClientTestBase
+
 countRows(Table) - Method in class org.apache.hadoop.hbase.client.TestMobCloneSnapshotFromClientAfterSplittingRegion

 countRows(Table) - Method in class org.apache.hadoop.hbase.client.TestMobCloneSnapshotFromClientCloneLinksAfterDelete
@@ -9295,11 +9319,19 @@

 countRows(Table) - Method in class org.apache.hadoop.hbase.client.TestMobCloneSnapshotFromClientNormal

-countRows(Table, byte[]...) - Method in class org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClient
+countRows(Table, byte[]...) - Method in class org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientAfterSplittingRegions

-countRows(Table, byte[]...) - Method in class org.apache.hadoop.hbase.client.TestMobSnapshotCloneIndependence
+countRows(Table, byte[]...) - Method in class org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientAfterTruncate
+
+countRows(Table, byte[]...) - Method in class org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientClone

-countRows(Table, byte[]...) - Method in class org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClient
+countRows(Table, byte[]...) - Method in class

[28/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
index 061ce80..bdfc3f8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
@@ -39,2126 +39,2163 @@
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
-import org.apache.hadoop.hbase.log.HBaseMarkers;
-import org.apache.hadoop.hbase.procedure2.Procedure.LockState;
-import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-import org.apache.hadoop.hbase.procedure2.util.StringUtils;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.IdLock;
-import org.apache.hadoop.hbase.util.NonceKey;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+import java.util.concurrent.Executor;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
+import org.apache.hadoop.hbase.log.HBaseMarkers;
+import org.apache.hadoop.hbase.procedure2.Procedure.LockState;
+import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
+import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureStoreListener;
+import org.apache.hadoop.hbase.procedure2.util.StringUtils;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.IdLock;
+import org.apache.hadoop.hbase.util.NonceKey;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

-import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-
-/**
- * Thread Pool that executes the submitted procedures.
- * The executor has a ProcedureStore associated.
- * Each operation is logged and on restart the pending procedures are resumed.
-
-
- * Unless the Procedure code throws an error (e.g. invalid user input)
- * the procedure will complete (at some point in time). On restart the pending
- * procedures are resumed and the ones that failed will be rolled back.
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
+
+/**
+ * Thread Pool that executes the submitted procedures.
+ * The executor has a ProcedureStore associated.
+ * Each operation is logged and on restart the pending procedures are resumed.
 *
- * The user can add procedures to the executor via submitProcedure(proc),
- * check for the finished state via isFinished(procId),
- * and get the result via getResult(procId).
- */
-@InterfaceAudience.Private
-public class ProcedureExecutor<TEnvironment> {
-  private static
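The removed javadoc names the whole client contract: submit a procedure, poll isFinished, then read the result. The same call pattern in isolation, against a stub interface rather than the real HBase executor (all types here are stand-ins):

    interface ProcExecutorStub<R> {
      long submitProcedure(Runnable proc); // returns the procId
      boolean isFinished(long procId);
      R getResult(long procId);
    }

    class ProcClientSketch {
      static <R> R runAndWait(ProcExecutorStub<R> executor, Runnable proc)
          throws InterruptedException {
        long procId = executor.submitProcedure(proc); // 1. submit
        while (!executor.isFinished(procId)) {        // 2. poll for completion
          Thread.sleep(100);
        }
        return executor.getResult(procId);            // 3. fetch the result
      }
    }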

[28/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.LockState.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.LockState.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.LockState.html
index c7f1e58..c74635e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.LockState.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.LockState.html
@@ -164,897 +164,923 @@
   */
  private volatile boolean bypass = false;

-  public boolean isBypass() {
-    return bypass;
-  }
-
-  /**
-   * Set the bypass to true.
-   * Only called in {@link ProcedureExecutor#bypassProcedure(long, long, boolean)} for now.
-   * DO NOT use this method alone, since we can't just bypass one single procedure. We need to
-   * bypass its ancestor too, so we make it package private.
-   */
-  void bypass() {
-    this.bypass = true;
-  }
+  /**
+   * Indicates whether we need to persist the procedure to ProcedureStore after execution. Defaults
+   * to true, and the implementation can call {@link #skipPersistence()} to let the framework skip
+   * the persistence of the procedure.
+   * <p/>
+   * This is useful when the procedure is in error and you want to retry later. The retry interval
+   * and the number of retries are usually not critical, so skipping the persistence can save some
+   * resources, and also speed up the restart processing.
+   * <p/>
+   * Notice that this value will be reset to true every time before execution. And when rolling
+   * back we do not test this value.
+   */
+  private boolean persist = true;

-  /**
-   * The main code of the procedure. It must be idempotent since execute()
-   * may be called multiple times in case of machine failure in the middle
-   * of the execution.
-   * @param env the environment passed to the ProcedureExecutor
-   * @return a set of sub-procedures to run, or ourselves if there is more work to do, or null if
-   *         the procedure is done.
-   * @throws ProcedureYieldException the procedure will be added back to the queue and retried later.
-   * @throws InterruptedException the procedure will be added back to the queue and retried later.
-   * @throws ProcedureSuspendedException Signal to the executor that Procedure has suspended itself
-   *         and has set itself up waiting for an external event to wake it back up again.
-   */
-  protected abstract Procedure<TEnvironment>[] execute(TEnvironment env)
-    throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException;
-
-  /**
-   * The code to undo what was done by the execute() code.
-   * It is called when the procedure or one of the sub-procedures failed or an
-   * abort was requested. It should cleanup all the resources created by
-   * the execute() call. The implementation must be idempotent since rollback()
-   * may be called multiple times in case of machine failure in the middle
-   * of the execution.
-   * @param env the environment passed to the ProcedureExecutor
-   * @throws IOException temporary failure, the rollback will retry later
-   * @throws InterruptedException the procedure will be added back to the queue and retried later
-   */
-  protected abstract void rollback(TEnvironment env)
-    throws IOException, InterruptedException;
-
-  /**
-   * The abort() call is asynchronous and each procedure must decide how to deal
-   * with it, if they want to be abortable. The simplest implementation
-   * is to have an AtomicBoolean set in the abort() method and then have the execute()
-   * check whether the abort flag is set or not.
-   * abort() may be called multiple times from the client, so the implementation
-   * must be idempotent.
-   *
-   * <p>NOTE: abort() is not like Thread.interrupt(). It is just a notification
-   * that allows the procedure implementor to abort.
-   */
-  protected abstract boolean abort(TEnvironment env);
-
-  /**
-   * The user-level code of the procedure may have some state to
-   * persist (e.g. input arguments or current position in the processing state) to
-   * be able to resume on failure.
-   * @param serializer stores the serializable state
-   */
-  protected abstract void serializeStateData(ProcedureStateSerializer serializer)
-    throws IOException;
-
-  /**
-   * Called on store load to allow the user to decode the previously serialized
-   * state.
-   * @param serializer contains the serialized state
-   */
-  protected abstract void deserializeStateData(ProcedureStateSerializer serializer)
-    throws IOException;
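Those five abstract methods are the complete contract a concrete procedure implements. A compact sketch of how they interact, using plain stand-ins for the procedure2 types (note the idempotence of execute() and rollback(), and abort() acting as a mere flag):

    import java.util.concurrent.atomic.AtomicBoolean;

    class ProcedureShapeSketch {
      private final AtomicBoolean aborted = new AtomicBoolean(false);
      private int step = 0; // position in the processing state; this is what gets persisted

      // execute(): idempotent forward work; null means the procedure is done.
      Object[] execute(Object env) {
        if (aborted.get()) {
          throw new IllegalStateException("aborted by client");
        }
        step++; // one idempotent unit of work
        return step < 3 ? new Object[] { this } : null;
      }

      // rollback(): idempotent undo of whatever execute() created so far.
      void rollback(Object env) {
        step = 0;
      }

      // abort(): just a notification flag that execute() checks on its next run.
      boolean abort(Object env) {
        return aborted.compareAndSet(false, true);
      }

      // serialize/deserialize: persist the position so a restarted executor can resume.
      byte[] serializeStateData() { return new byte[] { (byte) step }; }
      void deserializeStateData(byte[] data) { this.step = data[0]; }
    }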

[28/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index cab645e..9e659a0 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.LimitedPrivate(value="Tools")
-public class HRegionServer
+public class HRegionServer
 extends HasThread
 implements RegionServerServices, LastSequenceId, ConfigurationObserver
 HRegionServer makes a set of HRegions available to clients. It checks in with
@@ -1539,7 +1539,7 @@ implements

 INIT_PAUSE_TIME_MS
-private static final int INIT_PAUSE_TIME_MS
+private static final int INIT_PAUSE_TIME_MS

 See Also:
 Constant Field Values
@@ -1552,7 +1552,7 @@ implements

 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG

@@ -1561,7 +1561,7 @@ implements

 TEST_SKIP_REPORTING_TRANSITION
-public static boolean TEST_SKIP_REPORTING_TRANSITION
+public static boolean TEST_SKIP_REPORTING_TRANSITION
 For testing only! Set to true to skip notifying region assignment to master.

@@ -1571,7 +1571,7 @@ implements

 regionsInTransitionInRS
-protected final ConcurrentMap<byte[], Boolean> regionsInTransitionInRS
+protected final ConcurrentMap<byte[], Boolean> regionsInTransitionInRS

@@ -1580,7 +1580,7 @@ implements

 cacheFlusher
-protected MemStoreFlusher cacheFlusher
+protected MemStoreFlusher cacheFlusher

@@ -1589,7 +1589,7 @@ implements

 hMemManager
-protected HeapMemoryManager hMemManager
+protected HeapMemoryManager hMemManager

@@ -1598,7 +1598,7 @@ implements

 clusterConnection
-protected ClusterConnection clusterConnection
+protected ClusterConnection clusterConnection
 Cluster connection to be shared by services.
 Initialized at server startup and closed when server shuts down.
 Clients must never close it explicitly.
@@ -1613,7 +1613,7 @@ implements

 metaTableLocator
-protected MetaTableLocator metaTableLocator
+protected MetaTableLocator metaTableLocator

@@ -1622,7 +1622,7 @@ implements

 tableDescriptors
-protected TableDescriptors tableDescriptors
+protected TableDescriptors tableDescriptors
 Go here to get table descriptors.

@@ -1632,7 +1632,7 @@ implements

 replicationSourceHandler
-protected ReplicationSourceService replicationSourceHandler
+protected ReplicationSourceService replicationSourceHandler

@@ -1641,7 +1641,7 @@ implements

 replicationSinkHandler
-protected ReplicationSinkService replicationSinkHandler
+protected ReplicationSinkService replicationSinkHandler

@@ -1650,7 +1650,7 @@ implements

 compactSplitThread
-public CompactSplit compactSplitThread
+public CompactSplit compactSplitThread

@@ -1659,7 +1659,7 @@ implements

 onlineRegions
-protected final Map<String, HRegion> onlineRegions
+protected final Map<String, HRegion> onlineRegions
 Map of regions currently being served by this region server. Key is the
 encoded region name. All access should be synchronized.

@@ -1670,7 +1670,7 @@ implements

 regionFavoredNodesMap
-protected final Map<String, InetSocketAddress[]> regionFavoredNodesMap
+protected final Map<String, InetSocketAddress[]>

[28/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/executor/ExecutorService.ExecutorStatus.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/executor/ExecutorService.ExecutorStatus.html b/devapidocs/src-html/org/apache/hadoop/hbase/executor/ExecutorService.ExecutorStatus.html
index 49f081b..33c9cc0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/executor/ExecutorService.ExecutorStatus.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/executor/ExecutorService.ExecutorStatus.html
@@ -35,309 +35,328 @@
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.monitoring.ThreadMonitoring;
+import java.util.concurrent.Executors;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.hadoop.hbase.monitoring.ThreadMonitoring;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
-import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-
-/**
- * This is a generic executor service. This component abstracts a
- * threadpool, a queue to which {@link EventType}s can be submitted,
- * and a <code>Runnable</code> that handles the object that is added to the queue.
- *
- * <p>In order to create a new service, create an instance of this class and
- * then do: <code>instance.startExecutorService("myService");</code>. When done
- * call {@link #shutdown()}.
- *
- * <p>In order to use the service created above, call
- * {@link #submit(EventHandler)}.
- */
-@InterfaceAudience.Private
-public class ExecutorService {
-  private static final Logger LOG = LoggerFactory.getLogger(ExecutorService.class);
-
-  // hold all the executors created in a map addressable by their names
-  private final ConcurrentHashMap<String, Executor> executorMap = new ConcurrentHashMap<>();
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ListenableFuture;
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ListeningScheduledExecutorService;
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.MoreExecutors;
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+/**
+ * This is a generic executor service. This component abstracts a
+ * threadpool, a queue to which {@link EventType}s can be submitted,
+ * and a <code>Runnable</code> that handles the object that is added to the queue.
+ *
+ * <p>In order to create a new service, create an instance of this class and
+ * then do: <code>instance.startExecutorService("myService");</code>. When done
+ * call {@link #shutdown()}.
+ *
+ * <p>In order to use the service created above, call
+ * {@link #submit(EventHandler)}.
+ */
+@InterfaceAudience.Private
+public class ExecutorService {
+  private static final Logger LOG = LoggerFactory.getLogger(ExecutorService.class);

-  // Name of the server hosting this executor service.
-  private final String servername;
+  // hold all the executors created in a map addressable by their names
+  private final ConcurrentHashMap<String, Executor> executorMap = new ConcurrentHashMap<>();

-  /**
-   * Default constructor.
-   * @param servername Name of the hosting server.
-   */
-  public ExecutorService(final String servername) {
-    super();
-    this.servername = servername;
-  }
-
-  /**
-   * Start an executor service with a given name. If there was a service already
-   * started with the same name, this throws a RuntimeException.
-   * @param name Name of the service to start.
-   */
-  @VisibleForTesting
-  public void startExecutorService(String name, int maxThreads) {
-    if (this.executorMap.get(name) != null) {
-      throw new RuntimeException("An executor service with the name " + name +
-        " is already running!");
-    }
-    Executor
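The javadoc spells out the intended lifecycle: construct, startExecutorService, submit, shutdown. A minimal, runnable analogue of that named-pool registry using plain java.util.concurrent types (this is a sketch of the pattern, not the HBase class, whose submit takes an EventHandler):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    class NamedExecutorsSketch {
      private final ConcurrentMap<String, ExecutorService> pools = new ConcurrentHashMap<>();

      void startExecutorService(String name, int maxThreads) {
        if (pools.putIfAbsent(name, Executors.newFixedThreadPool(maxThreads)) != null) {
          throw new RuntimeException("An executor service with the name " + name
              + " is already running!");
        }
      }

      void submit(String name, Runnable handler) {
        pools.get(name).execute(handler);
      }

      void shutdown() {
        pools.values().forEach(ExecutorService::shutdown);
      }

      public static void main(String[] args) {
        NamedExecutorsSketch svc = new NamedExecutorsSketch();
        svc.startExecutorService("myService", 2);
        svc.submit("myService", () -> System.out.println("event handled"));
        svc.shutdown();
      }
    }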

[28/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
index a5789e0..93a57cb 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
@@ -238,4120 +238,4119 @@
  * @see Admin
  */
 @InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class HBaseAdmin implements Admin {
-  private static final Logger LOG = LoggerFactory.getLogger(HBaseAdmin.class);
-
-  private ClusterConnection connection;
-
-  private final Configuration conf;
-  private final long pause;
-  private final int numRetries;
-  private final int syncWaitTimeout;
-  private boolean aborted;
-  private int operationTimeout;
-  private int rpcTimeout;
-
-  private RpcRetryingCallerFactory rpcCallerFactory;
-  private RpcControllerFactory rpcControllerFactory;
-
-  private NonceGenerator ng;
-
-  @Override
-  public int getOperationTimeout() {
-    return operationTimeout;
-  }
-
-  HBaseAdmin(ClusterConnection connection) throws IOException {
-    this.conf = connection.getConfiguration();
-    this.connection = connection;
-
-    // TODO: receive ConnectionConfiguration here rather than re-parsing these configs every time.
-    this.pause = this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
-        HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
-    this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-        HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-    this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
-        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-    this.rpcTimeout = this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-        HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
-    this.syncWaitTimeout = this.conf.getInt(
-      "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
-
-    this.rpcCallerFactory = connection.getRpcRetryingCallerFactory();
-    this.rpcControllerFactory = connection.getRpcControllerFactory();
-
-    this.ng = this.connection.getNonceGenerator();
-  }
-
-  @Override
-  public void abort(String why, Throwable e) {
-    // Currently does nothing but throw the passed message and exception
-    this.aborted = true;
-    throw new RuntimeException(why, e);
-  }
-
-  @Override
-  public boolean isAborted() {
-    return this.aborted;
-  }
-
-  @Override
-  public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
-      throws IOException {
-    return get(abortProcedureAsync(procId, mayInterruptIfRunning), this.syncWaitTimeout,
-      TimeUnit.MILLISECONDS);
-  }
-
-  @Override
-  public Future<Boolean> abortProcedureAsync(final long procId, final boolean mayInterruptIfRunning)
-      throws IOException {
-    Boolean abortProcResponse =
-      executeCallable(new MasterCallable<AbortProcedureResponse>(getConnection(),
-          getRpcControllerFactory()) {
-        @Override
-        protected AbortProcedureResponse rpcCall() throws Exception {
-          AbortProcedureRequest abortProcRequest =
-            AbortProcedureRequest.newBuilder().setProcId(procId).build();
-          return master.abortProcedure(getRpcController(), abortProcRequest);
-        }
-      }).getIsProcedureAborted();
-    return new AbortProcedureFuture(this, procId, abortProcResponse);
-  }
-
-  @Override
-  public List<TableDescriptor> listTableDescriptors() throws IOException {
-    return listTableDescriptors((Pattern) null, false);
-  }
-
-  @Override
-  public List<TableDescriptor> listTableDescriptors(Pattern pattern) throws IOException {
-    return listTableDescriptors(pattern, false);
-  }
-
-  @Override
-  public List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables)
-      throws IOException {
-    return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
-        getRpcControllerFactory()) {
-      @Override
-      protected List<TableDescriptor> rpcCall() throws Exception {
-        GetTableDescriptorsRequest req =
-          RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
-        return
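Note how abortProcedure is just the synchronous wrapper: it calls the async variant and blocks on the returned Future with syncWaitTimeout. The same wrap-the-future pattern in isolation, with generic types rather than the HBase classes:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    class SyncOverAsyncSketch {
      static <T> T get(Future<T> future, long timeout, TimeUnit unit)
          throws InterruptedException, ExecutionException, TimeoutException {
        return future.get(timeout, unit); // block the caller until done or timed out
      }

      public static void main(String[] args) throws Exception {
        Future<Boolean> async = CompletableFuture.completedFuture(true);
        System.out.println(get(async, 10, TimeUnit.MINUTES)); // prints: true
      }
    }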

[28/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/filter/FilterListWithOR.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/filter/FilterListWithOR.html b/devapidocs/src-html/org/apache/hadoop/hbase/filter/FilterListWithOR.html
index f847dd8..f2cf6a7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/filter/FilterListWithOR.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/filter/FilterListWithOR.html
@@ -34,395 +34,393 @@

 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Objects;
-
-/**
- * FilterListWithOR represents an ordered list of filters which will be evaluated with an OR
- * operator.
- */
-@InterfaceAudience.Private
-public class FilterListWithOR extends FilterListBase {
-
-  /**
-   * Save the previous return code and previous cell for every filter in the filter list. For
-   * MUST_PASS_ONE, we use the previous return code to decide whether we should pass the current
-   * cell encountered to the filter. For MUST_PASS_ALL, the two lists are meaningless.
-   */
-  private List<ReturnCode> prevFilterRCList = null;
-  private List<Cell> prevCellList = null;
-
-  public FilterListWithOR(List<Filter> filters) {
-    super(filters);
-    prevFilterRCList = new ArrayList<>(Collections.nCopies(filters.size(), null));
-    prevCellList = new ArrayList<>(Collections.nCopies(filters.size(), null));
-    subFiltersIncludedCell = new ArrayList<>(Collections.nCopies(filters.size(), false));
-  }
-
-  @Override
-  public void addFilterLists(List<Filter> filters) {
-    if (checkAndGetReversed(filters, isReversed()) != isReversed()) {
-      throw new IllegalArgumentException("Filters in the list must have the same reversed flag");
-    }
-    this.filters.addAll(filters);
-    this.subFiltersIncludedCell.addAll(Collections.nCopies(filters.size(), false));
-    this.prevFilterRCList.addAll(Collections.nCopies(filters.size(), null));
-    this.prevCellList.addAll(Collections.nCopies(filters.size(), null));
-  }
-
-  @Override
-  protected String formatLogFilters(List<Filter> logFilters) {
-    return String.format("FilterList OR (%d/%d): %s", logFilters.size(), this.size(),
-      logFilters.toString());
-  }
-
-  /**
-   * For MUST_PASS_ONE, we cannot make sure that when filter-A in the filter list returns NEXT_COL,
-   * the next cell passed to the filter list will be the first cell in the next column, because if
-   * filter-B in the filter list returns SKIP, then the filter list will return SKIP. In this case,
-   * we should pass the cell following the previous cell, and it's possible that the next cell has
-   * the same column as the previous cell even if filter-A returned NEXT_COL for the previous cell.
-   * So we should save the previous cell and the return code list when checking the previous cell
-   * for every filter in the filter list, and verify whether the current cell fits the previous
-   * return code; if it fits, pass the current cell to the corresponding filter. (HBASE-17678) <br>
-   * Note that: at the StoreScanner level, NEXT_ROW will skip to the next row in the current
-   * family, and at the RegionScanner level, NEXT_ROW will skip to the next row in the current
-   * family and switch to the next family for the RegionScanner; INCLUDE_AND_NEXT_ROW is the same.
-   * So we should pass the current cell to the filter if the row mismatches, or if the row matches
-   * but the column family mismatches. (HBASE-18368)
-   * @see org.apache.hadoop.hbase.filter.Filter.ReturnCode
-   */
-  private boolean shouldPassCurrentCellToFilter(Cell prevCell, Cell currentCell,
-      ReturnCode prevCode) throws IOException {
-    if (prevCell == null || prevCode == null) {
-      return true;
-    }
-    switch (prevCode) {
-    case INCLUDE:
-    case SKIP:
-      return true;
-    case SEEK_NEXT_USING_HINT:
-      Cell nextHintCell = getNextCellHint(prevCell);
-      return nextHintCell == null || this.compareCell(currentCell, nextHintCell) >= 0;
-    case NEXT_COL:
-    case INCLUDE_AND_NEXT_COL:
-      // Once the row changed, reset() will clear prevCells, so we need not compare their rows
-      // because rows are the same here.
-      return !CellUtil.matchingColumn(prevCell, currentCell);
-    case NEXT_ROW:
-    case INCLUDE_AND_SEEK_NEXT_ROW:
-      // As described above, rows are definitely the same, so we only compare the family.
-      return !CellUtil.matchingFamily(prevCell, currentCell);
-    default:
-      throw new IllegalStateException("Received code is not valid.");
-    }
-  }
-
-  /**
-   * FilterList with MUST_PASS_ONE
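The subtlety the javadoc describes only matters internally, because an OR list forwards a cell to a sub-filter based on what that sub-filter answered last time. From the client side, building such a list is simple: a MUST_PASS_ONE FilterList is evaluated as filterA OR filterB and is backed by the class shown above (column values here are illustrative):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.FilterList;
    import org.apache.hadoop.hbase.filter.PrefixFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class OrFilterSketch {
      public static Scan buildScan() {
        FilterList orList = new FilterList(FilterList.Operator.MUST_PASS_ONE,
            new PrefixFilter(Bytes.toBytes("user-")),   // rows starting with "user-"
            new PrefixFilter(Bytes.toBytes("admin-"))); // or rows starting with "admin-"
        Scan scan = new Scan();
        scan.setFilter(orList);
        return scan;
      }
    }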

[28/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
index 07cb760..ee9fe5b 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -3241,7 +3241,7 @@ implements 
 
 FOR_UNIT_TESTS_ONLY
-private static final byte[] FOR_UNIT_TESTS_ONLY
+private static final byte[] FOR_UNIT_TESTS_ONLY
 Row needed by below method.

@@ -3251,7 +3251,7 @@ implements

 FIXED_OVERHEAD
-public static final long FIXED_OVERHEAD
+public static final long FIXED_OVERHEAD

@@ -3260,7 +3260,7 @@ implements

 DEEP_OVERHEAD
-public static final long DEEP_OVERHEAD
+public static final long DEEP_OVERHEAD

@@ -3269,7 +3269,7 @@ implements

 MOCKED_LIST
-private static final List<Cell> MOCKED_LIST
+private static final List<Cell> MOCKED_LIST
 A mocked list implementation - discards all updates.

@@ -4829,7 +4829,7 @@ public long

 getNextSequenceId
-protected long getNextSequenceId(WAL wal) throws IOException
+protected long getNextSequenceId(WAL wal) throws IOException
 Method to safely get the next sequence number.

@@ -4846,7 +4846,7 @@ public long

 getScanner
-public HRegion.RegionScannerImpl getScanner(Scan scan) throws IOException
+public HRegion.RegionScannerImpl getScanner(Scan scan) throws IOException
 Description copied from interface: Region
 Return an iterator that scans over the HRegion, returning the indicated
@@ -4871,7 +4871,7 @@ public long

 getScanner
-public HRegion.RegionScannerImpl getScanner(Scan scan, List<KeyValueScanner> additionalScanners) throws IOException
+public HRegion.RegionScannerImpl getScanner(Scan scan, List<KeyValueScanner> additionalScanners) throws IOException
 Description copied from interface: Region
@@ -4900,7 +4900,7 @@ public long

 getScanner
-private HRegion.RegionScannerImpl getScanner(Scan scan, List<KeyValueScanner> additionalScanners, long nonceGroup, long nonce)
+private HRegion.RegionScannerImpl getScanner(Scan scan, List<KeyValueScanner> additionalScanners, long nonceGroup, long nonce)
@@ -4917,7 +4917,7 @@ public long

 instantiateRegionScanner
-protected RegionScanner instantiateRegionScanner(Scan scan, List<KeyValueScanner> additionalScanners) throws IOException
+protected RegionScanner instantiateRegionScanner(Scan scan, List<KeyValueScanner> additionalScanners) throws IOException

@@ -4932,7 +4932,7 @@ public long

 instantiateRegionScanner
-protected HRegion.RegionScannerImpl instantiateRegionScanner(Scan scan, List<KeyValueScanner> additionalScanners, long nonceGroup, long nonce)
+protected HRegion.RegionScannerImpl instantiateRegionScanner(Scan scan, List<KeyValueScanner> additionalScanners, long nonceGroup, long nonce)
@@ -4949,7 +4949,7 @@ public long

 prepareDelete
-public void prepareDelete(Delete delete) throws IOException
+public void prepareDelete(Delete delete) throws IOException
 Prepare a delete for a row mutation processor.

@@ -4966,7 +4966,7 @@ public long

 delete
-public void delete(Delete delete) throws IOException
+public void delete(Delete delete) throws IOException
 title="class or interface in 

[28/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/apidocs/src-html/org/apache/hadoop/hbase/filter/PrefixFilter.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/filter/PrefixFilter.html b/apidocs/src-html/org/apache/hadoop/hbase/filter/PrefixFilter.html
index 96bdd09..2553d09 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/PrefixFilter.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/PrefixFilter.html
@@ -28,143 +28,158 @@
 package org.apache.hadoop.hbase.filter;

 import java.util.ArrayList;
-
-import org.apache.hadoop.hbase.ByteBufferExtendedCell;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
-import org.apache.hadoop.hbase.util.ByteBufferUtils;
-import org.apache.hadoop.hbase.util.Bytes;
-
-import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
-
-/**
- * Pass results that have the same row prefix.
- */
-@InterfaceAudience.Public
-public class PrefixFilter extends FilterBase {
-  protected byte[] prefix = null;
-  protected boolean passedPrefix = false;
-  protected boolean filterRow = true;
-
-  public PrefixFilter(final byte[] prefix) {
-    this.prefix = prefix;
-  }
-
-  public byte[] getPrefix() {
-    return prefix;
-  }
-
-  @Override
-  public boolean filterRowKey(Cell firstRowCell) {
-    if (firstRowCell == null || this.prefix == null)
-      return true;
-    if (filterAllRemaining()) return true;
-    int length = firstRowCell.getRowLength();
-    if (length < prefix.length) return true;
-    // if they are equal, return false => pass row
-    // else return true, filter row
-    // if we are past the prefix, set flag
-    int cmp;
-    if (firstRowCell instanceof ByteBufferExtendedCell) {
-      cmp = ByteBufferUtils.compareTo(((ByteBufferExtendedCell) firstRowCell).getRowByteBuffer(),
-          ((ByteBufferExtendedCell) firstRowCell).getRowPosition(), this.prefix.length,
-          this.prefix, 0, this.prefix.length);
-    } else {
-      cmp = Bytes.compareTo(firstRowCell.getRowArray(), firstRowCell.getRowOffset(),
-          this.prefix.length, this.prefix, 0, this.prefix.length);
-    }
-    if ((!isReversed() && cmp > 0) || (isReversed() && cmp < 0)) {
-      passedPrefix = true;
-    }
-    filterRow = (cmp != 0);
-    return filterRow;
-  }
-
-  @Deprecated
-  @Override
-  public ReturnCode filterKeyValue(final Cell c) {
-    return filterCell(c);
-  }
-
-  @Override
-  public ReturnCode filterCell(final Cell c) {
-    if (filterRow) return ReturnCode.NEXT_ROW;
-    return ReturnCode.INCLUDE;
-  }
-
-  @Override
-  public boolean filterRow() {
-    return filterRow;
-  }
-
-  @Override
-  public void reset() {
-    filterRow = true;
-  }
-
-  @Override
-  public boolean filterAllRemaining() {
-    return passedPrefix;
-  }
-
-  public static Filter createFilterFromArguments(ArrayList<byte[]> filterArguments) {
-    Preconditions.checkArgument(filterArguments.size() == 1,
-        "Expected 1 but got: %s", filterArguments.size());
-    byte[] prefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0));
-    return new PrefixFilter(prefix);
-  }
-
-  /**
-   * @return The filter serialized using pb
-   */
-  @Override
-  public byte[] toByteArray() {
-    FilterProtos.PrefixFilter.Builder builder =
-        FilterProtos.PrefixFilter.newBuilder();
-    if (this.prefix != null) builder.setPrefix(UnsafeByteOperations.unsafeWrap(this.prefix));
-    return builder.build().toByteArray();
-  }
-
-  /**
-   * @param pbBytes A pb serialized {@link PrefixFilter} instance
-   * @return An instance of {@link PrefixFilter} made from <code>bytes</code>
-   * @throws org.apache.hadoop.hbase.exceptions.DeserializationException
-   * @see #toByteArray
-   */
-  public static PrefixFilter parseFrom(final byte[] pbBytes)
-      throws DeserializationException {
-    FilterProtos.PrefixFilter proto;
-    try {
-      proto = FilterProtos.PrefixFilter.parseFrom(pbBytes);
-    } catch (InvalidProtocolBufferException e) {
-      throw new DeserializationException(e);
-    }
-    return new PrefixFilter(proto.hasPrefix() ? proto.getPrefix().toByteArray() : null);
-  }
-
-  /**
-   * @param o the other
[28/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.

2018-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
index db8431b..a8cb7c4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
@@ -885,7766 +885,7797 @@
 877   * @return What the next sequence (edit) id should be.
 878   * @throws IOException e
 879   */
-880  private long initialize(final CancelableProgressable reporter) throws IOException {
-881
-882    //Refuse to open the region if there is no column family in the table
-883    if (htableDescriptor.getColumnFamilyCount() == 0) {
-884      throw new DoNotRetryIOException("Table " + htableDescriptor.getTableName().getNameAsString()+
-885          " should have at least one column family.");
-886    }
-887
-888    MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this);
-889    long nextSeqId = -1;
-890    try {
-891      nextSeqId = initializeRegionInternals(reporter, status);
-892      return nextSeqId;
-893    } finally {
-894      // nextSeqid will be -1 if the initialization fails.
-895      // At least it will be 0 otherwise.
-896      if (nextSeqId == -1) {
-897        status.abort("Exception during region " + getRegionInfo().getRegionNameAsString() +
-898          " initialization.");
-899      }
-900    }
-901  }
-902
-903  private long initializeRegionInternals(final CancelableProgressable reporter,
-904      final MonitoredTask status) throws IOException {
-905    if (coprocessorHost != null) {
-906      status.setStatus("Running coprocessor pre-open hook");
-907      coprocessorHost.preOpen();
-908    }
-909
-910    // Write HRI to a file in case we need to recover hbase:meta
-911    // Only the primary replica should write .regioninfo
-912    if (this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
-913      status.setStatus("Writing region info on filesystem");
-914      fs.checkRegionInfoOnFilesystem();
-915    }
-916
-917    // Initialize all the HStores
-918    status.setStatus("Initializing all the Stores");
-919    long maxSeqId = initializeStores(reporter, status);
-920    this.mvcc.advanceTo(maxSeqId);
-921    if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-922      Collection<HStore> stores = this.stores.values();
-923      try {
-924        // update the stores that we are replaying
-925        LOG.debug("replaying wal for " + this.getRegionInfo().getEncodedName());
-926        stores.forEach(HStore::startReplayingFromWAL);
-927        // Recover any edits if available.
-928        maxSeqId = Math.max(maxSeqId,
-929          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
-930        // Make sure mvcc is up to max.
-931        this.mvcc.advanceTo(maxSeqId);
-932      } finally {
-933        LOG.debug("stopping wal replay for " + this.getRegionInfo().getEncodedName());
-934        // update the stores that we are done replaying
-935        stores.forEach(HStore::stopReplayingFromWAL);
-936      }
-937    }
-938    this.lastReplayedOpenRegionSeqId = maxSeqId;
-939
-940    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-941    this.writestate.flushRequested = false;
-942    this.writestate.compacting.set(0);
-943
-944    if (this.writestate.writesEnabled) {
-945      LOG.debug("Cleaning up temporary data for " + this.getRegionInfo().getEncodedName());
-946      // Remove temporary data left over from old regions
-947      status.setStatus("Cleaning up temporary data from old regions");
-948      fs.cleanupTempDir();
-949    }
-950
-951    if (this.writestate.writesEnabled) {
-952      status.setStatus("Cleaning up detritus from prior splits");
-953      // Get rid of any splits or merges that were lost in-progress.  Clean out
-954      // these directories here on open.  We may be opening a region that was
-955      // being split but we crashed in the middle of it all.
-956      LOG.debug("Cleaning up detritus for " + this.getRegionInfo().getEncodedName());
-957      fs.cleanupAnySplitDetritus();
-958      fs.cleanupMergesDir();
-959    }
+880  @VisibleForTesting
+881  long initialize(final CancelableProgressable reporter) throws IOException {
+882
+883    //Refuse to open the region if there is no column family in the table
+884    if (htableDescriptor.getColumnFamilyCount() == 0) {
+885      throw new DoNotRetryIOException("Table " + htableDescriptor.getTableName().getNameAsString()+
+886          " should have at least one column family.");
+887    }
+888
+889
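The hunk above shows the pattern HRegion uses around region open: initialize() seeds a
sentinel sequence id of -1, delegates to initializeRegionInternals(), and aborts the
MonitoredTask in a finally block if the sentinel was never overwritten. A minimal sketch of
that pattern, with hypothetical Status/openRegion stand-ins rather than the HBase API:

// Sketch only: Status and openRegion() are illustrative, not HBase classes.
public final class StatusPattern {
  interface Status {
    void setStatus(String msg);
    void abort(String msg);
  }

  static long openRegion(Status status) throws java.io.IOException {
    long nextSeqId = -1;
    try {
      status.setStatus("Initializing region");
      nextSeqId = 42L; // stand-in for initializeRegionInternals(reporter, status)
      return nextSeqId;
    } finally {
      // nextSeqId is still -1 only if initialization threw before assigning.
      if (nextSeqId == -1) {
        status.abort("Exception during region initialization.");
      }
    }
  }
}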

[28/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
index e50237f..075ad15 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":9,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":9,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -49,8 +49,8 @@ var activeTableTab = "activeTableTab";
 
 
 
-PrevClass
-NextClass
+PrevClass
+NextClass
 
 
 Frames
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class RegionStates
+public class RegionStates
 extends Object
 RegionStates contains a set of Maps that describes the in-memory state of the AM, with
 the regions available in the system, the region in transition, the offline regions and
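The class description above boils down to a handful of concurrent maps over shared
per-region and per-server nodes. A toy sketch of that shape (illustrative types only, not
the HBase classes; Arrays.compare needs Java 9+):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListMap;

final class RegionStatesShape {
  static final class RegionStateNode { volatile String state = "OFFLINE"; }
  static final class ServerStateNode {
    final java.util.Set<String> hostedRegions = ConcurrentHashMap.newKeySet();
  }

  // region name (bytes) -> node; sorted, so per-table range views stay cheap
  final ConcurrentSkipListMap<byte[], RegionStateNode> regionsMap =
      new ConcurrentSkipListMap<>(java.util.Arrays::compare);
  // regions currently in transition, keyed the same way
  final ConcurrentSkipListMap<byte[], RegionStateNode> regionInTransition =
      new ConcurrentSkipListMap<>(java.util.Arrays::compare);
  // hosting server -> node carrying the set of hosted regions
  final ConcurrentHashMap<String, ServerStateNode> serverMap = new ConcurrentHashMap<>();
}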
@@ -134,39 +134,13 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 Class and Description
 
 
-private static class
-RegionStates.AssignmentProcedureEvent
-
-
 static class
 RegionStates.RegionFailedOpen
 
-
-static class
-RegionStates.RegionStateNode
-Current Region State.
-
-
 
 private static class
 RegionStates.RegionStateStampComparator
 
-
-private static class
-RegionStates.ServerReportEvent
-
-
-static class
-RegionStates.ServerState
-Server State.
-
-
-
-static class
-RegionStates.ServerStateNode
-State of Server; list of hosted regions, etc.
-
-
 
 
 
@@ -195,31 +169,31 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 regionFailedOpen
 
 
-private ConcurrentSkipListMap<RegionInfo, RegionStates.RegionStateNode>
+private ConcurrentSkipListMap<RegionInfo, RegionStateNode>
 regionInTransition


-private ConcurrentSkipListMap<RegionInfo, RegionStates.RegionStateNode>
+private ConcurrentSkipListMap<RegionInfo, RegionStateNode>
 regionOffline
 Regions marked as offline on a read of hbase:meta.


-private ConcurrentSkipListMap<byte[], RegionStates.RegionStateNode>
+private ConcurrentSkipListMap<byte[], RegionStateNode>
 regionsMap
 RegionName -- i.e.


-private ConcurrentHashMap<ServerName, RegionStates.ServerStateNode>
+private 

[28/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.html b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.html
index 39170f0..7859ebc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.html
@@ -230,564 +230,567 @@
 222        }
 223      }
 224    } catch (InterruptedException e) {
-225      e.printStackTrace();
-226    }
-227  }
-228
-229  @Override
-230  public void setup(Context context) throws IOException {
-231    cfRenameMap = createCfRenameMap(context.getConfiguration());
-232    filter = instantiateFilter(context.getConfiguration());
-233    int reduceNum = context.getNumReduceTasks();
-234    Configuration conf = context.getConfiguration();
-235    TableName tableName = TableName.valueOf(context.getConfiguration().get(TABLE_NAME));
-236    try (Connection conn = ConnectionFactory.createConnection(conf);
-237        RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
-238      byte[][] startKeys = regionLocator.getStartKeys();
-239      if (startKeys.length != reduceNum) {
-240        throw new IOException("Region split after job initialization");
-241      }
-242      CellWritableComparable[] startKeyWraps =
-243          new CellWritableComparable[startKeys.length - 1];
-244      for (int i = 1; i < startKeys.length; ++i) {
-245        startKeyWraps[i - 1] =
-246            new CellWritableComparable(KeyValueUtil.createFirstOnRow(startKeys[i]));
-247      }
-248      CellWritableComparablePartitioner.START_KEYS = startKeyWraps;
-249    }
-250  }
-251  }
-252
-253  /**
-254   * A mapper that just writes out KeyValues.
-255   */
-256  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS",
-257      justification="Writables are going away and this has been this way forever")
-258  public static class CellImporter extends TableMapper<ImmutableBytesWritable, Cell> {
-259    private Map<byte[], byte[]> cfRenameMap;
-260    private Filter filter;
-261    private static final Logger LOG = LoggerFactory.getLogger(CellImporter.class);
-262
-263    /**
-264     * @param row  The current table row key.
-265     * @param value  The columns.
-266     * @param context  The current context.
-267     * @throws IOException When something is broken with the data.
-268     */
-269    @Override
-270    public void map(ImmutableBytesWritable row, Result value,
-271      Context context)
-272    throws IOException {
-273      try {
-274        if (LOG.isTraceEnabled()) {
-275          LOG.trace("Considering the row."
-276              + Bytes.toString(row.get(), row.getOffset(), row.getLength()));
-277        }
-278        if (filter == null
-279            || !filter.filterRowKey(PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(),
-280                (short) row.getLength()))) {
-281          for (Cell kv : value.rawCells()) {
-282            kv = filterKv(filter, kv);
-283            // skip if we filtered it out
-284            if (kv == null) continue;
-285            context.write(row, new MapReduceExtendedCell(convertKv(kv, cfRenameMap)));
-286          }
-287        }
-288      } catch (InterruptedException e) {
-289        e.printStackTrace();
-290      }
-291    }
-292
-293    @Override
-294    public void setup(Context context) {
-295      cfRenameMap = createCfRenameMap(context.getConfiguration());
-296      filter = instantiateFilter(context.getConfiguration());
-297    }
-298  }
-299
-300  /**
-301   * Write table content out to files in hdfs.
-302   */
-303  public static class Importer extends TableMapper<ImmutableBytesWritable, Mutation> {
-304    private Map<byte[], byte[]> cfRenameMap;
-305    private List<UUID> clusterIds;
-306    private Filter filter;
-307    private Durability durability;
-308
-309    /**
-310     * @param row  The current table row key.
-311     * @param value  The columns.
-312     * @param context  The current context.
-313     * @throws IOException When something is broken with the data.
-314     */
-315    @Override
-316    public void map(ImmutableBytesWritable row, Result value,
-317      Context context)
-318    throws IOException {
-319      try {
-320        writeResult(row, value, context);
-321      } catch (InterruptedException e) {
-322        e.printStackTrace();
-323      }
-324    }
-325
-326    private void writeResult(ImmutableBytesWritable key, Result result, Context context)
-327        throws IOException, InterruptedException {
-328      Put put = null;
-329
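The removed setup() above carries the interesting invariant: the job pre-computes one reducer
per region and turns every region start key after the first into a partition split point,
failing fast if the table split while the job was initializing. A self-contained sketch of
that computation with plain byte[] keys (CellWritableComparable omitted):

import java.io.IOException;
import java.util.Arrays;

final class StartKeySplits {
  static byte[][] splitPoints(byte[][] regionStartKeys, int reduceNum) throws IOException {
    if (regionStartKeys.length != reduceNum) {
      // The reducer count was sized from the region list; a split invalidates it.
      throw new IOException("Region split after job initialization");
    }
    // Drop the first region's (empty) start key; the rest become split points.
    return Arrays.copyOfRange(regionStartKeys, 1, regionStartKeys.length);
  }
}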

[28/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
index b7b4236..3d1edb3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
@@ -259,1863 +259,1867 @@
 251   * + Metadata!  + <= See note on BLOCK_METADATA_SPACE above.
 252   * ++
 253   * </code>
-254   * @see #serialize(ByteBuffer)
+254   * @see #serialize(ByteBuffer, boolean)
 255   */
-256  static final CacheableDeserializer<Cacheable> BLOCK_DESERIALIZER =
-257      new CacheableDeserializer<Cacheable>() {
-258    @Override
-259    public HFileBlock deserialize(ByteBuff buf, boolean reuse, MemoryType memType)
-260        throws IOException {
-261      // The buf has the file block followed by block metadata.
-262      // Set limit to just before the BLOCK_METADATA_SPACE then rewind.
-263      buf.limit(buf.limit() - BLOCK_METADATA_SPACE).rewind();
-264      // Get a new buffer to pass the HFileBlock for it to 'own'.
-265      ByteBuff newByteBuff;
-266      if (reuse) {
-267        newByteBuff = buf.slice();
-268      } else {
-269        int len = buf.limit();
-270        newByteBuff = new SingleByteBuff(ByteBuffer.allocate(len));
-271        newByteBuff.put(0, buf, buf.position(), len);
-272      }
-273      // Read out the BLOCK_METADATA_SPACE content and shove into our HFileBlock.
-274      buf.position(buf.limit());
-275      buf.limit(buf.limit() + HFileBlock.BLOCK_METADATA_SPACE);
-276      boolean usesChecksum = buf.get() == (byte) 1;
-277      long offset = buf.getLong();
-278      int nextBlockOnDiskSize = buf.getInt();
-279      HFileBlock hFileBlock =
-280          new HFileBlock(newByteBuff, usesChecksum, memType, offset, nextBlockOnDiskSize, null);
-281      return hFileBlock;
-282    }
-283
-284    @Override
-285    public int getDeserialiserIdentifier() {
-286      return DESERIALIZER_IDENTIFIER;
-287    }
-288
-289    @Override
-290    public HFileBlock deserialize(ByteBuff b) throws IOException {
-291      // Used only in tests
-292      return deserialize(b, false, MemoryType.EXCLUSIVE);
-293    }
-294  };
-295
-296  private static final int DESERIALIZER_IDENTIFIER;
-297  static {
-298    DESERIALIZER_IDENTIFIER =
-299        CacheableDeserializerIdManager.registerDeserializer(BLOCK_DESERIALIZER);
-300  }
-301
-302  /**
-303   * Copy constructor. Creates a shallow copy of {@code that}'s buffer.
-304   */
-305  private HFileBlock(HFileBlock that) {
-306    this(that, false);
-307  }
-308
-309  /**
-310   * Copy constructor. Creates a shallow/deep copy of {@code that}'s buffer as per the boolean
-311   * param.
-312   */
-313  private HFileBlock(HFileBlock that, boolean bufCopy) {
-314    init(that.blockType, that.onDiskSizeWithoutHeader,
-315        that.uncompressedSizeWithoutHeader, that.prevBlockOffset,
-316        that.offset, that.onDiskDataSizeWithHeader, that.nextBlockOnDiskSize, that.fileContext);
-317    if (bufCopy) {
-318      this.buf = new SingleByteBuff(ByteBuffer.wrap(that.buf.toBytes(0, that.buf.limit())));
-319    } else {
-320      this.buf = that.buf.duplicate();
-321    }
-322  }
-323
-324  /**
-325   * Creates a new {@link HFile} block from the given fields. This constructor
-326   * is used only while writing blocks and caching,
-327   * and is sitting in a byte buffer and we want to stuff the block into cache.
-328   *
-329   * <p>TODO: The caller presumes no checksumming
-330   * required of this block instance since going into cache; checksum already verified on
-331   * underlying block data pulled in from filesystem. Is that correct? What if cache is SSD?
+256  public static final CacheableDeserializer<Cacheable> BLOCK_DESERIALIZER = new BlockDeserializer();
+257
+258  public static final class BlockDeserializer implements CacheableDeserializer<Cacheable> {
+259    private BlockDeserializer() {
+260    }
+261
+262    @Override
+263    public HFileBlock deserialize(ByteBuff buf, boolean reuse, MemoryType memType)
+264        throws IOException {
+265      // The buf has the file block followed by block metadata.
+266      // Set limit to just before the BLOCK_METADATA_SPACE then rewind.
+267      buf.limit(buf.limit() - BLOCK_METADATA_SPACE).rewind();
+268      // Get a new buffer to pass the HFileBlock for it to 'own'.
+269      ByteBuff newByteBuff;
+270      if (reuse) {
+271        newByteBuff = buf.slice();
+272      } else {
+273        int len = buf.limit();
+274        newByteBuff = new SingleByteBuff(ByteBuffer.allocate(len));
+275        newByteBuff.put(0, buf, buf.position(), 
[28/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.OpenRegionRemoteCall.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.OpenRegionRemoteCall.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.OpenRegionRemoteCall.html
index 0755bb8..d203164 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.OpenRegionRemoteCall.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.OpenRegionRemoteCall.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private final class RSProcedureDispatcher.OpenRegionRemoteCall
+private final class RSProcedureDispatcher.OpenRegionRemoteCall
 extends RSProcedureDispatcher.AbstractRSRemoteCall
 Compatibility class used by RSProcedureDispatcher.CompatRemoteProcedureResolver to open regions using old
 AdminService#openRegion(RpcController, OpenRegionRequest, RpcCallback) rpc.
@@ -230,7 +230,7 @@ extends 
 
 operations
-private final List<RSProcedureDispatcher.RegionOpenOperation> operations
+private final List<RSProcedureDispatcher.RegionOpenOperation> operations
 
 
 
@@ -247,7 +247,7 @@ extends 
 
 OpenRegionRemoteCall
-public OpenRegionRemoteCall(ServerName serverName,
+public OpenRegionRemoteCall(ServerName serverName,
    List<RSProcedureDispatcher.RegionOpenOperation> operations)
 
 
@@ -265,7 +265,7 @@ extends 
 
 call
-public Void call()
+public Void call()

Specified by:
call in interface Callable<Void>
@@ -280,7 +280,7 @@ extends 
 
 sendRequest
-private org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse sendRequest(ServerName serverName,
+private org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse sendRequest(ServerName serverName,
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest request)
  throws IOException
 
@@ -295,7 +295,7 @@ extends 
 
 remoteCallFailed
-private void remoteCallFailed(MasterProcedureEnv env,
+private void remoteCallFailed(MasterProcedureEnv env,
    IOException e)
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RegionCloseOperation.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RegionCloseOperation.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RegionCloseOperation.html
index 631ada1..ffb38bf 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RegionCloseOperation.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RegionCloseOperation.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class RSProcedureDispatcher.RegionCloseOperation
+public static class RSProcedureDispatcher.RegionCloseOperation
 extends RSProcedureDispatcher.RegionOperation
 
 
@@ -243,7 +243,7 @@ extends 
 
 destinationServer
-private final ServerName destinationServer
+private final ServerName destinationServer
 
 
 
@@ -252,7 +252,7 @@ extends 
 
 closed
-private boolean closed
+private boolean closed
 
 
 
@@ -269,7 +269,7 @@ extends 
 
 RegionCloseOperation

[28/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
index 233dba3..91b9055 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
@@ -540,1205 +540,1204 @@
 532    sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> {
 533      Deque<BalancerRegionLoad> rLoads = oldLoads.get(Bytes.toString(regionName));
 534      if (rLoads == null) {
-535        // There was nothing there
-536        rLoads = new ArrayDeque<>();
-537      } else if (rLoads.size() >= numRegionLoadsToRemember) {
-538        rLoads.remove();
-539      }
-540      rLoads.add(new BalancerRegionLoad(rm));
-541      loads.put(Bytes.toString(regionName), rLoads);
-542    });
-543  });
-544
-545    for(CostFromRegionLoadFunction cost : regionLoadFunctions) {
-546      cost.setLoads(loads);
-547    }
-548  }
-549
-550  protected void initCosts(Cluster cluster) {
-551    for (CostFunction c:costFunctions) {
-552      c.init(cluster);
-553    }
-554  }
-555
-556  protected void updateCostsWithAction(Cluster cluster, Action action) {
-557    for (CostFunction c : costFunctions) {
-558      c.postAction(action);
-559    }
-560  }
-561
-562  /**
-563   * Get the names of the cost functions
-564   */
-565  public String[] getCostFunctionNames() {
-566    if (costFunctions == null) return null;
-567    String[] ret = new String[costFunctions.length];
-568    for (int i = 0; i < costFunctions.length; i++) {
-569      CostFunction c = costFunctions[i];
-570      ret[i] = c.getClass().getSimpleName();
-571    }
-572
-573    return ret;
-574  }
-575
-576  /**
-577   * This is the main cost function.  It will compute a cost associated with a proposed cluster
-578   * state.  All different costs will be combined with their multipliers to produce a double cost.
-579   *
-580   * @param cluster The state of the cluster
-581   * @param previousCost the previous cost. This is used as an early out.
-582   * @return a double of a cost associated with the proposed cluster state.  This cost is an
-583   *         aggregate of all individual cost functions.
-584   */
-585  protected double computeCost(Cluster cluster, double previousCost) {
-586    double total = 0;
-587
-588    for (int i = 0; i < costFunctions.length; i++) {
-589      CostFunction c = costFunctions[i];
-590      this.tempFunctionCosts[i] = 0.0;
-591
-592      if (c.getMultiplier() <= 0) {
-593        continue;
-594      }
-595
-596      Float multiplier = c.getMultiplier();
-597      Double cost = c.cost();
-598
-599      this.tempFunctionCosts[i] = multiplier*cost;
-600      total += this.tempFunctionCosts[i];
-601
-602      if (total > previousCost) {
-603        break;
-604      }
-605    }
-606
-607    return total;
-608  }
-609
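computeCost() above is a weighted sum with an early out: zero-weight functions are skipped,
each remaining cost is scaled by its multiplier, and the loop bails as soon as the running
total exceeds the cost of the current state. A minimal sketch of just that accumulation:

final class WeightedCost {
  interface CostFunction {
    float multiplier();
    double cost();
  }

  static double computeCost(CostFunction[] functions, double previousCost) {
    double total = 0;
    for (CostFunction c : functions) {
      if (c.multiplier() <= 0) {
        continue; // disabled cost function
      }
      total += c.multiplier() * c.cost();
      if (total > previousCost) {
        break; // early out: this candidate is already worse
      }
    }
    return total;
  }
}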
-610  /** Generates a candidate action to be applied to the cluster for cost function search */
-611  abstract static class CandidateGenerator {
-612    abstract Cluster.Action generate(Cluster cluster);
-613
-614    /**
-615     * From a list of regions pick a random one. Null can be returned which
-616     * {@link StochasticLoadBalancer#balanceCluster(Map)} recognize as signal to try a region move
-617     * rather than swap.
-618     *
-619     * @param cluster        The state of the cluster
-620     * @param server         index of the server
-621     * @param chanceOfNoSwap Chance that this will decide to try a move rather
-622     *                       than a swap.
-623     * @return a random {@link RegionInfo} or null if an asymmetrical move is
-624     *         suggested.
-625     */
-626    protected int pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
-627      // Check to see if this is just a move.
-628      if (cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat() < chanceOfNoSwap) {
-629        // signal a move only.
-630        return -1;
-631      }
-632      int rand = RANDOM.nextInt(cluster.regionsPerServer[server].length);
-633      return cluster.regionsPerServer[server][rand];
-634
-635    }
-636    protected int pickRandomServer(Cluster cluster) {
-637      if (cluster.numServers < 1) {
-638        return -1;
-639      }
-640
-641      return RANDOM.nextInt(cluster.numServers);
-642    }
-643
-644    protected int pickRandomRack(Cluster cluster) {
-645      if (cluster.numRacks < 1) {
-646        return -1;
-647      }
-648
-649      return 
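pickRandomRegion() above encodes the move-versus-swap coin flip: an empty server, or a random
draw under chanceOfNoSwap, returns -1 to signal "move only"; otherwise a random hosted region
is chosen. The same logic in isolation (int region ids stand in for the cluster arrays):

import java.util.Random;

final class PickRandom {
  private static final Random RANDOM = new Random();

  static int pickRandomRegion(int[] regionsOnServer, double chanceOfNoSwap) {
    if (regionsOnServer.length == 0 || RANDOM.nextFloat() < chanceOfNoSwap) {
      return -1; // signal a move only
    }
    return regionsOnServer[RANDOM.nextInt(regionsOnServer.length)];
  }
}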

[28/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/InvalidFamilyOperationException.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/InvalidFamilyOperationException.html b/apidocs/org/apache/hadoop/hbase/InvalidFamilyOperationException.html
index 74ecf35..c0db59c 100644
--- a/apidocs/org/apache/hadoop/hbase/InvalidFamilyOperationException.html
+++ b/apidocs/org/apache/hadoop/hbase/InvalidFamilyOperationException.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
@@ -20,38 +20,38 @@
 //-->
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
-跳过导航链接
+Skip navigation links
 
 
 
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
 
 
 
 
-上一个类
-下一个类
+PrevClass
+NextClass
 
 
-框架
-无框架
+Frames
+NoFrames
 
 
-所有类
+AllClasses
 
 
 
 
org.apache.hadoop.hbase
-类 InvalidFamilyOperationException
+Class InvalidFamilyOperationException

[28/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html b/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
index f29213b..34a97e9 100644
--- a/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
+++ b/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
@@ -19,45 +19,45 @@
 }
 //-->
 var methods = 
{"i0":42,"i1":42,"i2":42,"i3":42,"i4":42,"i5":42,"i6":42,"i7":42,"i8":42,"i9":42,"i10":42,"i11":42,"i12":42,"i13":42,"i14":42,"i15":42,"i16":42,"i17":42,"i18":42,"i19":42,"i20":42,"i21":42,"i22":42,"i23":42,"i24":42,"i25":42,"i26":42,"i27":42,"i28":42,"i29":42,"i30":42,"i31":42,"i32":42,"i33":42,"i34":42,"i35":42,"i36":42,"i37":42,"i38":42,"i39":42,"i40":42,"i41":42,"i42":42,"i43":42,"i44":42,"i45":41,"i46":42,"i47":42,"i48":42,"i49":42,"i50":42,"i51":42,"i52":42,"i53":42,"i54":42,"i55":42,"i56":42,"i57":42,"i58":42,"i59":42,"i60":42,"i61":42,"i62":42,"i63":42,"i64":42,"i65":42,"i66":42,"i67":42,"i68":42,"i69":42,"i70":42,"i71":42,"i72":42,"i73":42,"i74":42};
-var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
+var tabs = 
{65535:["t0","所有方法"],1:["t1","静态方法"],2:["t2","实例方法"],8:["t4","å
…·ä½“方法"],32:["t6","已过时的方法"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
 var activeTableTab = "activeTableTab";
 
 
-JavaScript is disabled on your browser.
+您的浏览器已禁用 JavaScript。
 
 
 
 
 
-Skip navigation links
+跳过导航链接
 
 
 
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
+
+概览
+程序包
+类
+使用
+树
+已过时
+索引
+帮助
 
 
 
 
-PrevClass
-NextClass
+上一个类
+下一个类
 
 
-Frames
-NoFrames
+框架
+无框架
 
 
-AllClasses
+所有类
 
 
 

[28/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

2018-07-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html b/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
index aa21ddf..9a66d6a 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
@@ -251,7 +251,7 @@ extends Segment
-close, compare, compareRows, decScannerCount, dump, getCellLength, getCellsCount, getCellSet, getComparator, getDataSize, getHeapSize, getMemStoreLAB, getMemStoreSize, getMinSequenceId, getOffHeapSize, getScanner, getScanners, getTimeRangeTracker, headSet, heapSizeChange, incMemStoreSize, incScannerCount, indexEntryOffHeapSize, indexEntryOnHeapSize, indexEntrySize, internalAdd, isEmpty, isTagsPresent, iterator, last, maybeCloneWithAllocator, offHeapSizeChange, setCellSet, shouldSeek, tailSet, updateMetaInfo, updateMetaInfo
+close, compare, compareAndSetDataSize, compareRows, decScannerCount, dump, getCellLength, getCellsCount, getCellSet, getComparator, getDataSize, getHeapSize, getMemStoreLAB, getMemStoreSize, getMinSequenceId, getOffHeapSize, getScanner, getScanners, getTimeRangeTracker, headSet, heapSizeChange, incMemStoreSize, incScannerCount, indexEntryOffHeapSize, indexEntryOnHeapSize, indexEntrySize, internalAdd, isEmpty, isTagsPresent, iterator, last, maybeCloneWithAllocator, offHeapSizeChange, setCellSet, sharedLock, sharedUnlock, shouldSeek, tailSet, updateMetaInfo, updateMetaInfo, waitForUpdates
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.html b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.html
index 78e1e45..2712e70 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class MemStoreCompactor
+public class MemStoreCompactor
 extends Object
 The ongoing MemStore Compaction manager, dispatches a solo running compaction and interrupts
 the compaction if requested. The compaction is interrupted and stopped by CompactingMemStore,
@@ -281,7 +281,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 DEEP_OVERHEAD
-public static final long DEEP_OVERHEAD
+public static final long DEEP_OVERHEAD
 
 
 
@@ -290,7 +290,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -299,7 +299,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 compactingMemStore
-private CompactingMemStore compactingMemStore
+private CompactingMemStore compactingMemStore
 
 
 
@@ -308,7 +308,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 versionedList
-private VersionedSegmentsList versionedList
+private VersionedSegmentsList versionedList
 
 
 
@@ -317,7 +317,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 isInterrupted
-private final AtomicBoolean isInterrupted
+private final AtomicBoolean isInterrupted
 
 
 
@@ 

[28/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

2018-06-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html b/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html
index b3fb8c3..2e59d6f 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9,"i26":9,"i27":9,"i28":9,"i29":9,"i30":9,"i31":9};
+var methods = 
{"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9,"i26":9,"i27":9,"i28":9,"i29":9,"i30":9,"i31":9,"i32":9,"i33":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class AccessControlLists
+public class AccessControlLists
 extends Object
 Maintains lists of permission grants to users and groups to allow for
 authorization checks by AccessController.
@@ -272,11 +272,15 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 (package private) static org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap<String, TablePermission>
-getPermissions(org.apache.hadoop.conf.Configuration conf,
   byte[] entryName,
-  Table t)
-Reads user permission assignments stored in the l: column
- family of the first table row in _acl_.
+  Table t,
+  byte[] cf,
+  byte[] cq,
+  String user,
+  boolean hasFilterUser)
+Reads user permission assignments stored in the l: column family of the first
+ table row in _acl_.
l: column family of the first
+ table row in _acl_.
 
 
 
@@ -286,20 +290,36 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 (package private) static List<UserPermission>
-getUserNamespacePermissions(org.apache.hadoop.conf.Configuration conf,
-   String namespace)
+getUserNamespacePermissions(org.apache.hadoop.conf.Configuration conf,
+   String namespace,
+   String user,
+   boolean hasFilterUser)
+Returns the currently granted permissions for a given namespace as the specified user plus
+ associated permissions.
+

 (package private) static List<UserPermission>
-getUserPermissions(org.apache.hadoop.conf.Configuration conf,
-  byte[] entryName)
+getUserPermissions(org.apache.hadoop.conf.Configuration conf,
+  byte[] entryName,
+  byte[] cf,
+  byte[] cq,
+  String user,
+  boolean hasFilterUser)
+Returns the currently granted permissions for a given table/namespace with associated
+ permissions based on the specified column family, column qualifier and user name.
+

 (package private) static List<UserPermission>
-getUserTablePermissions(org.apache.hadoop.conf.Configuration conf,
-   TableName tableName)
-Returns the currently granted permissions for a given table as a list of
- user plus associated permissions.
[28/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.

2018-06-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
index d58acf6..a7c25bf 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class HBaseAdmin.AbortProcedureFuture
+private static class HBaseAdmin.AbortProcedureFuture
 extends HBaseAdmin.ProcedureFuture<Boolean>
 
 
@@ -235,7 +235,7 @@ extends 
 
 isAbortInProgress
-private boolean isAbortInProgress
+private boolean isAbortInProgress
 
 
 
@@ -252,7 +252,7 @@ extends 
 
 AbortProcedureFuture
-publicAbortProcedureFuture(HBaseAdminadmin,
-public AbortProcedureFuture(HBaseAdmin admin,
+public AbortProcedureFuture(HBaseAdmin admin,
    Long procId,
    Boolean abortProcResponse)
 
@@ -271,7 +271,7 @@ extends 
 
 get
-public Boolean get(long timeout,
+public Boolean get(long timeout,
    TimeUnit unit)
 throws InterruptedException,
        ExecutionException,

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
index 7778cf7..c40eaa2 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
@@ -132,7 +132,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class HBaseAdmin.AddColumnFamilyFuture
+private static class HBaseAdmin.AddColumnFamilyFuture
 extends HBaseAdmin.ModifyTableFuture
 
 
@@ -246,7 +246,7 @@ extends 
 
 AddColumnFamilyFuture
-public AddColumnFamilyFuture(HBaseAdmin admin,
+public AddColumnFamilyFuture(HBaseAdmin admin,
    TableName tableName,
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse response)
 
@@ -265,7 +265,7 @@ extends 
 
 getOperationType
-public String getOperationType()
+public String getOperationType()
 
 Overrides:
 getOperationTypein
 classHBaseAdmin.ModifyTableFuture

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
index c909e9b..d9ab295 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class HBaseAdmin.CreateTableFuture
+private static class HBaseAdmin.CreateTableFuture
 extends HBaseAdmin.TableFuture<Void>
 
 
@@ -268,7 +268,7 @@ extends 
 
 desc
-private final TableDescriptor desc
+private 

[28/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

2018-06-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
index 4d6ae2f..c139704 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":9,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":9,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class RegionStates
+public class RegionStates
 extends Object
 RegionStates contains a set of Maps that describes the in-memory state of the AM, with
 the regions available in the system, the region in transition, the offline regions and
@@ -272,251 +272,263 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 addToOfflineRegions(RegionStates.RegionStateNode regionNode)

+HRegionLocation
+checkReopened(HRegionLocation oldLoc)
+Check whether the region has been reopened.
+
+
 void
 clear()

-
+
+private HRegionLocation
+createRegionForReopen(RegionStates.RegionStateNode node)
+
+
 protected RegionStates.RegionStateNode
 createRegionStateNode(RegionInfo regionInfo)

-
+
 void
 deleteRegion(RegionInfo regionInfo)

-
+
 List<RegionInfo>
 getAssignedRegions()

-
+
 Map<TableName, Map<ServerName, List<RegionInfo>>>
 getAssignmentsByTable()

-
+
 Map<TableName, Map<ServerName, List<RegionInfo>>>
 getAssignmentsByTable(boolean forceByCluster)
 This is an EXPENSIVE clone.

-
+
 double
 getAverageLoad()

-
+
 RegionStates.RegionFailedOpen
 getFailedOpen(RegionInfo regionInfo)

-
-List<RegionInfo>
-getOpenRegionsOfTable(TableName table)
-
-
+
 protected RegionStates.RegionStateNode
 getOrCreateRegionStateNode(RegionInfo regionInfo)

-
+
 RegionStates.ServerStateNode
 getOrCreateServer(ServerName serverName)
 Be judicious calling this method.

-
+
 Map<RegionInfo, ServerName>
 getRegionAssignments()

-
+
 Map<RegionState.State, List<RegionInfo>>
 getRegionByStateOfTable(TableName tableName)

-
+
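checkReopened() above is documented only as "Check whether the region has been reopened."
One plausible reading, sketched with illustrative types (this is a guess at the contract,
not the HBase implementation): a region counts as reopened once its current open sequence
number passes the one recorded before the reopen was requested, and a non-null return means
"not yet, retry with this location".

final class ReopenCheck {
  record Loc(String region, long openSeqNum) {}

  static Loc checkReopened(Loc old, Loc current) {
    boolean reopened = current != null && current.openSeqNum() > old.openSeqNum();
    return reopened ? null : old; // null = done; otherwise keep polling this location
  }
}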
 

[28/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

2018-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
index b6e7636..592c2cc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
@@ -356,3901 +356,3924 @@
 348  public Future<Void> modifyTableAsync(TableDescriptor td) throws IOException {
 349    ModifyTableResponse response = executeCallable(
 350      new MasterCallable<ModifyTableResponse>(getConnection(), getRpcControllerFactory()) {
-351        @Override
-352        protected ModifyTableResponse rpcCall() throws Exception {
-353          setPriority(td.getTableName());
-354          ModifyTableRequest request = RequestConverter.buildModifyTableRequest(
-355            td.getTableName(), td, ng.getNonceGroup(), ng.newNonce());
-356          return master.modifyTable(getRpcController(), request);
-357        }
-358      });
-359    return new ModifyTableFuture(this, td.getTableName(), response);
-360  }
-361
-362  @Override
-363  public List<TableDescriptor> listTableDescriptorsByNamespace(byte[] name) throws IOException {
-364    return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
-365        getRpcControllerFactory()) {
-366      @Override
-367      protected List<TableDescriptor> rpcCall() throws Exception {
-368        return master.listTableDescriptorsByNamespace(getRpcController(),
-369            ListTableDescriptorsByNamespaceRequest.newBuilder()
-370              .setNamespaceName(Bytes.toString(name)).build())
-371            .getTableSchemaList()
-372            .stream()
-373            .map(ProtobufUtil::toTableDescriptor)
-374            .collect(Collectors.toList());
-375      }
-376    });
-377  }
-378
-379  @Override
-380  public List<TableDescriptor> listTableDescriptors(List<TableName> tableNames) throws IOException {
-381    return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
-382        getRpcControllerFactory()) {
-383      @Override
-384      protected List<TableDescriptor> rpcCall() throws Exception {
-385        GetTableDescriptorsRequest req =
-386            RequestConverter.buildGetTableDescriptorsRequest(tableNames);
-387        return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
-388            req));
-389      }
-390    });
-391  }
-392
-393  @Override
-394  public List<RegionInfo> getRegions(final ServerName sn) throws IOException {
-395    AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
-396    // TODO: There is no timeout on this controller. Set one!
-397    HBaseRpcController controller = rpcControllerFactory.newController();
-398    return ProtobufUtil.getOnlineRegions(controller, admin);
-399  }
-400
-401  @Override
-402  public List<RegionInfo> getRegions(TableName tableName) throws IOException {
-403    if (TableName.isMetaTableName(tableName)) {
-404      return Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO);
-405    } else {
-406      return MetaTableAccessor.getTableRegions(connection, tableName, true);
-407    }
-408  }
-409
-410  private static class AbortProcedureFuture extends ProcedureFuture<Boolean> {
-411    private boolean isAbortInProgress;
-412
-413    public AbortProcedureFuture(
-414        final HBaseAdmin admin,
-415        final Long procId,
-416        final Boolean abortProcResponse) {
-417      super(admin, procId);
-418      this.isAbortInProgress = abortProcResponse;
-419    }
-420
-421    @Override
-422    public Boolean get(long timeout, TimeUnit unit)
-423        throws InterruptedException, ExecutionException, TimeoutException {
-424      if (!this.isAbortInProgress) {
-425        return false;
-426      }
-427      super.get(timeout, unit);
-428      return true;
-429    }
-430  }
-431
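AbortProcedureFuture above short-circuits: when the master already answered that no abort is
in progress, get() returns false without waiting; otherwise it blocks on the procedure like
any other future. The same shape with plain java.util.concurrent stand-ins (not the
HBaseAdmin classes):

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

final class AbortFutureShape {
  static Future<Boolean> abortFuture(boolean abortInProgress, Future<?> procedureDone) {
    return new Future<Boolean>() {
      public Boolean get(long timeout, TimeUnit unit)
          throws InterruptedException, ExecutionException, TimeoutException {
        if (!abortInProgress) {
          return false; // master refused the abort: nothing to wait for
        }
        procedureDone.get(timeout, unit); // otherwise wait for the procedure itself
        return true;
      }
      public Boolean get() throws InterruptedException, ExecutionException {
        try {
          return get(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
        } catch (TimeoutException e) {
          throw new ExecutionException(e);
        }
      }
      public boolean cancel(boolean mayInterrupt) { return false; }
      public boolean isCancelled() { return false; }
      public boolean isDone() { return procedureDone.isDone(); }
    };
  }
}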
-432  /** @return Connection used by this object. */
-433  @Override
-434  public Connection getConnection() {
-435    return connection;
-436  }
-437
-438  @Override
-439  public boolean tableExists(final TableName tableName) throws IOException {
-440    return executeCallable(new RpcRetryingCallable<Boolean>() {
-441      @Override
-442      protected Boolean rpcCall(int callTimeout) throws Exception {
-443        return MetaTableAccessor.tableExists(connection, tableName);
-444      }
-445    });
-446  }
-447
-448  @Override
-449  public HTableDescriptor[] listTables() throws IOException {
-450    return listTables((Pattern)null, false);
-451  }
-452
-453  @Override
-454  public HTableDescriptor[] listTables(Pattern pattern) throws IOException {
-455

[28/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

2018-06-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MetaProcedureInterface.MetaOperationType.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MetaProcedureInterface.MetaOperationType.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MetaProcedureInterface.MetaOperationType.html
index 5a3436e..c92c4de 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MetaProcedureInterface.MetaOperationType.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MetaProcedureInterface.MetaOperationType.html
@@ -104,7 +104,9 @@
 
 
 default MetaProcedureInterface.MetaOperationType
-MetaProcedureInterface.getMetaOperationType()
+MetaProcedureInterface.getMetaOperationType()
+Deprecated.
+
 
 
 static MetaProcedureInterface.MetaOperationType

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MetaProcedureInterface.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MetaProcedureInterface.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MetaProcedureInterface.html
index 24b9582..48646fa 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MetaProcedureInterface.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MetaProcedureInterface.html
@@ -105,8 +105,11 @@
 
 class
 RecoverMetaProcedure
-This procedure recovers meta from prior shutdown/ crash of a server, and brings meta online by
- assigning meta region/s.
+Deprecated.
+Do not use any more, leave it here only for compatibility. The recovery work will be
+ done in ServerCrashProcedure directly, and the initial work for meta table
+ will be done by InitMetaProcedure.
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
index fa154b7..a274fa7 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
@@ -185,7 +185,9 @@
 
 
 private ProcedurePrepareLatch
-RecoverMetaProcedure.syncLatch
+RecoverMetaProcedure.syncLatch
+Deprecated.
+
 
 
 private ProcedurePrepareLatch
@@ -310,6 +312,7 @@
 RecoverMetaProcedure(ServerName failedMetaServer,
    boolean shouldSplitLog,
    ProcedurePrepareLatch latch)
+Deprecated.
 Constructor with latch, for blocking/ sync usage
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/Queue.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/Queue.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/Queue.html
index 845f0ed..adc6405 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/Queue.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/Queue.html
@@ -104,7 +104,12 @@
 
 
 (package private) class
-MetaQueue
+MetaQueue
+Deprecated.
+only used for RecoverMetaProcedure. Should be removed along with
+ RecoverMetaProcedure.
+
+
 
 
 (package private) class

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
index 07630ad..0c685e9 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
@@ -243,19 +243,23 @@
 
 
 TableProcedureInterface.TableOperationType
-ReopenTableRegionsProcedure.getTableOperationType()
+InitMetaProcedure.getTableOperationType()
 
 
 TableProcedureInterface.TableOperationType
-DeleteNamespaceProcedure.getTableOperationType()
+ReopenTableRegionsProcedure.getTableOperationType()
 
 
+TableProcedureInterface.TableOperationType

[28/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

2018-06-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.MergeCommand.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.MergeCommand.html b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.MergeCommand.html
index f236300..513d2ad 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.MergeCommand.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.MergeCommand.html
@@ -26,1048 +26,1115 @@
 018
 019package org.apache.hadoop.hbase.backup.impl;
 020
-021import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH;
-022import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC;
-023import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
-024import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
-025import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
-026import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
-027import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
-028import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
-029import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
-030import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_BACKUP_DESC;
-031import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
-032import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
-033import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
-034import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_LIST_DESC;
-035import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
-036import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
-037import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME;
-038import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_DESC;
-039
-040import java.io.IOException;
-041import java.net.URI;
-042import java.util.List;
+021import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BACKUP_LIST_DESC;
+022import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH;
+023import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC;
+024import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
+025import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
+026import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_KEEP;
+027import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_KEEP_DESC;
+028import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_LIST;
+029import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
+030import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
+031import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
+032import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
+033import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
+034import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_BACKUP_DESC;
+035import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
+036import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
+037import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
+038import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_LIST_DESC;
+039import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
+040import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
+041import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME;
+042import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_DESC;
 043
-044import org.apache.commons.lang3.StringUtils;
-045import org.apache.hadoop.conf.Configuration;
-046import org.apache.hadoop.conf.Configured;
-047import org.apache.hadoop.fs.FileSystem;
-048import org.apache.hadoop.fs.Path;
-049import org.apache.hadoop.hbase.HBaseConfiguration;
-050import org.apache.hadoop.hbase.TableName;
-051import org.apache.hadoop.hbase.backup.BackupAdmin;
-052import org.apache.hadoop.hbase.backup.BackupInfo;
-053import

[28/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index aa48364..9549aa5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -2830,843 +2830,858 @@
 2822   * @return true if master is in maintenanceMode
 2823   */
 2824  @Override
-2825  public boolean isInMaintenanceMode() {
-2826    return maintenanceModeTracker.isInMaintenanceMode();
-2827  }
-2828
-2829  @VisibleForTesting
-2830  public void setInitialized(boolean isInitialized) {
-2831    procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
-2832  }
-2833
-2834  @Override
-2835  public ProcedureEvent<?> getInitializedEvent() {
-2836    return initialized;
-2837  }
-2838
-2839  /**
-2840   * ServerCrashProcessingEnabled is set false before completing assignMeta to prevent processing
-2841   * of crashed servers.
-2842   * @return true if assignMeta has completed;
-2843   */
-2844  @Override
-2845  public boolean isServerCrashProcessingEnabled() {
-2846    return serverCrashProcessingEnabled.isReady();
-2847  }
-2848
-2849  @VisibleForTesting
-2850  public void setServerCrashProcessingEnabled(final boolean b) {
-2851    procedureExecutor.getEnvironment().setEventReady(serverCrashProcessingEnabled, b);
-2852  }
-2853
-2854  public ProcedureEvent<?> getServerCrashProcessingEnabledEvent() {
-2855    return serverCrashProcessingEnabled;
-2856  }
-2857
-2858  /**
-2859   * Compute the average load across all region servers.
-2860   * Currently, this uses a very naive computation - just uses the number of
-2861   * regions being served, ignoring stats about number of requests.
-2862   * @return the average load
-2863   */
-2864  public double getAverageLoad() {
-2865    if (this.assignmentManager == null) {
-2866      return 0;
-2867    }
-2868
-2869    RegionStates regionStates = this.assignmentManager.getRegionStates();
-2870    if (regionStates == null) {
-2871      return 0;
-2872    }
-2873    return regionStates.getAverageLoad();
-2874  }
-2875
-2876  /*
-2877   * @return the count of region split plans executed
-2878   */
-2879  public long getSplitPlanCount() {
-2880    return splitPlanCount;
-2881  }
-2882
-2883  /*
-2884   * @return the count of region merge plans executed
-2885   */
-2886  public long getMergePlanCount() {
-2887    return mergePlanCount;
-2888  }
-2889
-2890  @Override
-2891  public boolean registerService(Service instance) {
-2892    /*
-2893     * No stacking of instances is allowed for a single service name
-2894     */
-2895    Descriptors.ServiceDescriptor serviceDesc = instance.getDescriptorForType();
-2896    String serviceName = CoprocessorRpcUtils.getServiceName(serviceDesc);
-2897    if (coprocessorServiceHandlers.containsKey(serviceName)) {
-2898      LOG.error("Coprocessor service "+serviceName+
-2899          " already registered, rejecting request from "+instance
-2900      );
-2901      return false;
-2902    }
-2903
-2904    coprocessorServiceHandlers.put(serviceName, instance);
-2905    if (LOG.isDebugEnabled()) {
-2906      LOG.debug("Registered master coprocessor service: service="+serviceName);
-2907    }
-2908    return true;
-2909  }
-2910
-2911  /**
-2912   * Utility for constructing an instance of the passed HMaster class.
-2913   * @param masterClass
-2914   * @return HMaster instance.
-2915   */
-2916  public static HMaster constructMaster(Class<? extends HMaster> masterClass,
-2917      final Configuration conf)  {
-2918    try {
-2919      Constructor<? extends HMaster> c = masterClass.getConstructor(Configuration.class);
-2920      return c.newInstance(conf);
-2921    } catch(Exception e) {
-2922      Throwable error = e;
-2923      if (e instanceof InvocationTargetException &&
-2924          ((InvocationTargetException)e).getTargetException() != null) {
-2925        error = ((InvocationTargetException)e).getTargetException();
-2926      }
-2927      throw new RuntimeException("Failed construction of Master: " + masterClass.toString() + ". "
-2928        , error);
-2929    }
-2930  }
-2931
-2932  /**
-2933   * @see org.apache.hadoop.hbase.master.HMasterCommandLine
-2934   */
-2935  public static void main(String [] args) {
-2936    LOG.info("STARTING service " + HMaster.class.getSimpleName());
-2937    VersionInfo.logVersion();
-2938    new HMasterCommandLine(HMaster.class).doMain(args);
-2939  }
-2940
-2941  public HFileCleaner getHFileCleaner() {
-2942    return this.hfileCleaner;
-2943  }
-2944
-2945  public LogCleaner getLogCleaner() {
-2946    return this.logCleaner;
-2947  }
-2948
-2949  /**
-2950   * @return the underlying snapshot manager
-2951   */
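A minimal usage sketch for the reflection helper above, assuming only the signature shown in the listing (HMasterCommandLine, per the @see, is the usual caller):

Configuration conf = HBaseConfiguration.create();
// constructMaster looks up the (Configuration) constructor of the given class
// and unwraps any InvocationTargetException into the RuntimeException above.
HMaster master = HMaster.constructMaster(HMaster.class, conf);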

[28/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

2018-05-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
index 3da432b..d30fa8f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
@@ -928,7690 +928,7698 @@
 920      Collection<HStore> stores = this.stores.values();
 921      try {
 922        // update the stores that we are replaying
-923        stores.forEach(HStore::startReplayingFromWAL);
-924        // Recover any edits if available.
-925        maxSeqId = Math.max(maxSeqId,
-926          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
-927        // Make sure mvcc is up to max.
-928        this.mvcc.advanceTo(maxSeqId);
-929      } finally {
-930        // update the stores that we are done replaying
-931        stores.forEach(HStore::stopReplayingFromWAL);
-932      }
-933    }
-934    this.lastReplayedOpenRegionSeqId = maxSeqId;
-935
-936    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-937    this.writestate.flushRequested = false;
-938    this.writestate.compacting.set(0);
-939
-940    if (this.writestate.writesEnabled) {
-941      // Remove temporary data left over from old regions
-942      status.setStatus("Cleaning up temporary data from old regions");
-943      fs.cleanupTempDir();
-944    }
-945
-946    if (this.writestate.writesEnabled) {
-947      status.setStatus("Cleaning up detritus from prior splits");
-948      // Get rid of any splits or merges that were lost in-progress.  Clean out
-949      // these directories here on open.  We may be opening a region that was
-950      // being split but we crashed in the middle of it all.
-951      fs.cleanupAnySplitDetritus();
-952      fs.cleanupMergesDir();
-953    }
-954
-955    // Initialize split policy
-956    this.splitPolicy = RegionSplitPolicy.create(this, conf);
-957
-958    // Initialize flush policy
-959    this.flushPolicy = FlushPolicyFactory.create(this, conf);
-960
-961    long lastFlushTime = EnvironmentEdgeManager.currentTime();
-962    for (HStore store: stores.values()) {
-963      this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-964    }
-965
-966    // Use maximum of log sequenceid or that which was found in stores
-967    // (particularly if no recovered edits, seqid will be -1).
-968    long maxSeqIdFromFile =
-969      WALSplitter.getMaxRegionSequenceId(fs.getFileSystem(), fs.getRegionDir());
-970    long nextSeqId = Math.max(maxSeqId, maxSeqIdFromFile) + 1;
-971    if (writestate.writesEnabled) {
-972      WALSplitter.writeRegionSequenceIdFile(fs.getFileSystem(), fs.getRegionDir(), nextSeqId - 1);
-973    }
-974
-975    LOG.info("Opened {}; next sequenceid={}", this.getRegionInfo().getShortNameToLog(), nextSeqId);
-976
-977    // A region can be reopened if failed a split; reset flags
-978    this.closing.set(false);
-979    this.closed.set(false);
-980
-981    if (coprocessorHost != null) {
-982      status.setStatus("Running coprocessor post-open hooks");
-983      coprocessorHost.postOpen();
-984    }
+923        LOG.debug("replaying wal for " + this.getRegionInfo().getEncodedName());
+924        stores.forEach(HStore::startReplayingFromWAL);
+925        // Recover any edits if available.
+926        maxSeqId = Math.max(maxSeqId,
+927          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
+928        // Make sure mvcc is up to max.
+929        this.mvcc.advanceTo(maxSeqId);
+930      } finally {
+931        LOG.debug("stopping wal replay for " + this.getRegionInfo().getEncodedName());
+932        // update the stores that we are done replaying
+933        stores.forEach(HStore::stopReplayingFromWAL);
+934      }
+935    }
+936    this.lastReplayedOpenRegionSeqId = maxSeqId;
+937
+938    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+939    this.writestate.flushRequested = false;
+940    this.writestate.compacting.set(0);
+941
+942    if (this.writestate.writesEnabled) {
+943      LOG.debug("Cleaning up temporary data for " + this.getRegionInfo().getEncodedName());
+944      // Remove temporary data left over from old regions
+945      status.setStatus("Cleaning up temporary data from old regions");
+946      fs.cleanupTempDir();
+947    }
+948
+949    if (this.writestate.writesEnabled) {
+950      status.setStatus("Cleaning up detritus from prior splits");
+951      // Get rid of any splits or merges that were lost in-progress.  Clean out
+952      // these directories here on open.  We may be

[28/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

2018-05-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html
index 65b5edb..7f42212 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html
@@ -1413,7 +1413,7 @@
 1405    ArrayListMultimap<String, ServerName> serversByHostname = ArrayListMultimap.create();
 1406    for (ServerName server : servers) {
 1407      assignments.put(server, new ArrayList<>());
-1408      serversByHostname.put(server.getHostname(), server);
+1408      serversByHostname.put(server.getHostnameLowerCase(), server);
 1409    }
 1410
 1411    // Collection of the hostnames that used to have regions
@@ -1434,13 +1434,13 @@
 1426      ServerName oldServerName = entry.getValue();
 1427      List<ServerName> localServers = new ArrayList<>();
 1428      if (oldServerName != null) {
-1429        localServers = serversByHostname.get(oldServerName.getHostname());
+1429        localServers = serversByHostname.get(oldServerName.getHostnameLowerCase());
 1430      }
 1431      if (localServers.isEmpty()) {
 1432        // No servers on the new cluster match up with this hostname, assign randomly, later.
 1433        randomAssignRegions.add(region);
 1434        if (oldServerName != null) {
-1435          oldHostsNoLongerPresent.add(oldServerName.getHostname());
+1435          oldHostsNoLongerPresent.add(oldServerName.getHostnameLowerCase());
 1436        }
 1437      } else if (localServers.size() == 1) {
 1438        // the usual case - one new server on same host

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
index 65b5edb..7f42212 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
@@ -1413,7 +1413,7 @@
 1405    ArrayListMultimap<String, ServerName> serversByHostname = ArrayListMultimap.create();
 1406    for (ServerName server : servers) {
 1407      assignments.put(server, new ArrayList<>());
-1408      serversByHostname.put(server.getHostname(), server);
+1408      serversByHostname.put(server.getHostnameLowerCase(), server);
 1409    }
 1410
 1411    // Collection of the hostnames that used to have regions
@@ -1434,13 +1434,13 @@
 1426      ServerName oldServerName = entry.getValue();
 1427      List<ServerName> localServers = new ArrayList<>();
 1428      if (oldServerName != null) {
-1429        localServers = serversByHostname.get(oldServerName.getHostname());
+1429        localServers = serversByHostname.get(oldServerName.getHostnameLowerCase());
 1430      }
 1431      if (localServers.isEmpty()) {
 1432        // No servers on the new cluster match up with this hostname, assign randomly, later.
 1433        randomAssignRegions.add(region);
 1434        if (oldServerName != null) {
-1435          oldHostsNoLongerPresent.add(oldServerName.getHostname());
+1435          oldHostsNoLongerPresent.add(oldServerName.getHostnameLowerCase());
 1436        }
 1437      } else if (localServers.size() == 1) {
 1438        // the usual case - one new server on same host

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
index 65b5edb..7f42212 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
@@ -1413,7 +1413,7 @@
 1405    ArrayListMultimap<String, ServerName> serversByHostname = ArrayListMultimap.create();
 1406    for (ServerName server : servers) {
 1407      assignments.put(server, new ArrayList<>());
-1408
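The identical hunks above switch the retain-assignment index from getHostname() to getHostnameLowerCase(); a minimal sketch of the resulting case-insensitive grouping, using only calls visible in the diff:

ArrayListMultimap<String, ServerName> serversByHostname = ArrayListMultimap.create();
for (ServerName server : servers) {
  // Key by lowercased hostname so old assignments still match even if the
  // reported DNS case changed between restarts.
  serversByHostname.put(server.getHostnameLowerCase(), server);
}
List<ServerName> localServers =
    serversByHostname.get(oldServerName.getHostnameLowerCase());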

[28/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index a97dfdc..2b1b6c6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -2370,1287 +2370,1292 @@
 2362  }
 2363
 2364  @Override
-2365  public long modifyTable(final 
TableName tableName, final TableDescriptor descriptor,
+2365  public long modifyTable(final 
TableName tableName, final TableDescriptor newDescriptor,
 2366  final long nonceGroup, final long 
nonce) throws IOException {
 2367checkInitialized();
-2368
sanityCheckTableDescriptor(descriptor);
+2368
sanityCheckTableDescriptor(newDescriptor);
 2369
 2370return 
MasterProcedureUtil.submitProcedure(
 2371new 
MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
 2372  @Override
 2373  protected void run() throws 
IOException {
-2374
getMaster().getMasterCoprocessorHost().preModifyTable(tableName, descriptor);
-2375
-2376
LOG.info(getClientIdAuditPrefix() + " modify " + tableName);
+2374TableDescriptor oldDescriptor = 
getMaster().getTableDescriptors().get(tableName);
+2375
getMaster().getMasterCoprocessorHost()
+2376  .preModifyTable(tableName, 
oldDescriptor, newDescriptor);
 2377
-2378// Execute the operation 
synchronously - wait for the operation completes before continuing.
-2379//
-2380// We need to wait for the 
procedure to potentially fail due to "prepare" sanity
-2381// checks. This will block only 
the beginning of the procedure. See HBASE-19953.
-2382ProcedurePrepareLatch latch = 
ProcedurePrepareLatch.createBlockingLatch();
-2383submitProcedure(new 
ModifyTableProcedure(procedureExecutor.getEnvironment(),
-2384descriptor, latch));
-2385latch.await();
-2386
-2387
getMaster().getMasterCoprocessorHost().postModifyTable(tableName, 
descriptor);
-2388  }
-2389
-2390  @Override
-2391  protected String getDescription() 
{
-2392return "ModifyTableProcedure";
-2393  }
-2394});
-2395  }
-2396
-2397  public long restoreSnapshot(final 
SnapshotDescription snapshotDesc,
-2398  final long nonceGroup, final long 
nonce, final boolean restoreAcl) throws IOException {
-2399checkInitialized();
-2400
getSnapshotManager().checkSnapshotSupport();
-2401
-2402// Ensure namespace exists. Will 
throw exception if non-known NS.
-2403final TableName dstTable = 
TableName.valueOf(snapshotDesc.getTable());
-2404
getClusterSchema().getNamespace(dstTable.getNamespaceAsString());
-2405
-2406return 
MasterProcedureUtil.submitProcedure(
-2407new 
MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
-2408  @Override
-2409  protected void run() throws 
IOException {
-2410  setProcId(
-2411
getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(), 
restoreAcl));
-2412  }
-2413
-2414  @Override
-2415  protected String getDescription() 
{
-2416return 
"RestoreSnapshotProcedure";
-2417  }
-2418});
-2419  }
-2420
-2421  private void checkTableExists(final 
TableName tableName)
-2422  throws IOException, 
TableNotFoundException {
-2423if 
(!MetaTableAccessor.tableExists(getConnection(), tableName)) {
-2424  throw new 
TableNotFoundException(tableName);
-2425}
-2426  }
-2427
-2428  @Override
-2429  public void checkTableModifiable(final 
TableName tableName)
-2430  throws IOException, 
TableNotFoundException, TableNotDisabledException {
-2431if (isCatalogTable(tableName)) {
-2432  throw new IOException("Can't 
modify catalog tables");
-2433}
-2434checkTableExists(tableName);
-2435TableState ts = 
getTableStateManager().getTableState(tableName);
-2436if (!ts.isDisabled()) {
-2437  throw new 
TableNotDisabledException("Not DISABLED; " + ts);
-2438}
-2439  }
-2440
-2441  public ClusterMetrics 
getClusterMetricsWithoutCoprocessor() throws InterruptedIOException {
-2442return 
getClusterMetricsWithoutCoprocessor(EnumSet.allOf(Option.class));
-2443  }
-2444
-2445  public ClusterMetrics 
getClusterMetricsWithoutCoprocessor(EnumSetOption options)
-2446  throws InterruptedIOException {
-2447ClusterMetricsBuilder builder = 
ClusterMetricsBuilder.newBuilder();
-2448// given that hbase1 can't submit 
the request with Option,
-2449// we return all information to 
client if the list of Option is empty.
-2450if (options.isEmpty()) {
-2451  options = 
EnumSet.allOf(Option.class);
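The submit-and-wait pattern from the modifyTable hunk above, isolated; all names come from the diff itself:

// A blocking prepare latch makes the caller wait for the procedure's
// "prepare" step, so sanity-check failures surface synchronously to the
// client (see the HBASE-19953 comment in the hunk above).
ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
submitProcedure(new ModifyTableProcedure(procedureExecutor.getEnvironment(),
    newDescriptor, latch));
latch.await();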

[28/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

2018-05-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html
index 411bb17..24bd483 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/DefaultMemStore.html
@@ -105,127 +105,122 @@
 097return new 
MemStoreSnapshot(this.snapshotId, this.snapshot);
 098  }
 099
-100  /**
-101   * On flush, how much memory we will clear from the active cell set.
-102   *
-103   * @return size of data that is going to be flushed from active set
-104   */
-105  @Override
-106  public MemStoreSize getFlushableSize() {
-107    MemStoreSize snapshotSize = getSnapshotSize();
-108    return snapshotSize.getDataSize() > 0 ? snapshotSize
-109        : new MemStoreSize(active.getMemStoreSize());
-110  }
-111
-112  @Override
-113  protected long keySize() {
-114    return this.active.keySize();
-115  }
-116
-117  @Override
-118  protected long heapSize() {
-119    return this.active.heapSize();
-120  }
-121
-122  @Override
-123  /*
-124   * Scanners are ordered from 0 (oldest) 
to newest in increasing order.
-125   */
-126  public ListKeyValueScanner 
getScanners(long readPt) throws IOException {
-127ListKeyValueScanner list = 
new ArrayList();
-128addToScanners(active, readPt, 
list);
-129
addToScanners(snapshot.getAllSegments(), readPt, list);
-130return list;
-131  }
-132
-133  @Override
-134  protected ListSegment 
getSegments() throws IOException {
-135ListSegment list = new 
ArrayList(2);
-136list.add(this.active);
-137list.add(this.snapshot);
-138return list;
-139  }
-140
-141  /**
-142   * @param cell Find the row that comes 
after this one.  If null, we return the
-143   * first.
-144   * @return Next row or null if none 
found.
-145   */
-146  Cell getNextRow(final Cell cell) {
-147return getLowest(
-148getNextRow(cell, 
this.active.getCellSet()),
-149getNextRow(cell, 
this.snapshot.getCellSet()));
-150  }
-151
-152  @Override public void 
updateLowestUnflushedSequenceIdInWAL(boolean onlyIfMoreRecent) {
-153  }
-154
-155  @Override
-156  public MemStoreSize size() {
-157return new 
MemStoreSize(active.getMemStoreSize());
-158  }
-159
-160  /**
-161   * Check whether anything need to be 
done based on the current active set size
-162   * Nothing need to be done for the 
DefaultMemStore
-163   */
-164  @Override
-165  protected void checkActiveSize() {
-166return;
-167  }
-168
-169  @Override
-170  public long preFlushSeqIDEstimation() 
{
-171return HConstants.NO_SEQNUM;
-172  }
-173
-174  @Override public boolean isSloppy() {
-175return false;
-176  }
-177
-178  /**
-179   * Code to help figure if our 
approximation of object heap sizes is close
-180   * enough.  See hbase-900.  Fills 
memstores then waits so user can heap
-181   * dump and bring up resultant hprof in 
something like jprofiler which
-182   * allows you get 'deep size' on 
objects.
-183   * @param args main args
-184   */
-185  public static void main(String [] args) 
{
-186RuntimeMXBean runtime = 
ManagementFactory.getRuntimeMXBean();
-187LOG.info("vmName=" + 
runtime.getVmName() + ", vmVendor=" +
-188  runtime.getVmVendor() + ", 
vmVersion=" + runtime.getVmVersion());
-189LOG.info("vmInputArguments=" + 
runtime.getInputArguments());
-190DefaultMemStore memstore1 = new 
DefaultMemStore();
-191// TODO: x32 vs x64
-192final int count = 1;
-193byte [] fam = Bytes.toBytes("col");
-194byte [] qf = Bytes.toBytes("umn");
-195byte [] empty = new byte[0];
-196MemStoreSizing memstoreSizing = new 
MemStoreSizing();
+100  @Override
+101  public MemStoreSize getFlushableSize() {
+102    MemStoreSize mss = getSnapshotSize();
+103    return mss.getDataSize() > 0? mss: this.active.getMemStoreSize();
+104  }
+105
+106  @Override
+107  protected long keySize() {
+108    return this.active.getDataSize();
+109  }
+110
+111  @Override
+112  protected long heapSize() {
+113    return this.active.getHeapSize();
+114  }
+115
+116  @Override
+117  /*
+118   * Scanners are ordered from 0 (oldest) 
to newest in increasing order.
+119   */
+120  public ListKeyValueScanner 
getScanners(long readPt) throws IOException {
+121ListKeyValueScanner list = 
new ArrayList();
+122addToScanners(active, readPt, 
list);
+123
addToScanners(snapshot.getAllSegments(), readPt, list);
+124return list;
+125  }
+126
+127  @Override
+128  protected ListSegment 
getSegments() throws IOException {
+129ListSegment list = new 
ArrayList(2);
+130list.add(this.active);
+131list.add(this.snapshot);
+132return list;
+133  }
+134
+135  /**
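A hedged illustration of the getFlushableSize() contract shown in both versions above: a pending snapshot takes priority, otherwise the active set is what a flush would clear. The 'memstore' variable is an assumed DefaultMemStore instance.

MemStoreSize flushable = memstore.getFlushableSize();
// Snapshot data size if a snapshot is pending, else the active set's size.
long bytesToFlush = flushable.getDataSize();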

[28/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

2018-05-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html
index 2510283..418c60c 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html
@@ -77,77 +77,77 @@
 069import 
org.apache.hadoop.hbase.client.RowMutations;
 070import 
org.apache.hadoop.hbase.client.Scan;
 071import 
org.apache.hadoop.hbase.client.Table;
-072import 
org.apache.hadoop.hbase.filter.BinaryComparator;
-073import 
org.apache.hadoop.hbase.filter.Filter;
-074import 
org.apache.hadoop.hbase.filter.FilterAllFilter;
-075import 
org.apache.hadoop.hbase.filter.FilterList;
-076import 
org.apache.hadoop.hbase.filter.PageFilter;
-077import 
org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-078import 
org.apache.hadoop.hbase.filter.WhileMatchFilter;
-079import 
org.apache.hadoop.hbase.io.compress.Compression;
-080import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-081import 
org.apache.hadoop.hbase.io.hfile.RandomDistribution;
-082import 
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-083import 
org.apache.hadoop.hbase.regionserver.BloomType;
-084import 
org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-085import 
org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
-086import 
org.apache.hadoop.hbase.trace.SpanReceiverHost;
-087import 
org.apache.hadoop.hbase.trace.TraceUtil;
-088import 
org.apache.hadoop.hbase.util.ByteArrayHashKey;
-089import 
org.apache.hadoop.hbase.util.Bytes;
-090import 
org.apache.hadoop.hbase.util.Hash;
-091import 
org.apache.hadoop.hbase.util.MurmurHash;
-092import 
org.apache.hadoop.hbase.util.Pair;
-093import 
org.apache.hadoop.hbase.util.YammerHistogramUtils;
-094import 
org.apache.hadoop.io.LongWritable;
-095import org.apache.hadoop.io.Text;
-096import org.apache.hadoop.mapreduce.Job;
-097import 
org.apache.hadoop.mapreduce.Mapper;
-098import 
org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-099import 
org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-100import 
org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
-101import org.apache.hadoop.util.Tool;
-102import 
org.apache.hadoop.util.ToolRunner;
-103import 
org.apache.htrace.core.ProbabilitySampler;
-104import org.apache.htrace.core.Sampler;
-105import 
org.apache.htrace.core.TraceScope;
-106import 
org.apache.yetus.audience.InterfaceAudience;
-107import org.slf4j.Logger;
-108import org.slf4j.LoggerFactory;
-109import 
org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
-110import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-111
-112/**
-113 * Script used evaluating HBase performance and scalability.  Runs a HBase
-114 * client that steps through one of a set of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random writes test, etc.). Pass on the
-116 * command-line which test to run and how many clients are participating in
-117 * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * <p>This class sets up and runs the evaluation programs described in
-120 * Section 7, <i>Performance Evaluation</i>, of the <a
-121 * href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
-122 * paper, pages 8-10.
-123 *
-124 * <p>By default, runs as a mapreduce job where each mapper runs a single test
-125 * client. Can also run as a non-mapreduce, multithreaded application by
-126 * specifying {@code --nomapred}. Each client does about 1GB of data, unless
-127 * specified otherwise.
-128 */
-129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-130public class PerformanceEvaluation 
extends Configured implements Tool {
-131  static final String RANDOM_SEEK_SCAN = 
"randomSeekScan";
-132  static final String RANDOM_READ = 
"randomRead";
-133  private static final Logger LOG = 
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-134  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-135  static {
-136
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-137  }
-138
-139  public static final String TABLE_NAME = 
"TestTable";
-140  public static final byte[] FAMILY_NAME 
= Bytes.toBytes("info");
-141  public static final byte [] COLUMN_ZERO 
= Bytes.toBytes("" + 0);
-142  public static final byte [] 
QUALIFIER_NAME = COLUMN_ZERO;
+072import 
org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+073import 
org.apache.hadoop.hbase.filter.BinaryComparator;
+074import 
org.apache.hadoop.hbase.filter.Filter;
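A hedged sketch of driving the benchmark described in the class comment above programmatically; PerformanceEvaluation implements Tool, so ToolRunner can run it. The (Configuration) constructor and the exact argument list are assumptions for this snapshot.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.PerformanceEvaluation;
import org.apache.hadoop.util.ToolRunner;

public class RunPerformanceEvaluation {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // --nomapred: multithreaded local client instead of a mapreduce job;
    // randomRead with one client, per the usage in the class comment.
    int rc = ToolRunner.run(conf, new PerformanceEvaluation(conf),
        new String[] { "--nomapred", "randomRead", "1" });
    System.exit(rc);
  }
}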

[28/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

2018-05-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
index e1bc325..63e7421 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
@@ -66,5125 +66,5224 @@
 058import 
java.util.concurrent.TimeoutException;
 059import 
java.util.concurrent.atomic.AtomicBoolean;
 060import 
java.util.concurrent.atomic.AtomicInteger;
-061import org.apache.commons.io.IOUtils;
-062import 
org.apache.commons.lang3.RandomStringUtils;
-063import 
org.apache.commons.lang3.StringUtils;
-064import 
org.apache.hadoop.conf.Configuration;
-065import 
org.apache.hadoop.conf.Configured;
-066import 
org.apache.hadoop.fs.FSDataOutputStream;
-067import org.apache.hadoop.fs.FileStatus;
-068import org.apache.hadoop.fs.FileSystem;
-069import org.apache.hadoop.fs.Path;
-070import 
org.apache.hadoop.fs.permission.FsAction;
-071import 
org.apache.hadoop.fs.permission.FsPermission;
-072import 
org.apache.hadoop.hbase.Abortable;
-073import org.apache.hadoop.hbase.Cell;
-074import 
org.apache.hadoop.hbase.CellUtil;
-075import 
org.apache.hadoop.hbase.ClusterMetrics;
-076import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-077import 
org.apache.hadoop.hbase.HBaseConfiguration;
-078import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-079import 
org.apache.hadoop.hbase.HConstants;
-080import 
org.apache.hadoop.hbase.HRegionInfo;
-081import 
org.apache.hadoop.hbase.HRegionLocation;
-082import 
org.apache.hadoop.hbase.KeyValue;
-083import 
org.apache.hadoop.hbase.MasterNotRunningException;
-084import 
org.apache.hadoop.hbase.MetaTableAccessor;
-085import 
org.apache.hadoop.hbase.RegionLocations;
-086import 
org.apache.hadoop.hbase.ServerName;
-087import 
org.apache.hadoop.hbase.TableName;
-088import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-089import 
org.apache.hadoop.hbase.client.Admin;
-090import 
org.apache.hadoop.hbase.client.ClusterConnection;
-091import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-092import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-093import 
org.apache.hadoop.hbase.client.Connection;
-094import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-095import 
org.apache.hadoop.hbase.client.Delete;
-096import 
org.apache.hadoop.hbase.client.Get;
-097import 
org.apache.hadoop.hbase.client.Put;
-098import 
org.apache.hadoop.hbase.client.RegionInfo;
-099import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-100import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-101import 
org.apache.hadoop.hbase.client.Result;
-102import 
org.apache.hadoop.hbase.client.RowMutations;
-103import 
org.apache.hadoop.hbase.client.Table;
-104import 
org.apache.hadoop.hbase.client.TableDescriptor;
-105import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-106import 
org.apache.hadoop.hbase.client.TableState;
-107import 
org.apache.hadoop.hbase.io.FileLink;
-108import 
org.apache.hadoop.hbase.io.HFileLink;
-109import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-110import 
org.apache.hadoop.hbase.io.hfile.HFile;
-111import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-112import 
org.apache.hadoop.hbase.master.MasterFileSystem;
-113import 
org.apache.hadoop.hbase.master.RegionState;
-114import 
org.apache.hadoop.hbase.regionserver.HRegion;
-115import 
org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-116import 
org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-117import 
org.apache.hadoop.hbase.replication.ReplicationException;
-118import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-119import 
org.apache.hadoop.hbase.security.UserProvider;
-120import 
org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
-121import 
org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
-122import 
org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
-123import 
org.apache.hadoop.hbase.util.hbck.ReplicationChecker;
-124import 
org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
-125import 
org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
-126import org.apache.hadoop.hbase.wal.WAL;
-127import 
org.apache.hadoop.hbase.wal.WALFactory;
-128import 
org.apache.hadoop.hbase.wal.WALSplitter;
-129import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-130import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-131import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-132import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-133import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-134import 
org.apache.hadoop.ipc.RemoteException;
-135import 
org.apache.hadoop.security.UserGroupInformation;
-136import 

[28/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
index e63cd50..d8c0d2b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
@@ -422,7 +422,7 @@
 414  }
 415
 416  /**
-417   * {@link #listTables(boolean)}
+417   * {@link 
#listTableDescriptors(boolean)}
 418   */
 419  @Override
 420  public 
CompletableFutureListTableDescriptor 
listTableDescriptors(Pattern pattern,
@@ -3476,16 +3476,79 @@
 3468return future;
 3469  }
 3470
-3471  private CompletableFuture<CacheEvictionStats> clearBlockCache(ServerName serverName,
-3472      List<RegionInfo> hris) {
-3473    return this.<CacheEvictionStats> newAdminCaller().action((controller, stub) -> this
-3474      .<ClearRegionBlockCacheRequest, ClearRegionBlockCacheResponse, CacheEvictionStats> adminCall(
-3475        controller, stub, RequestConverter.buildClearRegionBlockCacheRequest(hris),
-3476        (s, c, req, done) -> s.clearRegionBlockCache(controller, req, done),
-3477        resp -> ProtobufUtil.toCacheEvictionStats(resp.getStats())))
-3478      .serverName(serverName).call();
-3479  }
-3480}
+3471  @Override
+3472  public CompletableFuture<Void> cloneTableSchema(TableName tableName, TableName newTableName,
+3473      boolean preserveSplits) {
+3474    CompletableFuture<Void> future = new CompletableFuture<>();
+3475    tableExists(tableName).whenComplete(
+3476      (exist, err) -> {
+3477        if (err != null) {
+3478          future.completeExceptionally(err);
+3479          return;
+3480        }
+3481        if (!exist) {
+3482          future.completeExceptionally(new TableNotFoundException(tableName));
+3483          return;
+3484        }
+3485        tableExists(newTableName).whenComplete(
+3486          (exist1, err1) -> {
+3487            if (err1 != null) {
+3488              future.completeExceptionally(err1);
+3489              return;
+3490            }
+3491            if (exist1) {
+3492              future.completeExceptionally(new TableExistsException(newTableName));
+3493              return;
+3494            }
+3495            getDescriptor(tableName).whenComplete(
+3496              (tableDesc, err2) -> {
+3497                if (err2 != null) {
+3498                  future.completeExceptionally(err2);
+3499                  return;
+3500                }
+3501                TableDescriptor newTableDesc
+3502                    = TableDescriptorBuilder.copy(newTableName, tableDesc);
+3503                if (preserveSplits) {
+3504                  getTableSplits(tableName).whenComplete((splits, err3) -> {
+3505                    if (err3 != null) {
+3506                      future.completeExceptionally(err3);
+3507                    } else {
+3508                      createTable(newTableDesc, splits).whenComplete(
+3509                        (result, err4) -> {
+3510                          if (err4 != null) {
+3511                            future.completeExceptionally(err4);
+3512                          } else {
+3513                            future.complete(result);
+3514                          }
+3515                        });
+3516                    }
+3517                  });
+3518                } else {
+3519                  createTable(newTableDesc).whenComplete(
+3520                    (result, err5) -> {
+3521                      if (err5 != null) {
+3522                        future.completeExceptionally(err5);
+3523                      } else {
+3524                        future.complete(result);
+3525                      }
+3526                    });
+3527                }
+3528              });
+3529          });
+3530      });
+3531    return future;
+3532  }
+3533
+3534  private CompletableFuture<CacheEvictionStats> clearBlockCache(ServerName serverName,
+3535      List<RegionInfo> hris) {
+3536    return this.<CacheEvictionStats> newAdminCaller().action((controller, stub) -> this
+3537      .<ClearRegionBlockCacheRequest, ClearRegionBlockCacheResponse, CacheEvictionStats> adminCall(
+3538        controller, stub, RequestConverter.buildClearRegionBlockCacheRequest(hris),
+3539        (s, c, req, done) -> s.clearRegionBlockCache(controller, req, done),
+3540        resp -> ProtobufUtil.toCacheEvictionStats(resp.getStats())))
+3541      .serverName(serverName).call();
+3542  }
+3543}
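A hedged usage sketch for the cloneTableSchema(...) method added above: copy a table's schema (optionally its split points) to a new, empty table via the public async client. Connection setup is standard client API; table names are illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CloneSchemaExample {
  public static void main(String[] args) throws Exception {
    try (AsyncConnection conn =
        ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
      conn.getAdmin()
          .cloneTableSchema(TableName.valueOf("orders"),
              TableName.valueOf("orders_copy"), true /* preserveSplits */)
          .join(); // block until the schema-only clone completes
    }
  }
}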
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html

[28/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

2018-03-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftUtilities.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftUtilities.html 
b/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftUtilities.html
index 4f57e7c..338e9a6 100644
--- a/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftUtilities.html
+++ b/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftUtilities.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public final class ThriftUtilities
+public final class ThriftUtilities
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 
 
@@ -274,7 +274,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 ThriftUtilities
-privateThriftUtilities()
+privateThriftUtilities()
 
 
 
@@ -291,7 +291,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getFromThrift
-public staticGetgetFromThrift(org.apache.hadoop.hbase.thrift2.generated.TGetin)
+public staticGetgetFromThrift(org.apache.hadoop.hbase.thrift2.generated.TGetin)
  throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Creates a Get (HBase) from a 
TGet (Thrift).
 
@@ -312,7 +312,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getsFromThrift
-public statichttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListGetgetsFromThrift(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.hbase.thrift2.generated.TGetin)
+public statichttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListGetgetsFromThrift(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.hbase.thrift2.generated.TGetin)
 throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Converts multiple TGets (Thrift) into a list 
of Gets 
(HBase).
 
@@ -333,7 +333,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 resultFromHBase
-public 
staticorg.apache.hadoop.hbase.thrift2.generated.TResultresultFromHBase(Resultin)
+public 
staticorg.apache.hadoop.hbase.thrift2.generated.TResultresultFromHBase(Resultin)
 Creates a TResult (Thrift) from a Result (HBase).
 
 Parameters:
@@ -349,7 +349,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 resultsFromHBase
-public statichttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.hbase.thrift2.generated.TResultresultsFromHBase(Result[]in)
+public statichttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.hbase.thrift2.generated.TResultresultsFromHBase(Result[]in)
 Converts multiple Results (HBase) into a list 
of TResults (Thrift).
 
 Parameters:
@@ -367,7 +367,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 putFromThrift
-public staticPutputFromThrift(org.apache.hadoop.hbase.thrift2.generated.TPutin)
+public staticPutputFromThrift(org.apache.hadoop.hbase.thrift2.generated.TPutin)
 Creates a Put (HBase) from a 
TPut (Thrift)
 
 Parameters:
@@ -383,7 +383,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 putsFromThrift
-public statichttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPutputsFromThrift(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.hbase.thrift2.generated.TPutin)
+public statichttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPutputsFromThrift(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.hbase.thrift2.generated.TPutin)
 Converts multiple TPuts (Thrift) into a list 
of Puts 
(HBase).
 
 Parameters:
@@ -401,7 +401,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 deleteFromThrift
-public staticDeletedeleteFromThrift(org.apache.hadoop.hbase.thrift2.generated.TDeletein)
+public staticDeletedeleteFromThrift(org.apache.hadoop.hbase.thrift2.generated.TDeletein)
 Creates a Delete 
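
A hedged sketch using the converters listed above: turn a Thrift TGet into an HBase Get, execute it, and map the Result back to a TResult. Obtaining the Table handle is assumed; the converter signatures are the ones shown in the page diff.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.thrift2.ThriftUtilities;
import org.apache.hadoop.hbase.thrift2.generated.TGet;
import org.apache.hadoop.hbase.thrift2.generated.TResult;

public final class ThriftGetExample {
  static TResult doGet(Table table, TGet tGet) throws IOException {
    Get get = ThriftUtilities.getFromThrift(tGet);   // Thrift -> HBase
    Result result = table.get(get);                  // run against HBase
    return ThriftUtilities.resultFromHBase(result);  // HBase -> Thrift
  }
}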

[28/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.

2018-03-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.html
index 935839d..64dfea4 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.html
@@ -67,7 +67,7 @@
 059 * To only retrieve columns within a specific range of version timestamps, call
 060 * {@link #setTimeRange(long, long) setTimeRange}.
 061 * <p>
-062 * To only retrieve columns with a specific timestamp, call {@link #setTimeStamp(long) setTimestamp}
+062 * To only retrieve columns with a specific timestamp, call {@link #setTimestamp(long) setTimestamp}
 063 * .
 064 * <p>
 065 * To limit the number of versions of each column to be returned, call {@link #setMaxVersions(int)
@@ -149,7 +149,7 @@
 141  private long maxResultSize = -1;
 142  private boolean cacheBlocks = true;
 143  private boolean reversed = false;
-144  private TimeRange tr = new TimeRange();
+144  private TimeRange tr = TimeRange.allTime();
 145  private Map<byte [], NavigableSet<byte []>> familyMap =
 146    new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR);
 147  private Boolean asyncPrefetch = null;
@@ -384,869 +384,887 @@
 376   * @see #setMaxVersions()
 377   * @see #setMaxVersions(int)
 378   * @return this
-379   */
-380  public Scan setTimeStamp(long 
timestamp)
-381  throws IOException {
-382try {
-383  tr = new TimeRange(timestamp, 
timestamp+1);
-384} catch(Exception e) {
-385  // This should never happen, unless 
integer overflow or something extremely wrong...
-386  LOG.error("TimeRange failed, likely 
caused by integer overflow. ", e);
-387  throw e;
-388}
-389return this;
-390  }
-391
-392  @Override public Scan 
setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
-393return (Scan) 
super.setColumnFamilyTimeRange(cf, minStamp, maxStamp);
-394  }
-395
-396  /**
-397   * Set the start row of the scan.
-398   * p
-399   * If the specified row does not exist, 
the Scanner will start from the next closest row after the
-400   * specified row.
-401   * @param startRow row to start scanner 
at or after
-402   * @return this
-403   * @throws IllegalArgumentException if 
startRow does not meet criteria for a row key (when length
-404   *   exceeds {@link 
HConstants#MAX_ROW_LENGTH})
-405   * @deprecated use {@link 
#withStartRow(byte[])} instead. This method may change the inclusive of
-406   * the stop row to keep 
compatible with the old behavior.
-407   */
-408  @Deprecated
-409  public Scan setStartRow(byte[] 
startRow) {
-410withStartRow(startRow);
-411if 
(ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow)) {
-412  // for keeping the old behavior 
that a scan with the same start and stop row is a get scan.
-413  this.includeStopRow = true;
-414}
-415return this;
-416  }
-417
-418  /**
-419   * Set the start row of the scan.
-420   * p
-421   * If the specified row does not exist, 
the Scanner will start from the next closest row after the
-422   * specified row.
-423   * @param startRow row to start scanner 
at or after
-424   * @return this
-425   * @throws IllegalArgumentException if 
startRow does not meet criteria for a row key (when length
-426   *   exceeds {@link 
HConstants#MAX_ROW_LENGTH})
-427   */
-428  public Scan withStartRow(byte[] 
startRow) {
-429return withStartRow(startRow, 
true);
-430  }
-431
-432  /**
-433   * Set the start row of the scan.
-434   * p
-435   * If the specified row does not exist, 
or the {@code inclusive} is {@code false}, the Scanner
-436   * will start from the next closest row 
after the specified row.
-437   * @param startRow row to start scanner 
at or after
-438   * @param inclusive whether we should 
include the start row when scan
-439   * @return this
-440   * @throws IllegalArgumentException if 
startRow does not meet criteria for a row key (when length
-441   *   exceeds {@link 
HConstants#MAX_ROW_LENGTH})
-442   */
-443  public Scan withStartRow(byte[] 
startRow, boolean inclusive) {
-444if (Bytes.len(startRow)  
HConstants.MAX_ROW_LENGTH) {
-445  throw new 
IllegalArgumentException("startRow's length must be less than or equal to "
-446  + HConstants.MAX_ROW_LENGTH + " 
to meet the criteria" + " for a row key.");
-447}
-448this.startRow = startRow;
-449this.includeStartRow = inclusive;
-450return this;
-451  }
-452
-453  /**
-454   * Set the stop row of the scan.
-455   * p
-456   * The scan will include rows that are 
lexicographically less than the provided stopRow.
-457   * p
-458   * bNote:/b When doing 
a filter for a rowKey uPrefix/u use
-459   * {@link #setRowPrefixFilter(byte[])}. 
The 'trailing 
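A hedged usage sketch tying together the APIs above: withStartRow/withStopRow replace the deprecated setStartRow/setStopRow, and setTimestamp (the replacement named in the Javadoc diff for setTimeStamp) narrows reads to a single version timestamp. Row keys are illustrative.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanExample {
  static Scan buildScan(long ts) {
    return new Scan()
        .withStartRow(Bytes.toBytes("row-000"), true)   // inclusive start
        .withStopRow(Bytes.toBytes("row-999"), false)   // exclusive stop
        .setTimestamp(ts);                              // single timestamp
  }
}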

[28/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

2018-03-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/class-use/Size.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Size.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Size.html
index e8d1010..3f1b032 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/Size.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/Size.html
@@ -189,130 +189,130 @@
 
 
 Size
+RegionLoad.getBloomFilterSize()
+Deprecated.
+
+
+
+Size
 RegionMetrics.getBloomFilterSize()
 
+
+Size
+RegionMetricsBuilder.RegionMetricsImpl.getBloomFilterSize()
+
 
 Size
-RegionLoad.getBloomFilterSize()
+ServerLoad.getMaxHeapSize()
 Deprecated.
 
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getBloomFilterSize()
+ServerMetrics.getMaxHeapSize()
 
 
 Size
-ServerMetrics.getMaxHeapSize()
+ServerMetricsBuilder.ServerMetricsImpl.getMaxHeapSize()
 
 
 Size
-ServerLoad.getMaxHeapSize()
+RegionLoad.getMemStoreSize()
 Deprecated.
 
 
 
 Size
-ServerMetricsBuilder.ServerMetricsImpl.getMaxHeapSize()
+RegionMetrics.getMemStoreSize()
 
 
 Size
-RegionMetrics.getMemStoreSize()
+RegionMetricsBuilder.RegionMetricsImpl.getMemStoreSize()
 
 
 Size
-RegionLoad.getMemStoreSize()
+RegionLoad.getStoreFileIndexSize()
 Deprecated.
 
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getMemStoreSize()
-
-
-Size
 RegionMetrics.getStoreFileIndexSize()
 TODO: why we pass the same value to different counters? 
Currently, the value from
  getStoreFileIndexSize() is same with getStoreFileRootLevelIndexSize()
  see HRegionServer#createRegionLoad.
 
 
+
+Size
+RegionMetricsBuilder.RegionMetricsImpl.getStoreFileIndexSize()
+
 
 Size
-RegionLoad.getStoreFileIndexSize()
+RegionLoad.getStoreFileRootLevelIndexSize()
 Deprecated.
 
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getStoreFileIndexSize()
+RegionMetrics.getStoreFileRootLevelIndexSize()
 
 
 Size
-RegionMetrics.getStoreFileRootLevelIndexSize()
+RegionMetricsBuilder.RegionMetricsImpl.getStoreFileRootLevelIndexSize()
 
 
 Size
-RegionLoad.getStoreFileRootLevelIndexSize()
+RegionLoad.getStoreFileSize()
 Deprecated.
 
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getStoreFileRootLevelIndexSize()
+RegionMetrics.getStoreFileSize()
 
 
 Size
-RegionMetrics.getStoreFileSize()
+RegionMetricsBuilder.RegionMetricsImpl.getStoreFileSize()
 
 
 Size
-RegionLoad.getStoreFileSize()
+RegionLoad.getStoreFileUncompressedDataIndexSize()
 Deprecated.
 
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getStoreFileSize()
+RegionMetrics.getStoreFileUncompressedDataIndexSize()
 
 
 Size
-RegionMetrics.getStoreFileUncompressedDataIndexSize()
+RegionMetricsBuilder.RegionMetricsImpl.getStoreFileUncompressedDataIndexSize()
 
 
 Size
-RegionLoad.getStoreFileUncompressedDataIndexSize()
+RegionLoad.getUncompressedStoreFileSize()
 Deprecated.
 
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getStoreFileUncompressedDataIndexSize()
+RegionMetrics.getUncompressedStoreFileSize()
 
 
 Size
-RegionMetrics.getUncompressedStoreFileSize()
+RegionMetricsBuilder.RegionMetricsImpl.getUncompressedStoreFileSize()
 
 
 Size
-RegionLoad.getUncompressedStoreFileSize()
+ServerLoad.getUsedHeapSize()
 Deprecated.
 
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getUncompressedStoreFileSize()
-
-
-Size
 ServerMetrics.getUsedHeapSize()
 
-
-Size
-ServerLoad.getUsedHeapSize()
-Deprecated.
-
-
 
 Size
 ServerMetricsBuilder.ServerMetricsImpl.getUsedHeapSize()

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
index 72d579d..63833f7 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
@@ -122,11 +122,11 @@
 
 
 TableDescriptors
-MasterServices.getTableDescriptors()
+HMaster.getTableDescriptors()
 
 
 TableDescriptors
-HMaster.getTableDescriptors()
+MasterServices.getTableDescriptors()
 
 
 



[28/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

2018-03-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
index c36dd6e..d4d6c7f 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
@@ -239,15 +239,15 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 private ServerName
-ServerMetricsBuilder.serverName
+HRegionLocation.serverName
 
 
 private ServerName
-ServerMetricsBuilder.ServerMetricsImpl.serverName
+ServerMetricsBuilder.serverName
 
 
 private ServerName
-HRegionLocation.serverName
+ServerMetricsBuilder.ServerMetricsImpl.serverName
 
 
 
@@ -306,7 +306,9 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 ServerName
-ClusterMetricsBuilder.ClusterMetricsImpl.getMasterName()
+ClusterMetrics.getMasterName()
+Returns detailed information about the current master ServerName.
+
 
 
 ServerName
@@ -316,15 +318,11 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 ServerName
-ClusterMetrics.getMasterName()
-Returns detailed information about the current master ServerName.
-
+ClusterMetricsBuilder.ClusterMetricsImpl.getMasterName()
 
 
 ServerName
-ServerLoad.getServerName()
-Deprecated.
-
+HRegionLocation.getServerName()
 
 
 ServerName
@@ -332,11 +330,13 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 ServerName
-ServerMetricsBuilder.ServerMetricsImpl.getServerName()
+ServerLoad.getServerName()
+Deprecated.
+
 
 
 ServerName
-HRegionLocation.getServerName()
+ServerMetricsBuilder.ServerMetricsImpl.getServerName()
 
 
 ServerName
@@ -405,7 +405,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName
-ClusterMetricsBuilder.ClusterMetricsImpl.getBackupMasterNames()
+ClusterMetrics.getBackupMasterNames()
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName
@@ -415,7 +415,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName
-ClusterMetrics.getBackupMasterNames()
+ClusterMetricsBuilder.ClusterMetricsImpl.getBackupMasterNames()
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName
@@ -428,7 +428,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName
-ClusterMetricsBuilder.ClusterMetricsImpl.getDeadServerNames()
+ClusterMetrics.getDeadServerNames()
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName
@@ -438,7 +438,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName
-ClusterMetrics.getDeadServerNames()
+ClusterMetricsBuilder.ClusterMetricsImpl.getDeadServerNames()
 
 
 private https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapServerName,ServerLoad
@@ -448,7 +448,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapServerName,ServerMetrics
-ClusterMetricsBuilder.ClusterMetricsImpl.getLiveServerMetrics()
+ClusterMetrics.getLiveServerMetrics()
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapServerName,ServerMetrics
@@ -458,7 +458,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapServerName,ServerMetrics
-ClusterMetrics.getLiveServerMetrics()
+ClusterMetricsBuilder.ClusterMetricsImpl.getLiveServerMetrics()
 
 
 static PairRegionInfo,ServerName
@@ -857,31 +857,31 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 private ServerName
-FastFailInterceptorContext.server
+AsyncRequestFutureImpl.SingleServerRequestRunnable.server
 
 
 private ServerName
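The ClusterMetrics accessors reordered above (getMasterName, getBackupMasterNames, getDeadServerNames, getLiveServerMetrics) are the replacements for the old ClusterStatus view. A hedged sketch of walking the cluster topology, assuming an existing Admin handle named admin:

  ClusterMetrics cm = admin.getClusterMetrics();
  System.out.println("active master: " + cm.getMasterName());
  cm.getBackupMasterNames().forEach(sn -> System.out.println("backup master: " + sn));
  cm.getDeadServerNames().forEach(sn -> System.out.println("dead: " + sn));
  // live servers map ServerName -> ServerMetrics
  cm.getLiveServerMetrics().forEach((sn, sm) ->
      System.out.println(sn + " hosts " + sm.getRegionMetrics().size() + " regions"));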

[28/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

2018-03-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
index ecf500c..0cd5a4e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
@@ -238,8355 +238,8368 @@
 230  public static final String 
HBASE_MAX_CELL_SIZE_KEY = "hbase.server.keyvalue.maxsize";
 231  public static final int 
DEFAULT_MAX_CELL_SIZE = 10485760;
 232
-233  public static final String 
HBASE_REGIONSERVER_MINIBATCH_SIZE =
-234  
"hbase.regionserver.minibatch.size";
-235  public static final int 
DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE = 2;
-236
-237  /**
-238   * This is the global default value for 
durability. All tables/mutations not
-239   * defining a durability or using 
USE_DEFAULT will default to this value.
-240   */
-241  private static final Durability 
DEFAULT_DURABILITY = Durability.SYNC_WAL;
+233  /**
+234   * This is the global default value for 
durability. All tables/mutations not
+235   * defining a durability or using 
USE_DEFAULT will default to this value.
+236   */
+237  private static final Durability 
DEFAULT_DURABILITY = Durability.SYNC_WAL;
+238
+239  public static final String 
HBASE_REGIONSERVER_MINIBATCH_SIZE =
+240  
"hbase.regionserver.minibatch.size";
+241  public static final int 
DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE = 2;
 242
-243  final AtomicBoolean closed = new 
AtomicBoolean(false);
-244
-245  /* Closing can take some time; use the 
closing flag if there is stuff we don't
-246   * want to do while in closing state; 
e.g. like offer this region up to the
-247   * master as a region to close if the 
carrying regionserver is overloaded.
-248   * Once set, it is never cleared.
-249   */
-250  final AtomicBoolean closing = new 
AtomicBoolean(false);
-251
-252  /**
-253   * The max sequence id of flushed data 
on this region. There is no edit in memory that is
-254   * less than this sequence id.
-255   */
-256  private volatile long maxFlushedSeqId = 
HConstants.NO_SEQNUM;
-257
-258  /**
-259   * Record the sequence id of last flush 
operation. Can be in advance of
-260   * {@link #maxFlushedSeqId} when 
flushing a single column family. In this case,
-261   * {@link #maxFlushedSeqId} will be 
older than the oldest edit in memory.
-262   */
-263  private volatile long lastFlushOpSeqId 
= HConstants.NO_SEQNUM;
-264
-265  /**
-266   * The sequence id of the last replayed 
open region event from the primary region. This is used
-267   * to skip entries before this due to 
the possibility of replay edits coming out of order from
-268   * replication.
-269   */
-270  protected volatile long 
lastReplayedOpenRegionSeqId = -1L;
-271  protected volatile long 
lastReplayedCompactionSeqId = -1L;
-272
-273  
//
-274  // Members
-275  
//
-276
-277  // map from a locked row to the context 
for that lock including:
-278  // - CountDownLatch for threads waiting 
on that row
-279  // - the thread that owns the lock 
(allow reentrancy)
-280  // - reference count of (reentrant) 
locks held by the thread
-281  // - the row itself
-282  private final 
ConcurrentHashMap<HashedBytes, RowLockContext> lockedRows =
-283  new ConcurrentHashMap<>();
-284
-285  protected final Map<byte[], 
HStore> stores =
-286  new 
ConcurrentSkipListMap<>(Bytes.BYTES_RAWCOMPARATOR);
+243  public static final String 
WAL_HSYNC_CONF_KEY = "hbase.wal.hsync";
+244  public static final boolean 
DEFAULT_WAL_HSYNC = false;
+245
+246  final AtomicBoolean closed = new 
AtomicBoolean(false);
+247
+248  /* Closing can take some time; use the 
closing flag if there is stuff we don't
+249   * want to do while in closing state; 
e.g. like offer this region up to the
+250   * master as a region to close if the 
carrying regionserver is overloaded.
+251   * Once set, it is never cleared.
+252   */
+253  final AtomicBoolean closing = new 
AtomicBoolean(false);
+254
+255  /**
+256   * The max sequence id of flushed data 
on this region. There is no edit in memory that is
+257   * less than this sequence id.
+258   */
+259  private volatile long maxFlushedSeqId = 
HConstants.NO_SEQNUM;
+260
+261  /**
+262   * Record the sequence id of last flush 
operation. Can be in advance of
+263   * {@link #maxFlushedSeqId} when 
flushing a single column family. In this case,
+264   * {@link #maxFlushedSeqId} will be 
older than the oldest edit in memory.
+265   */
+266  private volatile long lastFlushOpSeqId 
= 
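The reordered block above leaves the durability contract unchanged: DEFAULT_DURABILITY stays Durability.SYNC_WAL, with hbase.wal.hsync (default false) added alongside it. A sketch of overriding that global default per mutation, assuming an open Table handle named table (row/family/qualifier bytes are placeholders):

  Put p = new Put(Bytes.toBytes("row1"));
  p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  // mutations left at USE_DEFAULT fall back to the table setting,
  // and ultimately to DEFAULT_DURABILITY (SYNC_WAL)
  p.setDurability(Durability.ASYNC_WAL);
  table.put(p);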

[28/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileableStoreFile.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileableStoreFile.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileableStoreFile.html
index f47d627..c3d225c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileableStoreFile.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileableStoreFile.html
@@ -117,219 +117,219 @@
 109   */
 110  public static boolean 
archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir)
 111  throws IOException {
-112if (LOG.isDebugEnabled()) {
-113  LOG.debug("ARCHIVING " + 
regionDir.toString());
-114}
-115
-116// otherwise, we archive the files
-117// make sure we can archive
-118if (tableDir == null || regionDir == 
null) {
-119  LOG.error("No archive directory 
could be found because tabledir (" + tableDir
-120  + ") or regiondir (" + 
regionDir + ") was null. Deleting files instead.");
-121  deleteRegionWithoutArchiving(fs, 
regionDir);
-122  // we should have archived, but 
failed to. Doesn't matter if we deleted
-123  // the archived files correctly or 
not.
-124  return false;
-125}
-126
-127// make sure the regiondir lives 
under the tabledir
-128
Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
-129Path regionArchiveDir = 
HFileArchiveUtil.getRegionArchiveDir(rootdir,
-130FSUtils.getTableName(tableDir),
-131regionDir.getName());
-132
-133FileStatusConverter getAsFile = new 
FileStatusConverter(fs);
-134// otherwise, we attempt to archive 
the store files
-135
-136// build collection of just the store 
directories to archive
-137Collection<File> toArchive = 
new ArrayList<>();
-138final PathFilter dirFilter = new 
FSUtils.DirFilter(fs);
-139PathFilter nonHidden = new 
PathFilter() {
-140  @Override
-141  public boolean accept(Path file) 
{
-142return dirFilter.accept(file) 
 && !file.getName().toString().startsWith(".");
-143  }
-144};
-145FileStatus[] storeDirs = 
FSUtils.listStatus(fs, regionDir, nonHidden);
-146// if there are no files, we can just 
delete the directory and return;
-147if (storeDirs == null) {
-148  LOG.debug("Region directory " + 
regionDir + " empty.");
-149  return 
deleteRegionWithoutArchiving(fs, regionDir);
-150}
-151
-152// convert the files in the region to 
a File
-153
toArchive.addAll(Lists.transform(Arrays.asList(storeDirs), getAsFile));
-154LOG.debug("Archiving " + 
toArchive);
-155List<File> failedArchive = 
resolveAndArchive(fs, regionArchiveDir, toArchive,
-156
EnvironmentEdgeManager.currentTime());
-157if (!failedArchive.isEmpty()) {
-158  throw new 
FailedArchiveException("Failed to archive/delete all the files for region:"
-159  + regionDir.getName() + " into 
" + regionArchiveDir
-160  + ". Something is probably awry 
on the filesystem.",
-161  
Collections2.transform(failedArchive, FUNC_FILE_TO_PATH));
-162}
-163// if that was successful, then we 
delete the region
-164return 
deleteRegionWithoutArchiving(fs, regionDir);
-165  }
-166
-167  /**
-168   * Remove from the specified region the 
store files of the specified column family,
-169   * either by archiving them or outright 
deletion
-170   * @param fs the filesystem where the 
store files live
-171   * @param conf {@link Configuration} to 
examine to determine the archive directory
-172   * @param parent Parent region hosting 
the store files
-173   * @param tableDir {@link Path} to 
where the table is being stored (for building the archive path)
-174   * @param family the family hosting the 
store files
-175   * @throws IOException if the files 
could not be correctly disposed.
-176   */
-177  public static void 
archiveFamily(FileSystem fs, Configuration conf,
-178  RegionInfo parent, Path tableDir, 
byte[] family) throws IOException {
-179Path familyDir = new Path(tableDir, 
new Path(parent.getEncodedName(), Bytes.toString(family)));
-180archiveFamilyByFamilyDir(fs, conf, 
parent, familyDir, family);
-181  }
-182
-183  /**
-184   * Removes from the specified region 
the store files of the specified column family,
-185   * either by archiving them or outright 
deletion
-186   * @param fs the filesystem where the 
store files live
-187   * @param conf {@link Configuration} to 
examine to determine the archive directory
-188   * @param parent Parent region hosting 
the store files
-189   * @param familyDir {@link Path} to 
where the family is being stored
-190   * @param family the family hosting the 
store files
-191   * @throws IOException if the files 
could not be 
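archiveRegion above implements archive-or-delete semantics: when no archive directory can be resolved the files are deleted outright, otherwise every non-hidden store directory is moved under the region archive dir, with FailedArchiveException signalling a partial failure. A hypothetical call site, assuming the era's FSUtils helpers and an already-known encoded region name:

  FileSystem fs = FileSystem.get(conf);
  Path rootDir = FSUtils.getRootDir(conf);                 // hbase.rootdir
  Path tableDir = FSUtils.getTableDir(rootDir, tableName);
  Path regionDir = new Path(tableDir, regionEncodedName);  // placeholder name
  // true once the region directory has been archived and removed
  boolean archived = HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);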

[28/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

2018-03-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-summary.html 
b/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
index 3377afb..78a22ea 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
@@ -262,163 +262,169 @@
 
 
 
+CoprocessorDescriptor
+
+CoprocessorDescriptor contains the details about how to 
build a coprocessor.
+
+
+
 HBaseAdmin.ProcedureFuture.WaitForStateCallable
 
 
-
+
 MasterKeepAliveConnection
 
 A KeepAlive connection is not physically closed immediately 
after the close,
   but rather kept alive for a few minutes.
 
 
-
+
 MetricsConnection.NewMetricT
 
 A lambda for dispatching to the appropriate metric factory 
method
 
 
-
+
 NonceGenerator
 
 NonceGenerator interface.
 
 
-
+
 RawAsyncHBaseAdmin.AdminRpcCallRESP,REQ
 
 
-
+
 RawAsyncHBaseAdmin.ConverterD,S
 
 
-
+
 RawAsyncHBaseAdmin.MasterRpcCallRESP,REQ
 
 
-
+
 RawAsyncHBaseAdmin.TableOperator
 
 
-
+
 RawAsyncTableImpl.ConverterD,I,S
 
 
-
+
 RawAsyncTableImpl.NoncedConverterD,I,S
 
 
-
+
 RawAsyncTableImpl.RpcCallRESP,REQ
 
 
-
+
 RegionInfo
 
 Information about a region.
 
 
-
+
 RegionLocator
 
 Used to view region location information for a single HBase 
table.
 
 
-
+
 RequestController
 
 An interface for client request scheduling algorithm.
 
 
-
+
 RequestController.Checker
 
 Picks up the valid data.
 
 
-
+
 ResultScanner
 
 Interface for client-side scanning.
 
 
-
+
 RetryingCallableT
 
 A CallableT that will be retried.
 
 
-
+
 Row
 
 Has a row.
 
 
-
+
 RowAccessT
 
 Provide a way to access the inner buffer.
 
 
-
+
 RpcRetryingCallerT
 
 
-
+
 ScanResultCache
 
 Used to separate the row constructing logic.
 
 
-
+
 ScanResultConsumer
 
 Receives Result for an asynchronous 
scan.
 
 
-
+
 ScanResultConsumerBase
 
 The base interface for scan result consumer.
 
 
-
+
 ServiceCallerS,R
 
 Delegate to a protobuf rpc call.
 
 
-
+
 SimpleRequestController.RowChecker
 
 Provide a way to control the flow of rows iteration.
 
 
-
+
 StatisticTrackable
 
 Parent interface for an object to get updates about 
per-region statistics.
 
 
-
+
 Table
 
 Used to communicate with a single HBase table.
 
 
-
+
 Table.CheckAndMutateBuilder
 
 A helper class for sending checkAndMutate request.
 
 
-
+
 TableBuilder
 
 For creating Table 
instance.
 
 
-
+
 TableDescriptor
 
 TableDescriptor contains the details about an HBase table 
such as the descriptors of
@@ -427,7 +433,7 @@
  when the region split should occur, coprocessors associated with it 
etc...
 
 
-
+
 ZKAsyncRegistry.ConverterT
 
 
@@ -817,6 +823,16 @@
 
 
 
+CoprocessorDescriptorBuilder
+
+Used to build the CoprocessorDescriptor
+
+
+
+CoprocessorDescriptorBuilder.CoprocessorDescriptorImpl
+
+
+
 Cursor
 
 Scan cursor to tell client where server is scanning

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
index 15773e2..be313d2 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
@@ -196,6 +196,8 @@
 org.apache.hadoop.hbase.client.ConnectionImplementation.ServerErrorTracker
 org.apache.hadoop.hbase.client.ConnectionImplementation.ServerErrorTracker.ServerErrors
 org.apache.hadoop.hbase.client.ConnectionUtils
+org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder
+org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder.CoprocessorDescriptorImpl
 (implements org.apache.hadoop.hbase.client.CoprocessorDescriptor)
 org.apache.hadoop.hbase.client.Cursor
 org.apache.hadoop.hbase.client.DelayingRunner (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true;
 title="class or interface in java.lang">Runnable)
 org.apache.hadoop.hbase.client.FailureInfo
@@ -500,6 +502,7 @@
 org.apache.hadoop.hbase.client.Row
 
 
+org.apache.hadoop.hbase.client.CoprocessorDescriptor
 org.apache.hadoop.hbase.client.HBaseAdmin.ProcedureFuture.WaitForStateCallable
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true;
 title="class or interface in java.lang">IterableT
 
@@ -547,24 +550,24 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, 
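The CoprocessorDescriptor/CoprocessorDescriptorBuilder pair introduced by this commit describes how a coprocessor is loaded. A sketch under the builder API listed above; the observer class and jar path are hypothetical:

  CoprocessorDescriptor cp = CoprocessorDescriptorBuilder
      .newBuilder("org.example.MyRegionObserver")   // hypothetical class
      .setJarPath("hdfs:///cp/my-observer.jar")     // hypothetical path
      .setPriority(Coprocessor.PRIORITY_USER)
      .build();
  TableDescriptor td = TableDescriptorBuilder
      .newBuilder(TableName.valueOf("t1"))
      .setCoprocessor(cp)
      .build();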

[28/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

2018-03-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index 6ccfee6..fa0c083 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -1304,7 +1304,7 @@ implements 
 
 pendingAssignQueue
-private finalhttps://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true;
 title="class or interface in java.util">ArrayListRegionStates.RegionStateNode 
pendingAssignQueue
+private finalhttps://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true;
 title="class or interface in java.util">ArrayListRegionStates.RegionStateNode 
pendingAssignQueue
 
 
 
@@ -1313,7 +1313,7 @@ implements 
 
 assignQueueLock
-private finalhttps://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReentrantLock.html?is-external=true;
 title="class or interface in java.util.concurrent.locks">ReentrantLock assignQueueLock
+private finalhttps://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReentrantLock.html?is-external=true;
 title="class or interface in java.util.concurrent.locks">ReentrantLock assignQueueLock
 
 
 
@@ -1322,7 +1322,7 @@ implements 
 
 assignQueueFullCond
-private finalhttps://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/Condition.html?is-external=true;
 title="class or interface in java.util.concurrent.locks">Condition assignQueueFullCond
+private finalhttps://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/Condition.html?is-external=true;
 title="class or interface in java.util.concurrent.locks">Condition assignQueueFullCond
 
 
 
@@ -1803,7 +1803,12 @@ implements 
 
 moveAsync
-publichttps://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Future.html?is-external=true;
 title="class or interface in 
java.util.concurrent">Futurebyte[]moveAsync(RegionPlanregionPlan)
+publichttps://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Future.html?is-external=true;
 title="class or interface in 
java.util.concurrent">Futurebyte[]moveAsync(RegionPlanregionPlan)
+ throws HBaseIOException
+
+Throws:
+HBaseIOException
+
 
 
 
@@ -1899,7 +1904,12 @@ implements 
 
 createReopenProcedures
-public MoveRegionProcedure[] createReopenProcedures(https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">Collection<RegionInfo> regionInfo)
+public MoveRegionProcedure[] createReopenProcedures(https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">Collection<RegionInfo> regionInfo)
+ throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
+
+Throws:
+https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
+
 
 
 
@@ -1908,7 +1918,7 @@ implements 
 
 createUnassignProcedures
-public UnassignProcedure[] createUnassignProcedures(TableName tableName)
+public UnassignProcedure[] createUnassignProcedures(TableName tableName)
 Called by things like DisableTableProcedure to get a list 
of UnassignProcedure
  to unassign the regions of the table.
 
@@ -1919,7 +1929,7 @@ implements 
 
 createAssignProcedure
-public AssignProcedure createAssignProcedure(RegionInfo regionInfo)
+public AssignProcedure createAssignProcedure(RegionInfo regionInfo)
 
 
 
@@ -1928,7 +1938,7 @@ implements 
 
 createAssignProcedure
-public AssignProcedure createAssignProcedure(RegionInfo regionInfo,
+public AssignProcedure createAssignProcedure(RegionInfo regionInfo,
  ServerName targetServer)
 
 
@@ -1938,7 +1948,7 @@ implements 
 
 createUnassignProcedure
-UnassignProcedure createUnassignProcedure(RegionInfo regionInfo,
+UnassignProcedure createUnassignProcedure(RegionInfo regionInfo,
   ServerName destinationServer,
   boolean force)
 
@@ -1949,7 +1959,7 @@ implements 
 
 createUnassignProcedure
-UnassignProcedure createUnassignProcedure(RegionInfo regionInfo,
+UnassignProcedure createUnassignProcedure(RegionInfo regionInfo,
   ServerName destinationServer,
   boolean force,
   boolean removeAfterUnassigning)
@@ -1961,7 +1971,12 @@ implements 
 
 createMoveRegionProcedure
-public MoveRegionProcedure createMoveRegionProcedure(RegionPlan plan)
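The signature changes above make moveAsync and createReopenProcedures declare checked exceptions, so a bad RegionPlan now fails fast instead of surfacing later. A sketch of the revised call from master-internal code (the 30-second wait is illustrative):

  void move(AssignmentManager am, RegionPlan plan) throws Exception {
    Future<byte[]> f = am.moveAsync(plan);  // may now throw HBaseIOException up front
    f.get(30, TimeUnit.SECONDS);            // block until the region move completes
  }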

[28/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

2018-03-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/class-use/TableNotDisabledException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/TableNotDisabledException.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableNotDisabledException.html
index 7eb7661..1d50582 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/class-use/TableNotDisabledException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableNotDisabledException.html
@@ -104,13 +104,13 @@
 
 
 void
-MasterServices.checkTableModifiable(TableNametableName)
-Check table is modifiable; i.e.
-
+HMaster.checkTableModifiable(TableNametableName)
 
 
 void
-HMaster.checkTableModifiable(TableNametableName)
+MasterServices.checkTableModifiable(TableNametableName)
+Check table is modifiable; i.e.
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html
index 25d874c..6194cbc 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html
@@ -170,13 +170,13 @@
 
 
 void
-MasterServices.checkTableModifiable(TableNametableName)
-Check table is modifiable; i.e.
-
+HMaster.checkTableModifiable(TableNametableName)
 
 
 void
-HMaster.checkTableModifiable(TableNametableName)
+MasterServices.checkTableModifiable(TableNametableName)
+Check table is modifiable; i.e.
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/class-use/Tag.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Tag.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Tag.html
index 22b3482..067aa54 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/Tag.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/Tag.html
@@ -243,10 +243,6 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 static https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTag
-PrivateCellUtil.getTags(Cellcell)
-
-
-static https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTag
 CellUtil.getTags(Cellcell)
 Deprecated.
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
@@ -254,6 +250,10 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
+
+static https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTag
+PrivateCellUtil.getTags(Cellcell)
+
 
 static https://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
 title="class or interface in java.util">IteratorTag
 CellUtil.tagsIterator(byte[]tags,
@@ -395,11 +395,6 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 static Cell
-PrivateCellUtil.createCell(Cellcell,
-  https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTagtags)
-
-
-static Cell
 CellUtil.createCell(Cellcell,
   https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTagtags)
 Deprecated.
@@ -407,6 +402,11 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
+
+static Cell
+PrivateCellUtil.createCell(Cellcell,
+  https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTagtags)
+
 
 static byte[]
 TagUtil.fromList(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTagtags)
@@ -415,17 +415,17 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-ExtendedCellBuilder
-ExtendedCellBuilderImpl.setTags(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTagtags)
-
-
 RawCellBuilder
 RawCellBuilder.setTags(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTagtags)
 
-
+
 ExtendedCellBuilder
 ExtendedCellBuilder.setTags(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTagtags)
 
+
+ExtendedCellBuilder
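The Tag helpers above moved from the deprecated CellUtil entry points to PrivateCellUtil (an IA.Private class). A two-line sketch of the relocated calls, purely to illustrate the move; cell is assumed to be an existing Cell:

  List<Tag> tags = PrivateCellUtil.getTags(cell);        // was CellUtil.getTags(cell)
  Cell tagged = PrivateCellUtil.createCell(cell, tags);  // was CellUtil.createCell(cell, tags)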

[28/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

2018-03-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/class-use/Size.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Size.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Size.html
index 3f1b032..e8d1010 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/Size.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/Size.html
@@ -189,130 +189,130 @@
 
 
 Size
-RegionLoad.getBloomFilterSize()
-Deprecated.
-
-
-
-Size
 RegionMetrics.getBloomFilterSize()
 
-
-Size
-RegionMetricsBuilder.RegionMetricsImpl.getBloomFilterSize()
-
 
 Size
-ServerLoad.getMaxHeapSize()
+RegionLoad.getBloomFilterSize()
 Deprecated.
 
 
 
 Size
-ServerMetrics.getMaxHeapSize()
+RegionMetricsBuilder.RegionMetricsImpl.getBloomFilterSize()
 
 
 Size
-ServerMetricsBuilder.ServerMetricsImpl.getMaxHeapSize()
+ServerMetrics.getMaxHeapSize()
 
 
 Size
-RegionLoad.getMemStoreSize()
+ServerLoad.getMaxHeapSize()
 Deprecated.
 
 
 
 Size
-RegionMetrics.getMemStoreSize()
+ServerMetricsBuilder.ServerMetricsImpl.getMaxHeapSize()
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getMemStoreSize()
+RegionMetrics.getMemStoreSize()
 
 
 Size
-RegionLoad.getStoreFileIndexSize()
+RegionLoad.getMemStoreSize()
 Deprecated.
 
 
 
 Size
+RegionMetricsBuilder.RegionMetricsImpl.getMemStoreSize()
+
+
+Size
 RegionMetrics.getStoreFileIndexSize()
 TODO: why we pass the same value to different counters? 
Currently, the value from
  getStoreFileIndexSize() is the same as getStoreFileRootLevelIndexSize();
  see HRegionServer#createRegionLoad.
 
 
-
-Size
-RegionMetricsBuilder.RegionMetricsImpl.getStoreFileIndexSize()
-
 
 Size
-RegionLoad.getStoreFileRootLevelIndexSize()
+RegionLoad.getStoreFileIndexSize()
 Deprecated.
 
 
 
 Size
-RegionMetrics.getStoreFileRootLevelIndexSize()
+RegionMetricsBuilder.RegionMetricsImpl.getStoreFileIndexSize()
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getStoreFileRootLevelIndexSize()
+RegionMetrics.getStoreFileRootLevelIndexSize()
 
 
 Size
-RegionLoad.getStoreFileSize()
+RegionLoad.getStoreFileRootLevelIndexSize()
 Deprecated.
 
 
 
 Size
-RegionMetrics.getStoreFileSize()
+RegionMetricsBuilder.RegionMetricsImpl.getStoreFileRootLevelIndexSize()
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getStoreFileSize()
+RegionMetrics.getStoreFileSize()
 
 
 Size
-RegionLoad.getStoreFileUncompressedDataIndexSize()
+RegionLoad.getStoreFileSize()
 Deprecated.
 
 
 
 Size
-RegionMetrics.getStoreFileUncompressedDataIndexSize()
+RegionMetricsBuilder.RegionMetricsImpl.getStoreFileSize()
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getStoreFileUncompressedDataIndexSize()
+RegionMetrics.getStoreFileUncompressedDataIndexSize()
 
 
 Size
-RegionLoad.getUncompressedStoreFileSize()
+RegionLoad.getStoreFileUncompressedDataIndexSize()
 Deprecated.
 
 
 
 Size
-RegionMetrics.getUncompressedStoreFileSize()
+RegionMetricsBuilder.RegionMetricsImpl.getStoreFileUncompressedDataIndexSize()
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getUncompressedStoreFileSize()
+RegionMetrics.getUncompressedStoreFileSize()
 
 
 Size
-ServerLoad.getUsedHeapSize()
+RegionLoad.getUncompressedStoreFileSize()
 Deprecated.
 
 
 
 Size
+RegionMetricsBuilder.RegionMetricsImpl.getUncompressedStoreFileSize()
+
+
+Size
 ServerMetrics.getUsedHeapSize()
 
+
+Size
+ServerLoad.getUsedHeapSize()
+Deprecated.
+
+
 
 Size
 ServerMetricsBuilder.ServerMetricsImpl.getUsedHeapSize()

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
index 63833f7..72d579d 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
@@ -122,11 +122,11 @@
 
 
 TableDescriptors
-HMaster.getTableDescriptors()
+MasterServices.getTableDescriptors()
 
 
 TableDescriptors
-MasterServices.getTableDescriptors()
+HMaster.getTableDescriptors()
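This commit shuffles the same Size accessor table. A sketch of reading the per-region figures via RegionMetrics, assuming an Admin handle admin and a ServerName sn (Admin.getRegionMetrics is taken to be available in this 3.0-SNAPSHOT API):

  for (RegionMetrics rm : admin.getRegionMetrics(sn)) {
    System.out.printf("%s store=%.0fMB memstore=%.0fMB%n",
        rm.getNameAsString(),
        rm.getStoreFileSize().get(Size.Unit.MEGABYTE),
        rm.getMemStoreSize().get(Size.Unit.MEGABYTE));
  }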
 
 
 



[28/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

2018-03-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
index 98346cf..be292c5 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
@@ -479,9 +479,10 @@ implements 
 void
-logPositionAndCleanOldLogs(org.apache.hadoop.fs.Pathlog,
+logPositionAndCleanOldLogs(org.apache.hadoop.fs.Pathlog,
   https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringqueueId,
   longposition,
+  https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,https://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">LonglastSeqIds,
   booleanqueueRecovered)
 This method will log the current position to storage.
 
@@ -987,7 +988,7 @@ implements 
+
 
 
 
@@ -996,6 +997,7 @@ implements logPositionAndCleanOldLogs(org.apache.hadoop.fs.Pathlog,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringqueueId,
longposition,
+   https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,https://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">LonglastSeqIds,
booleanqueueRecovered)
 This method will log the current position to storage. And 
also clean old logs from the
  replication queue.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.WorkerState.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.WorkerState.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.WorkerState.html
index 8d26214..cc41ea4 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.WorkerState.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.WorkerState.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static enum ReplicationSourceShipper.WorkerState
+public static enum ReplicationSourceShipper.WorkerState
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumReplicationSourceShipper.WorkerState
 
 
@@ -213,7 +213,7 @@ the order they are declared.
 
 
 RUNNING
-public static final ReplicationSourceShipper.WorkerState
 RUNNING
+public static final ReplicationSourceShipper.WorkerState
 RUNNING
 
 
 
@@ -222,7 +222,7 @@ the order they are declared.
 
 
 STOPPED
-public static final ReplicationSourceShipper.WorkerState
 STOPPED
+public static final ReplicationSourceShipper.WorkerState
 STOPPED
 
 
 
@@ -231,7 +231,7 @@ the order they are declared.
 
 
 FINISHED
-public static final ReplicationSourceShipper.WorkerState
 FINISHED
+public static final ReplicationSourceShipper.WorkerState
 FINISHED
 
 
 
@@ -248,7 +248,7 @@ the order they are declared.
 
 
 values
-public static ReplicationSourceShipper.WorkerState[] values()
+public static ReplicationSourceShipper.WorkerState[] values()
 Returns an array containing the constants of this enum 
type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -268,7 +268,7 @@ for (ReplicationSourceShipper.WorkerState c : 
ReplicationSourceShipper.WorkerSta
 
 
 valueOf
-public static ReplicationSourceShipper.WorkerState valueOf(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String name)
+public static ReplicationSourceShipper.WorkerState valueOf(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
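The Javadoc truncated above is the stock enum boilerplate; completed, the iteration it describes is:

  for (ReplicationSourceShipper.WorkerState c : ReplicationSourceShipper.WorkerState.values()) {
    System.out.println(c);  // RUNNING, STOPPED, FINISHED, in declaration order
  }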

[28/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

2018-03-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html 
b/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
index 789536f..1957877 100644
--- a/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
+++ b/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
@@ -164,7 +164,7 @@
 
 
 static HRegionInfo
-HRegionInfo.parseFrom(http://docs.oracle.com/javase/8/docs/api/java/io/DataInputStream.html?is-external=true;
 title="class or interface in java.io">DataInputStreamin)
+HRegionInfo.parseFrom(https://docs.oracle.com/javase/8/docs/api/java/io/DataInputStream.html?is-external=true;
 title="class or interface in java.io">DataInputStreamin)
 Deprecated.
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
  Use RegionInfo.parseFrom(DataInputStream).
@@ -201,7 +201,7 @@
 
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
+static https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
 HRegionInfo.parseDelimitedFrom(byte[]bytes,
   intoffset,
   intlength)
@@ -258,7 +258,7 @@
 
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 HRegionInfo.getRegionNameAsStringForDisplay(HRegionInfohri,

org.apache.hadoop.conf.Configurationconf)
 Deprecated.
@@ -280,7 +280,7 @@
 
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 HRegionInfo.getShortNameToLog(HRegionInfo...hris)
 Deprecated.
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
@@ -318,8 +318,8 @@
 
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
-HRegionInfo.getShortNameToLog(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfohris)
+static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+HRegionInfo.getShortNameToLog(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfohris)
 Deprecated.
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
  Use RegionInfo.getShortNameToLog(List))}.
@@ -355,7 +355,7 @@
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
+https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
 Admin.getOnlineRegions(ServerNamesn)
 Deprecated.
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
@@ -365,7 +365,7 @@
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
+https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
 Admin.getTableRegions(TableNametableName)
 Deprecated.
 As of release 2.0.0, this 
will be removed in HBase 3.0.0

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html 
b/apidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
index 05c089c..0945890 100644
--- a/apidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
+++ b/apidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
@@ -149,19 +149,19 @@
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionLocation
+https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionLocation
 RegionLocator.getAllRegionLocations()
 Retrieves all of the regions associated with this 
table.
 
 
 
-default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
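Every HRegionInfo method above is deprecated in favour of RegionInfo, and the RegionLocator listed alongside is the supported way to enumerate locations. A sketch assuming an open Connection conn and an existing table t1:

  try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("t1"))) {
    for (HRegionLocation loc : locator.getAllRegionLocations()) {
      RegionInfo ri = loc.getRegion();  // RegionInfo replaces HRegionInfo
      System.out.println(ri.getRegionNameAsString() + " on " + loc.getServerName());
    }
  }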

[28/51] [partial] hbase-site git commit: Published site at .

2018-02-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
index 053f236..5ab0fb3 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
@@ -106,6 +106,7 @@
 org.apache.hadoop.hbase.filter.ColumnPaginationFilter
 org.apache.hadoop.hbase.filter.ColumnPrefixFilter
 org.apache.hadoop.hbase.filter.ColumnRangeFilter
+org.apache.hadoop.hbase.filter.ColumnValueFilter
 org.apache.hadoop.hbase.filter.CompareFilter
 
 org.apache.hadoop.hbase.filter.DependentColumnFilter
@@ -182,14 +183,14 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.filter.Filter.ReturnCode
+org.apache.hadoop.hbase.filter.RegexStringComparator.EngineType
 org.apache.hadoop.hbase.filter.FuzzyRowFilter.Order
+org.apache.hadoop.hbase.filter.Filter.ReturnCode
+org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
 org.apache.hadoop.hbase.filter.FilterWrapper.FilterRowRetCode
-org.apache.hadoop.hbase.filter.BitComparator.BitwiseOp
 org.apache.hadoop.hbase.filter.FuzzyRowFilter.SatisfiesCode
 org.apache.hadoop.hbase.filter.FilterList.Operator
-org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
-org.apache.hadoop.hbase.filter.RegexStringComparator.EngineType
+org.apache.hadoop.hbase.filter.BitComparator.BitwiseOp
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/org/apache/hadoop/hbase/filter/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/filter/package-use.html
index 7094646..271a1f9 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/package-use.html
@@ -279,18 +279,24 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
+ColumnValueFilter
+Different from SingleColumnValueFilter which 
returns an entire row
+ when the specified condition is matched, ColumnValueFilter returns the 
matched cell only.
+
+
+
 CompareFilter
 This is a generic filter to be used to filter by 
comparison.
 
 
-
+
 CompareFilter.CompareOp
 Deprecated.
 since 2.0.0. Will be 
removed in 3.0.0. Use CompareOperator 
instead.
 
 
 
-
+
 DependentColumnFilter
 A filter for adding inter-column timestamp matching
  Only cells with a correspondingly timestamped entry in
@@ -299,197 +305,197 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
  full rows for correct filtering
 
 
-
+
 FamilyFilter
 
  This filter is used to filter based on the column family.
 
 
-
+
 Filter
 Interface for row and column filters directly applied 
within the regionserver.
 
 
-
+
 Filter.ReturnCode
 Return codes for filterValue().
 
 
-
+
 FilterBase
 Abstract base class to help you implement new Filters.
 
 
-
+
 FilterList
 Implementation of Filter that represents an 
ordered List of Filters which will be
  evaluated with a specified boolean operator FilterList.Operator.MUST_PASS_ALL
 (AND) or
  FilterList.Operator.MUST_PASS_ONE
 (OR).
 
 
-
+
 FilterList.Operator
 set operator
 
 
-
+
 FilterListBase
 Base class for FilterList.
 
 
-
+
 FilterWrapper
 This is a Filter wrapper class which is used in the server 
side.
 
 
-
+
 FilterWrapper.FilterRowRetCode
 
-
+
 FirstKeyOnlyFilter
 A filter that will only return the first KV from each 
row.
 
 
-
+
 FirstKeyValueMatchingQualifiersFilter
 Deprecated.
 Deprecated in 2.0. See 
HBASE-13347
 
 
 
-
+
 FuzzyRowFilter
 This is an optimized version of a standard FuzzyRowFilter; it 
filters data based on fuzzy row keys.
 
 
-
+
 FuzzyRowFilter.Order
 Abstracts directional comparisons based on scan 
direction.
 
 
-
+
 FuzzyRowFilter.RowTracker
 If we have multiple fuzzy keys, row tracker should improve 
overall performance.
 
 
-
+
 FuzzyRowFilter.SatisfiesCode
 
-
+
 InclusiveStopFilter
 A Filter that stops after the given row.
 
 
-
+
 KeyOnlyFilter
 A filter that will only return the key component of each KV 
(the value will
  be rewritten as empty).
 
 
-
+
 LongComparator
A long comparator which numerically compares against the 
specified byte array
 
 
-
+
 MultipleColumnPrefixFilter
 This filter is used for selecting only those keys with 
columns that match
  a particular prefix.
 
 
-
+
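Per the ColumnValueFilter entry earlier in this table, the new filter returns only the matched cell where SingleColumnValueFilter would return the whole row. A sketch contrasting the two on a scan; the family/qualifier/value bytes are placeholders:

  byte[] cf = Bytes.toBytes("cf"), q = Bytes.toBytes("q"), v = Bytes.toBytes("v");
  Scan scan = new Scan();
  // whole matching row:  new SingleColumnValueFilter(cf, q, CompareOperator.EQUAL, v)
  // matched cell only:
  scan.setFilter(new ColumnValueFilter(cf, q, CompareOperator.EQUAL, v));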
 

[28/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowCell.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowCell.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowCell.html
index d143ef8..4583895 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowCell.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowCell.html
@@ -258,7 +258,7 @@
 250
 251@Override
 252public long heapSize() {
-253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 254  if (this.tags != null) {
 255sum += 
ClassSize.sizeOf(this.tags);
 256  }
@@ -454,7 +454,7 @@
 446
 447@Override
 448public long heapSize() {
-449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 450  // this.tags is on heap byte[]
 451  if (this.tags != null) {
 452sum += 
ClassSize.sizeOf(this.tags);
@@ -2791,192 +2791,193 @@
 2783   * {@link HeapSize} we call {@link 
HeapSize#heapSize()} so cell can give a correct value. In other
 2784   * cases we just consider the bytes 
occupied by the cell components ie. row, CF, qualifier,
 2785   * timestamp, type, value and tags.
-2786   * @param cell
-2787   * @return estimate of the heap 
space
-2788   */
-2789  public static long 
estimatedHeapSizeOf(final Cell cell) {
-2790if (cell instanceof HeapSize) {
-2791  return ((HeapSize) 
cell).heapSize();
-2792}
-2793// TODO: Add sizing of references 
that hold the row, family, etc., arrays.
-2794return 
estimatedSerializedSizeOf(cell);
-2795  }
-2796
-2797  /**
-2798   * This method exists just to 
encapsulate how we serialize keys. To be replaced by a factory that
-2799   * we query to figure what the Cell 
implementation is and then, what serialization engine to use
-2800   * and further, how to serialize the 
key for inclusion in hfile index. TODO.
-2801   * @param cell
-2802   * @return The key portion of the Cell 
serialized in the old-school KeyValue way or null if passed
-2803   * a null 
<code>cell</code>
-2804   */
-2805  public static byte[] 
getCellKeySerializedAsKeyValueKey(final Cell cell) {
-2806if (cell == null) return null;
-2807byte[] b = new 
byte[KeyValueUtil.keyLength(cell)];
-2808KeyValueUtil.appendKeyTo(cell, b, 
0);
-2809return b;
-2810  }
-2811
-2812  /**
-2813   * Create a Cell that is smaller than 
all other possible Cells for the given Cell's row.
-2814   * @param cell
-2815   * @return First possible Cell on 
passed Cell's row.
-2816   */
-2817  public static Cell 
createFirstOnRow(final Cell cell) {
-2818if (cell instanceof 
ByteBufferExtendedCell) {
-2819  return new 
FirstOnRowByteBufferExtendedCell(
-2820  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2821  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength());
-2822}
-2823return new 
FirstOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
-2824  }
-2825
-2826  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength) {
-2827return new FirstOnRowCell(row, 
roffset, rlength);
-2828  }
-2829
-2830  public static Cell 
createFirstOnRow(final byte[] row, final byte[] family, final byte[] col) {
-2831return createFirstOnRow(row, 0, 
(short) row.length, family, 0, (byte) family.length, col, 0,
-2832col.length);
-2833  }
-2834
-2835  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength,
-2836  final byte[] family, int foffset, 
byte flength, final byte[] col, int coffset, int clength) {
-2837return new FirstOnRowColCell(row, 
roffset, rlength, family, foffset, flength, col, coffset,
-2838clength);
-2839  }
-2840
-2841  public static Cell 
createFirstOnRow(final byte[] row) {
-2842return createFirstOnRow(row, 0, 
(short) row.length);
-2843  }
-2844
-2845  public static Cell 
createFirstOnRowFamily(Cell cell, byte[] fArray, int foff, int flen) {
-2846if (cell instanceof 
ByteBufferExtendedCell) {
-2847  return new 
FirstOnRowColByteBufferExtendedCell(
-2848  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2849  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength(),
-2850  ByteBuffer.wrap(fArray), foff, 
(byte) flen, HConstants.EMPTY_BYTE_BUFFER, 0, 0);
-2851}
-2852return new 
FirstOnRowColCell(cell.getRowArray(), cell.getRowOffset(), 
cell.getRowLength(),
-2853fArray, foff, (byte) flen, 
HConstants.EMPTY_BYTE_ARRAY, 0, 0);
-2854  }
-2855
-2856  public static Cell 
createFirstOnRowCol(final Cell cell) {
-2857if (cell instanceof 
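The heapSize() hunks above switch from estimatedHeapSizeOf to estimatedSizeOfCell; the deleted Javadoc explains the dispatch. Reassembled from the removed lines as a standalone sketch:

  public static long estimatedHeapSizeOf(final Cell cell) {
    if (cell instanceof HeapSize) {
      return ((HeapSize) cell).heapSize();  // the cell reports its own heap footprint
    }
    // otherwise count the bytes of row, CF, qualifier, timestamp, type, value and tags
    return estimatedSerializedSizeOf(cell);
  }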

[28/51] [partial] hbase-site git commit: Published site at .

2018-02-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.html
index 66f3dc6..44bd3a6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.html
@@ -24,486 +24,461 @@
 016package org.apache.hadoop.hbase.client;
 017
 018import static 
org.apache.hadoop.hbase.client.BufferedMutatorParams.UNSET;
-019import java.io.IOException;
-020import java.io.InterruptedIOException;
-021import java.util.Collections;
-022import java.util.Iterator;
-023import java.util.List;
-024import 
java.util.NoSuchElementException;
-025import java.util.Timer;
-026import java.util.TimerTask;
-027import 
java.util.concurrent.ConcurrentLinkedQueue;
-028import 
java.util.concurrent.ExecutorService;
-029import java.util.concurrent.TimeUnit;
-030import 
java.util.concurrent.atomic.AtomicInteger;
-031import 
java.util.concurrent.atomic.AtomicLong;
-032import 
org.apache.hadoop.conf.Configuration;
-033import 
org.apache.hadoop.hbase.TableName;
-034import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-035import 
org.apache.yetus.audience.InterfaceAudience;
-036import 
org.apache.yetus.audience.InterfaceStability;
-037import org.slf4j.Logger;
-038import org.slf4j.LoggerFactory;
-039
-040import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-041
-042/**
-043 * <p>
-044 * Used to communicate with a single 
HBase table similar to {@link Table}
-045 * but meant for batched, potentially 
asynchronous puts. Obtain an instance from
-046 * a {@link Connection} and call {@link 
#close()} afterwards. Provide an alternate
-047 * to this implementation by setting 
{@link BufferedMutatorParams#implementationClassName(String)}
-048 * or by setting alternate classname via 
the key {} in Configuration.
-049 * /p
-050 *
-051 * p
-052 * While this can be used across threads, 
great care should be used when doing so.
-053 * Errors are global to the buffered 
mutator and the Exceptions can be thrown on any
-054 * thread that causes the flush for 
requests.
-055 * /p
-056 *
-057 * @see ConnectionFactory
-058 * @see Connection
-059 * @since 1.0.0
-060 */
-061@InterfaceAudience.Private
-062@InterfaceStability.Evolving
-063public class BufferedMutatorImpl 
implements BufferedMutator {
-064
-065  private static final Logger LOG = 
LoggerFactory.getLogger(BufferedMutatorImpl.class);
-066
-067  private final ExceptionListener 
listener;
-068
-069  private final TableName tableName;
-070
-071  private final Configuration conf;
-072  private final 
ConcurrentLinkedQueueMutation writeAsyncBuffer = new 
ConcurrentLinkedQueue();
-073  private final AtomicLong 
currentWriteBufferSize = new AtomicLong(0);
-074  /**
-075   * Count the size of {@link 
BufferedMutatorImpl#writeAsyncBuffer}.
-076   * The {@link 
ConcurrentLinkedQueue#size()} is NOT a constant-time operation.
-077   */
-078  private final AtomicInteger 
undealtMutationCount = new AtomicInteger(0);
-079  private final long writeBufferSize;
-080
-081  private final AtomicLong 
writeBufferPeriodicFlushTimeoutMs = new AtomicLong(0);
-082  private final AtomicLong 
writeBufferPeriodicFlushTimerTickMs =
-083  new 
AtomicLong(MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS);
-084  private Timer 
writeBufferPeriodicFlushTimer = null;
-085
-086  private final int maxKeyValueSize;
-087  private final ExecutorService pool;
-088  private final AtomicInteger 
rpcTimeout;
-089  private final AtomicInteger 
operationTimeout;
-090  private final boolean 
cleanupPoolOnClose;
-091  private volatile boolean closed = 
false;
-092  private final AsyncProcess ap;
-093
-094  @VisibleForTesting
-095  BufferedMutatorImpl(ClusterConnection 
conn, BufferedMutatorParams params, AsyncProcess ap) {
-096if (conn == null || conn.isClosed()) 
{
-097  throw new 
IllegalArgumentException("Connection is null or closed.");
-098}
-099this.tableName = 
params.getTableName();
-100this.conf = 
conn.getConfiguration();
-101this.listener = 
params.getListener();
-102if (params.getPool() == null) {
-103  this.pool = 
HTable.getDefaultExecutor(conf);
-104  cleanupPoolOnClose = true;
-105} else {
-106  this.pool = params.getPool();
-107  cleanupPoolOnClose = false;
-108}
-109ConnectionConfiguration tableConf = 
new ConnectionConfiguration(conf);
-110this.writeBufferSize =
-111params.getWriteBufferSize() 
!= UNSET ?
-112params.getWriteBufferSize() : 
tableConf.getWriteBufferSize();
-113
-114// Set via the setter because it does 
value validation and starts/stops the TimerTask
-115long 

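For context, a minimal sketch of driving this class through the public BufferedMutator API (the table name, column family and values are illustrative placeholders, not taken from this commit):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.BufferedMutator;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.util.Bytes;

  public class BufferedMutatorSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection connection = ConnectionFactory.createConnection(conf);
           BufferedMutator mutator =
               connection.getBufferedMutator(TableName.valueOf("my_table"))) {
        Put put = new Put(Bytes.toBytes("row-1"));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        mutator.mutate(put); // buffered locally; sent when the write buffer fills
        mutator.flush();     // or push the buffered mutations out explicitly
      }                      // close() flushes anything left and releases the pool
    }
  }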
[28/51] [partial] hbase-site git commit: Published site at .

2018-02-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
index b0d9cb7..0319d89 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
@@ -495,7 +495,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 private static HRegionLocation
-MetaTableAccessor.getRegionLocation(Result r,
+AsyncMetaTableAccessor.getRegionLocation(Result r,
  RegionInfo regionInfo,
  int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result
@@ -504,7 +504,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 private static HRegionLocation
-AsyncMetaTableAccessor.getRegionLocation(Result r,
+MetaTableAccessor.getRegionLocation(Result r,
  RegionInfo regionInfo,
  int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result
@@ -944,9 +944,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 CompletableFuture<List<RegionInfo>>
-AsyncAdmin.getRegions(ServerName serverName)
-Get all the online regions on a region server.
-
+AsyncHBaseAdmin.getRegions(ServerName serverName)
 
 
 List<RegionInfo>
@@ -955,22 +953,22 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 
-CompletableFuture<List<RegionInfo>>
-RawAsyncHBaseAdmin.getRegions(ServerName serverName)
-
-
 List<RegionInfo>
 HBaseAdmin.getRegions(ServerName sn)
 
+
+CompletableFuture<List<RegionInfo>>
+AsyncAdmin.getRegions(ServerName serverName)
+Get all the online regions on a region server.
+
+
 
 CompletableFuture<List<RegionInfo>>
-AsyncHBaseAdmin.getRegions(ServerName serverName)
+RawAsyncHBaseAdmin.getRegions(ServerName serverName)
 
 
 CompletableFuture<List<RegionInfo>>
-AsyncAdmin.getRegions(TableName tableName)
-Get the regions of a given table.
-
+AsyncHBaseAdmin.getRegions(TableName tableName)
 
 
 List<RegionInfo>
@@ -979,16 +977,18 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 
-CompletableFuture<List<RegionInfo>>
-RawAsyncHBaseAdmin.getRegions(TableName tableName)
-
-
 List<RegionInfo>
 HBaseAdmin.getRegions(TableName tableName)
 
+
+CompletableFuture<List<RegionInfo>>

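A hedged sketch of the AsyncAdmin calls whose rows move in the hunks above (the server name and table name are illustrative; assumes an already-opened AsyncConnection named conn):

  import java.util.List;
  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.AsyncAdmin;
  import org.apache.hadoop.hbase.client.RegionInfo;

  AsyncAdmin admin = conn.getAdmin();
  // Get all the online regions on a region server:
  List<RegionInfo> onServer =
      admin.getRegions(ServerName.valueOf("host.example.com,16020,1")).join();
  // Get the regions of a given table:
  List<RegionInfo> ofTable =
      admin.getRegions(TableName.valueOf("my_table")).join();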
[28/51] [partial] hbase-site git commit: Published site at .

2018-02-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncBufferedMutatorBuilder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncBufferedMutatorBuilder.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncBufferedMutatorBuilder.html
index 9f3035a..f2c8f10 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncBufferedMutatorBuilder.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncBufferedMutatorBuilder.html
@@ -121,26 +121,26 @@
 
 
 AsyncBufferedMutatorBuilder
+AsyncConnectionImpl.getBufferedMutatorBuilder(TableName tableName)
+
+
+AsyncBufferedMutatorBuilder
 AsyncConnection.getBufferedMutatorBuilder(TableName tableName)
 Returns an AsyncBufferedMutatorBuilder for creating AsyncBufferedMutator.
 
 
-
+
 AsyncBufferedMutatorBuilder
-AsyncConnectionImpl.getBufferedMutatorBuilder(TableName tableName)
+AsyncConnectionImpl.getBufferedMutatorBuilder(TableName tableName,
+ ExecutorService pool)
 
-
+
 AsyncBufferedMutatorBuilder
 AsyncConnection.getBufferedMutatorBuilder(TableName tableName,
  ExecutorService pool)
 Returns an AsyncBufferedMutatorBuilder for creating AsyncBufferedMutator.
 
 
-
-AsyncBufferedMutatorBuilder
-AsyncConnectionImpl.getBufferedMutatorBuilder(TableName tableName,
- ExecutorService pool)
-
 
 AsyncBufferedMutatorBuilder
 AsyncBufferedMutatorBuilderImpl.setMaxAttempts(int maxAttempts)

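A hedged sketch of the builder named in the rows above, using only setMaxAttempts (which appears in this diff) and build(); the table name is illustrative and conn is an open AsyncConnection:

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.AsyncBufferedMutator;

  try (AsyncBufferedMutator mutator =
      conn.getBufferedMutatorBuilder(TableName.valueOf("my_table"))
          .setMaxAttempts(3) // the setter shown in the last row above
          .build()) {
    // mutator.mutate(...) returns a CompletableFuture<Void> per mutation
  }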
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncConnectionImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncConnectionImpl.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncConnectionImpl.html
index 024eca4..5ba2deb 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncConnectionImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncConnectionImpl.html
@@ -106,11 +106,11 @@
 
 
 private AsyncConnectionImpl
-AsyncClientScanner.conn
+RawAsyncTableImpl.conn
 
 
 private AsyncConnectionImpl
-AsyncRpcRetryingCallerFactory.conn
+AsyncBatchRpcRetryingCaller.conn
 
 
 private AsyncConnectionImpl
@@ -118,19 +118,19 @@
 
 
 private AsyncConnectionImpl
-RawAsyncTableImpl.conn
+RegionCoprocessorRpcChannelImpl.conn
 
 
-private AsyncConnectionImpl
-RegionCoprocessorRpcChannelImpl.conn
+protected AsyncConnectionImpl
+AsyncRpcRetryingCaller.conn
 
 
 private AsyncConnectionImpl
-AsyncBatchRpcRetryingCaller.conn
+AsyncClientScanner.conn
 
 
-protected AsyncConnectionImpl
-AsyncRpcRetryingCaller.conn
+private AsyncConnectionImpl
+AsyncRpcRetryingCallerFactory.conn
 
 
 private AsyncConnectionImpl

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.Callable.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.Callable.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.Callable.html
index d6b1759..e71ca45 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.Callable.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.Callable.html
@@ -105,13 +105,13 @@
 
 
 
-private AsyncMasterRequestRpcRetryingCaller.Callable<T>
-AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder.callable
-
-
 private AsyncMasterRequestRpcRetryingCaller.Callable<T>
 AsyncMasterRequestRpcRetryingCaller.callable
 
+
+private AsyncMasterRequestRpcRetryingCaller.Callable<T>
+AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder.callable
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncProcess.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncProcess.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncProcess.html
index f31564e..60fbcff 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncProcess.html
+++ 

[28/51] [partial] hbase-site git commit: Published site at .

2018-02-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/client/class-use/Delete.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Delete.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Delete.html
index 3c2959e..c233c17 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Delete.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Delete.html
@@ -405,16 +405,6 @@ service.
 
 
 boolean
-HTable.checkAndDelete(byte[] row,
-  byte[] family,
-  byte[] qualifier,
-  byte[] value,
-  Delete delete)
-Deprecated.
-
-
-
-boolean
 Table.checkAndDelete(byte[] row,
   byte[] family,
   byte[] qualifier,
@@ -425,18 +415,17 @@ service.
 
 
 
-
+
 boolean
-HTable.checkAndDelete(byte[] row,
+HTable.checkAndDelete(byte[] row,
   byte[] family,
   byte[] qualifier,
-  CompareFilter.CompareOp compareOp,
   byte[] value,
   Delete delete)
 Deprecated.
 
 
-
+
 boolean
 Table.checkAndDelete(byte[] row,
   byte[] family,
   byte[] qualifier,
@@ -449,18 +438,18 @@ service.
 
 
 
-
+
 boolean
-HTable.checkAndDelete(byte[] row,
+HTable.checkAndDelete(byte[] row,
   byte[] family,
   byte[] qualifier,
-  CompareOperator op,
+  CompareFilter.CompareOp compareOp,
   byte[] value,
   Delete delete)
 Deprecated.
 
 
-
+
 boolean
 Table.checkAndDelete(byte[] row,
   byte[] family,
   byte[] qualifier,
@@ -473,29 +462,40 @@ service.
 
 
 
+
+boolean
+HTable.checkAndDelete(byte[] row,
+  byte[] family,
+  byte[] qualifier,
+  CompareOperator op,
+  byte[] value,
+  Delete delete)
+Deprecated.
+
+
 
 CompletableFuture<Void>
-RawAsyncTableImpl.delete(Delete delete)
+AsyncTable.delete(Delete delete)
+Deletes the specified cells/row.
+
 
 
 void
-HTable.delete(Delete delete)
-
-
-void
 Table.delete(Delete delete)
 Deletes the specified cells/row.
 
 
-
+
 CompletableFuture<Void>
 AsyncTableImpl.delete(Delete delete)
 
-
+
 CompletableFuture<Void>
-AsyncTable.delete(Delete delete)
-Deletes the specified cells/row.
-
+RawAsyncTableImpl.delete(Delete delete)
+
+
+void
+HTable.delete(Delete delete)
 
 
 private boolean
@@ -508,19 +508,19 @@ service.
 
 
 CompletableFuture<Boolean>
-RawAsyncTableImpl.CheckAndMutateBuilderImpl.thenDelete(Delete delete)
+AsyncTable.CheckAndMutateBuilder.thenDelete(Delete delete)
 
 
 boolean
-HTable.CheckAndMutateBuilderImpl.thenDelete(Delete delete)
+Table.CheckAndMutateBuilder.thenDelete(Delete delete)
 
 
-boolean
-Table.CheckAndMutateBuilder.thenDelete(Delete delete)
+CompletableFuture<Boolean>
+RawAsyncTableImpl.CheckAndMutateBuilderImpl.thenDelete(Delete delete)
 
 
-CompletableFuture<Boolean>
-AsyncTable.CheckAndMutateBuilder.thenDelete(Delete delete)
+boolean
+HTable.CheckAndMutateBuilderImpl.thenDelete(Delete delete)
 
 
 
@@ -533,27 +533,27 @@ service.
 
 
 List<CompletableFuture<

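The Deprecated rows above were replaced in 2.0 by the CheckAndMutateBuilder also visible in this diff; a hedged sketch (row, family, qualifier and value are illustrative, and table is an open org.apache.hadoop.hbase.client.Table):

  import org.apache.hadoop.hbase.client.Delete;
  import org.apache.hadoop.hbase.util.Bytes;

  // Delete row-1 only if cf:q currently holds the expected value.
  boolean deleted = table.checkAndMutate(Bytes.toBytes("row-1"), Bytes.toBytes("cf"))
      .qualifier(Bytes.toBytes("q"))
      .ifEquals(Bytes.toBytes("expected-value"))
      .thenDelete(new Delete(Bytes.toBytes("row-1")));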
[28/51] [partial] hbase-site git commit: Published site at .

2018-02-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
index 115dbc0..84b554e 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
@@ -2055,119 +2055,119 @@ service.
 
 
 private TableName
-RegionCoprocessorRpcChannel.table
+SnapshotDescription.table
 
 
 private TableName
-SnapshotDescription.table
+RegionCoprocessorRpcChannel.table
 
 
 private TableName
-HRegionLocator.tableName
+RawAsyncTableImpl.tableName
 
 
 private TableName
-ScannerCallableWithReplicas.tableName
+RegionServerCallable.tableName
 
 
 protected TableName
-ClientScanner.tableName
+RegionAdminServiceCallable.tableName
 
 
 private TableName
-AsyncClientScanner.tableName
+BufferedMutatorImpl.tableName
 
 
 private TableName
-AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.tableName
+AsyncProcessTask.tableName
 
 
 private TableName
-AsyncRpcRetryingCallerFactory.BatchCallerBuilder.tableName
+AsyncProcessTask.Builder.tableName
 
 
 private TableName
-RegionInfoBuilder.tableName
+AsyncRequestFutureImpl.tableName
 
 
-private TableName
-RegionInfoBuilder.MutableRegionInfo.tableName
+protected TableName
+TableBuilderBase.tableName
 
 
 private TableName
-RawAsyncTableImpl.tableName
+AsyncBatchRpcRetryingCaller.tableName
 
 
 private TableName
-RegionCoprocessorRpcChannelImpl.tableName
+RegionInfoBuilder.tableName
 
 
 private TableName
-AsyncTableRegionLocatorImpl.tableName
+RegionInfoBuilder.MutableRegionInfo.tableName
 
 
-protected TableName
-RegionAdminServiceCallable.tableName
+private TableName
+HTable.tableName
 
 
 private TableName
-HTable.tableName
+TableState.tableName
 
 
-private TableName
-BufferedMutatorImpl.tableName
+protected TableName
+RpcRetryingCallerWithReadReplicas.tableName
 
 
-private TableName
-AsyncBatchRpcRetryingCaller.tableName
+protected TableName
+AsyncTableBuilderBase.tableName
 
 
 private TableName
-BufferedMutatorParams.tableName
+AsyncSingleRequestRpcRetryingCaller.tableName
 
 
 private TableName
-HBaseAdmin.TableFuture.tableName
+ScannerCallableWithReplicas.tableName
 
 
-private TableName
-AsyncRequestFutureImpl.tableName
+protected TableName
+RawAsyncHBaseAdmin.TableProcedureBiConsumer.tableName
 
 
 private TableName
-AsyncProcessTask.tableName
+AsyncTableRegionLocatorImpl.tableName
 
 
 private TableName
-AsyncProcessTask.Builder.tableName
+HBaseAdmin.TableFuture.tableName
 
 
-protected TableName
-RawAsyncHBaseAdmin.TableProcedureBiConsumer.tableName
+private TableName
+RegionCoprocessorRpcChannelImpl.tableName
 
 
-private TableName
-RegionServerCallable.tableName
+protected TableName
+ClientScanner.tableName
 
 
 private TableName
-AsyncSingleRequestRpcRetryingCaller.tableName
+BufferedMutatorParams.tableName
 
 
-protected TableName
-TableBuilderBase.tableName
+private TableName
+AsyncClientScanner.tableName
 
 
-protected TableName
-RpcRetryingCallerWithReadReplicas.tableName
+private TableName
+AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.tableName
 
 
-protected TableName
-AsyncTableBuilderBase.tableName
+private TableName
+AsyncRpcRetryingCallerFactory.BatchCallerBuilder.tableName
 
 
 private TableName
-TableState.tableName
+HRegionLocator.tableName
 
 
 
@@ -2209,83 +2209,83 @@ service.
 
 
 TableName
-AsyncTable.getName()
-Gets the fully qualified table name instance of this 
table.
-
+RawAsyncTableImpl.getName()
 
 
 TableName
-Table.getName()
+RegionLocator.getName()
 Gets the fully qualified table name instance of this 
table.
 
 
 
 TableName
-HRegionLocator.getName()
+BufferedMutatorImpl.getName()
 
 
 TableName
-AsyncTableRegionLocator.getName()
-Gets the fully qualified table name instance of the table 
whose region we want to locate.
+BufferedMutator.getName()
+Gets the fully qualified table name instance of the table 
that this BufferedMutator writes to.
 
 
 
 TableName
-AsyncTableImpl.getName()
+HTable.getName()
 
 
 TableName
-RawAsyncTableImpl.getName()
+AsyncBufferedMutator.getName()
+Gets the fully qualified table name instance of the table 
that this
+ AsyncBufferedMutator writes to.
+
 
 
 TableName
-AsyncTableRegionLocatorImpl.getName()
+Table.getName()
+Gets the fully qualified table name instance of this 
table.
+
 
 
 TableName
-BufferedMutator.getName()
-Gets the fully qualified table name instance of the table 
that this BufferedMutator writes to.
-
+AsyncTableImpl.getName()
 
 
 TableName
-RegionLocator.getName()
+AsyncTable.getName()
 Gets the fully qualified table name instance of this 
table.
 
 
 
 TableName
-AsyncBufferedMutatorImpl.getName()
+AsyncTableRegionLocatorImpl.getName()
 
 
 TableName
-HTable.getName()
+AsyncTableRegionLocator.getName()
+Gets the fully qualified table 

[28/51] [partial] hbase-site git commit: Published site at .

2018-02-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/client/class-use/Delete.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Delete.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Delete.html
index 3c2959e..c233c17 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Delete.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Delete.html
@@ -405,16 +405,6 @@ service.
 
 
 boolean
-HTable.checkAndDelete(byte[]row,
-  byte[]family,
-  byte[]qualifier,
-  byte[]value,
-  Deletedelete)
-Deprecated.
-
-
-
-boolean
 Table.checkAndDelete(byte[]row,
   byte[]family,
   byte[]qualifier,
@@ -425,18 +415,17 @@ service.
 
 
 
-
+
 boolean
-HTable.checkAndDelete(byte[]row,
+HTable.checkAndDelete(byte[]row,
   byte[]family,
   byte[]qualifier,
-  CompareFilter.CompareOpcompareOp,
   byte[]value,
   Deletedelete)
 Deprecated.
 
 
-
+
 boolean
 Table.checkAndDelete(byte[]row,
   byte[]family,
@@ -449,18 +438,18 @@ service.
 
 
 
-
+
 boolean
-HTable.checkAndDelete(byte[]row,
+HTable.checkAndDelete(byte[]row,
   byte[]family,
   byte[]qualifier,
-  CompareOperatorop,
+  CompareFilter.CompareOpcompareOp,
   byte[]value,
   Deletedelete)
 Deprecated.
 
 
-
+
 boolean
 Table.checkAndDelete(byte[]row,
   byte[]family,
@@ -473,29 +462,40 @@ service.
 
 
 
+
+boolean
+HTable.checkAndDelete(byte[]row,
+  byte[]family,
+  byte[]qualifier,
+  CompareOperatorop,
+  byte[]value,
+  Deletedelete)
+Deprecated.
+
+
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-RawAsyncTableImpl.delete(Deletedelete)
+AsyncTable.delete(Deletedelete)
+Deletes the specified cells/row.
+
 
 
 void
-HTable.delete(Deletedelete)
-
-
-void
 Table.delete(Deletedelete)
 Deletes the specified cells/row.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
 AsyncTableImpl.delete(Deletedelete)
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-AsyncTable.delete(Deletedelete)
-Deletes the specified cells/row.
-
+RawAsyncTableImpl.delete(Deletedelete)
+
+
+void
+HTable.delete(Deletedelete)
 
 
 private boolean
@@ -508,19 +508,19 @@ service.
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">Boolean
-RawAsyncTableImpl.CheckAndMutateBuilderImpl.thenDelete(Deletedelete)
+AsyncTable.CheckAndMutateBuilder.thenDelete(Deletedelete)
 
 
 boolean
-HTable.CheckAndMutateBuilderImpl.thenDelete(Deletedelete)
+Table.CheckAndMutateBuilder.thenDelete(Deletedelete)
 
 
-boolean
-Table.CheckAndMutateBuilder.thenDelete(Deletedelete)
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">Boolean
+RawAsyncTableImpl.CheckAndMutateBuilderImpl.thenDelete(Deletedelete)
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">Boolean
-AsyncTable.CheckAndMutateBuilder.thenDelete(Deletedelete)
+boolean
+HTable.CheckAndMutateBuilderImpl.thenDelete(Deletedelete)
 
 
 
@@ -533,27 +533,27 @@ service.
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 

[28/51] [partial] hbase-site git commit: Published site at .

2018-02-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
index 4c96d78..f3e8e8b 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
@@ -620,72 +620,72 @@ service.
 
 
 boolean
-Table.checkAndPut(byte[] row,
+HTable.checkAndPut(byte[] row,
 byte[] family,
 byte[] qualifier,
 byte[] value,
 Put put)
-Deprecated.
-Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
-
+Deprecated.
 
 
 
 boolean
-HTable.checkAndPut(byte[] row,
+Table.checkAndPut(byte[] row,
 byte[] family,
 byte[] qualifier,
 byte[] value,
 Put put)
-Deprecated.
+Deprecated.
+Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
+
 
 
 
 boolean
-Table.checkAndPut(byte[] row,
+HTable.checkAndPut(byte[] row,
 byte[] family,
 byte[] qualifier,
 CompareFilter.CompareOp compareOp,
 byte[] value,
 Put put)
-Deprecated.
-Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
-
+Deprecated.
 
 
 
 boolean
-HTable.checkAndPut(byte[] row,
+Table.checkAndPut(byte[] row,
 byte[] family,
 byte[] qualifier,
 CompareFilter.CompareOp compareOp,
 byte[] value,
 Put put)
-Deprecated.
+Deprecated.
+Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
+
 
 
 
 boolean
-Table.checkAndPut(byte[] row,
+HTable.checkAndPut(byte[] row,
 byte[] family,
 byte[] qualifier,
 CompareOperator op,
 byte[] value,
 Put put)
-Deprecated.
-Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
-
+Deprecated.
 
 
 
 boolean
-HTable.checkAndPut(byte[] row,
+Table.checkAndPut(byte[] row,
 byte[] family,
 byte[] qualifier,
 CompareOperator op,
 byte[] value,
 Put put)
-Deprecated.
+Deprecated.
+Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
+
 
 
@@ -718,27 +718,27 @@ service.
 
 
 CompletableFuture<Void>
-AsyncTable.put(Put put)
-Puts some data to the table.
-
+RawAsyncTableImpl.put(Put put)
 
 
 void
+HTable.put(Put put)
+
+
+void
 Table.put(Put put)
 Puts some data in the table.
 
 
-
-CompletableFuture<Void>
-AsyncTableImpl.put(Put put)
-
 
 CompletableFuture<Void>
-RawAsyncTableImpl.put(Put put)
+AsyncTableImpl.put(Put put)
 
 
-void
-HTable.put(Put put)
+CompletableFuture<Void>
+AsyncTable.put(Put put)
+Puts some data to the table.
+
 
 
 boolean
@@ -757,27 +757,27 @@ service.
 
 
 CompletableFuture<Boolean>
-AsyncTable.CheckAndMutateBuilder.thenPut(Put put)
+RawAsyncTableImpl.CheckAndMutateBuilderImpl.thenPut(Put put)
 
 
 boolean
-Table.CheckAndMutateBuilder.thenPut(Put put)
+HTable.CheckAndMutateBuilderImpl.thenPut(Put put)
 
 
-CompletableFuture<Boolean>
-RawAsyncTableImpl.CheckAndMutateBuilderImpl.thenPut(Put put)
+boolean
+Table.CheckAndMutateBuilder.thenPut(Put put)

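For contrast with the blocking Table.put in these rows, a hedged sketch of the async flavor (names are illustrative; conn is an open AsyncConnection):

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.AsyncTable;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.util.Bytes;

  AsyncTable<?> asyncTable = conn.getTable(TableName.valueOf("my_table"));
  Put put = new Put(Bytes.toBytes("row-1"))
      .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  asyncTable.put(put).join(); // CompletableFuture<Void>; completes when the put is applied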
[28/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
index ad601c4..53e455f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
@@ -1117,1183 +1117,1186 @@
 1109  @Nullable
 1110  public static TableState getTableState(Connection conn, TableName tableName)
 1111      throws IOException {
-1112    Table metaHTable = getMetaHTable(conn);
-1113    Get get = new Get(tableName.getName()).addColumn(getTableFamily(), getTableStateColumn());
-1114    long time = EnvironmentEdgeManager.currentTime();
-1115    get.setTimeRange(0, time);
-1116    Result result =
-1117        metaHTable.get(get);
-1118    return getTableState(result);
-1119  }
-1120
-1121  /**
-1122   * Fetch table states from META table
-1123   * @param conn connection to use
-1124   * @return map {tableName -> state}
-1125   * @throws IOException
-1126   */
-1127  public static Map<TableName, TableState> getTableStates(Connection conn)
-1128      throws IOException {
-1129    final Map<TableName, TableState> states = new LinkedHashMap<>();
-1130    Visitor collector = new Visitor() {
-1131      @Override
-1132      public boolean visit(Result r) throws IOException {
-1133        TableState state = getTableState(r);
-1134        if (state != null)
-1135          states.put(state.getTableName(), state);
-1136        return true;
-1137      }
-1138    };
-1139    fullScanTables(conn, collector);
-1140    return states;
-1141  }
-1142
-1143  /**
-1144   * Updates state in META
-1145   * @param conn connection to use
-1146   * @param tableName table to look for
-1147   * @throws IOException
-1148   */
-1149  public static void updateTableState(Connection conn, TableName tableName,
-1150      TableState.State actual) throws IOException {
-1151    updateTableState(conn, new TableState(tableName, actual));
-1152  }
-1153
-1154  /**
-1155   * Decode table state from META Result.
-1156   * Should contain cell from HConstants.TABLE_FAMILY
-1157   * @param r result
-1158   * @return null if not found
-1159   * @throws IOException
-1160   */
-1161  @Nullable
-1162  public static TableState getTableState(Result r)
-1163      throws IOException {
-1164    Cell cell = r.getColumnLatestCell(getTableFamily(), getTableStateColumn());
-1165    if (cell == null) return null;
-1166    try {
-1167      return TableState.parseFrom(TableName.valueOf(r.getRow()),
-1168          Arrays.copyOfRange(cell.getValueArray(),
-1169          cell.getValueOffset(), cell.getValueOffset() + cell.getValueLength()));
-1170    } catch (DeserializationException e) {
-1171      throw new IOException(e);
-1172    }
-1173
-1174  }
-1175
-1176  /**
-1177   * Implementations 'visit' a catalog table row.
-1178   */
-1179  public interface Visitor {
-1180    /**
-1181     * Visit the catalog table row.
-1182     * @param r A row from catalog table
-1183     * @return True if we are to proceed scanning the table, else false if
-1184     * we are to stop now.
-1185     */
-1186    boolean visit(final Result r) throws IOException;
-1187  }
-1188
-1189  /**
-1190   * Implementations 'visit' a catalog table row but with close() at the end.
-1191   */
-1192  public interface CloseableVisitor extends Visitor, Closeable {
-1193  }
-1194
-1195  /**
-1196   * A {@link Visitor} that collects content out of passed {@link Result}.
-1197   */
-1198  static abstract class CollectingVisitor<T> implements Visitor {
-1199    final List<T> results = new ArrayList<>();
-1200    @Override
-1201    public boolean visit(Result r) throws IOException {
-1202      if (r == null || r.isEmpty()) return true;
-1203      add(r);
-1204      return true;
-1205    }
-1206
-1207    abstract void add(Result r);
-1208
-1209    /**
-1210     * @return Collected results; wait till visits complete to collect all
-1211     * possible results
-1212     */
-1213    List<T> getResults() {
-1214      return this.results;
-1215    }
-1216  }
-1217
-1218  /**
-1219   * Collects all returned.
-1220   */
-1221  static class CollectAllVisitor extends CollectingVisitor<Result> {
-1222    @Override
-1223    void add(Result r) {
-1224      this.results.add(r);
-1225    }
-1226  }
-1227
-1228  /**
-1229   * A Visitor that skips offline regions and split parents
-1230   */
-1231  public static abstract class DefaultVisitorBase implements Visitor {
-1232
-1233    public DefaultVisitorBase() {
-1234      super();
-1235    }
-1236
-1237    public abstract boolean visitInternal(Result rowResult) throws IOException;
-1238
-1239    @Override

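A hedged sketch of a custom Visitor in the style of the interface above, assuming MetaTableAccessor.fullScanTables(Connection, Visitor) as shown in this fragment (connection is an open Connection):

  import java.io.IOException;
  import java.util.concurrent.atomic.AtomicLong;
  import org.apache.hadoop.hbase.MetaTableAccessor;
  import org.apache.hadoop.hbase.client.Result;

  // Count non-empty rows seen during a full scan of the catalog table.
  AtomicLong rows = new AtomicLong();
  MetaTableAccessor.Visitor counter = new MetaTableAccessor.Visitor() {
    @Override
    public boolean visit(Result r) throws IOException {
      if (r != null && !r.isEmpty()) {
        rows.incrementAndGet();
      }
      return true; // keep scanning
    }
  };
  MetaTableAccessor.fullScanTables(connection, counter);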
[28/51] [partial] hbase-site git commit: Published site at .

2018-02-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.DummyServer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.DummyServer.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.DummyServer.html
index d9bb29e..c60f3c4 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.DummyServer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.DummyServer.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class ReplicationSyncUp.DummyServer
+static class ReplicationSyncUp.DummyServer
 extends Object
 implements Server
 
@@ -289,7 +289,7 @@ implements 
 
 
 hostname
-String hostname
+String hostname
 
 
 
@@ -298,7 +298,7 @@ implements 
 
 
 zkw
-ZKWatcher zkw
+ZKWatcher zkw
 
 
 
@@ -315,7 +315,7 @@ implements 
 
 
 DummyServer
-DummyServer(ZKWatcher zkw)
+DummyServer(ZKWatcher zkw)
 
 
 
@@ -324,7 +324,7 @@ implements 
 
 
 DummyServer
-DummyServer(String hostname)
+DummyServer(String hostname)
 
 
 
@@ -341,7 +341,7 @@ implements 
 
 
 getConfiguration
-public org.apache.hadoop.conf.Configuration getConfiguration()
+public org.apache.hadoop.conf.Configuration getConfiguration()
 Description copied from interface: Server
 Gets the configuration object for this server.
 
@@ -356,7 +356,7 @@ implements 
 
 
 getZooKeeper
-public ZKWatcher getZooKeeper()
+public ZKWatcher getZooKeeper()
 Description copied from interface: Server
 Gets the ZooKeeper instance for this server.
 
@@ -371,7 +371,7 @@ implements 
 
 
 getCoordinatedStateManager
-public CoordinatedStateManager getCoordinatedStateManager()
+public CoordinatedStateManager getCoordinatedStateManager()
 Description copied from interface: Server
 Get CoordinatedStateManager instance for this server.
 
@@ -386,7 +386,7 @@ implements 
 
 
 getMetaTableLocator
-public MetaTableLocator getMetaTableLocator()
+public MetaTableLocator getMetaTableLocator()
 Description copied from interface: Server
 Returns instance of MetaTableLocator
  running inside this server. This MetaServerLocator is started and stopped by server, clients
@@ -405,7 +405,7 @@ implements 
 
 
 getServerName
-public ServerName getServerName()
+public ServerName getServerName()
 
 Specified by:
 getServerName in interface Server
@@ -420,7 +420,7 @@ implements 
 
 
 abort
-public void abort(String why,
+public void abort(String why,
   Throwable e)
 Description copied from interface: Abortable
 Abort the server or client.
@@ -439,7 +439,7 @@ implements 
 
 
 isAborted
-public boolean isAborted()
+public boolean isAborted()
 Description copied from interface: Abortable
 Check if the server or client was aborted.
 
@@ -456,7 +456,7 @@ implements 
 
 
 stop
-public void stop(String why)
+public void stop(String why)
 Description copied from interface: Stoppable
 Stop this service.
  Implementers should favor logging errors over throwing RuntimeExceptions.
@@ -474,7 +474,7 @@ implements 
 
 
 isStopped
-public boolean isStopped()
+public boolean isStopped()
 
 Specified by:
 isStopped in interface Stoppable
@@ -489,7 +489,7 @@ implements 
 
 
 getConnection
-public ClusterConnection getConnection()
+public ClusterConnection getConnection()
 Description copied from interface: Server
 Returns a reference to the servers' connection.
 
@@ -507,7 +507,7 @@ implements 
 
 
 getChoreService
-public ChoreService getChoreService()
+public ChoreService getChoreService()
 
 Specified by:
 getChoreService in interface Server
@@ -522,7 +522,7 @@ implements 
 
 
 getClusterConnection
-public ClusterConnection getClusterConnection()

[28/51] [partial] hbase-site git commit: Published site at .

2018-01-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/TestRegionLocations.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/TestRegionLocations.html 
b/testdevapidocs/org/apache/hadoop/hbase/TestRegionLocations.html
index 89d54aa..e8a7c86 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/TestRegionLocations.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/TestRegionLocations.html
@@ -130,42 +130,46 @@ extends Object
 Field and Description
 
 
+static HBaseClassTestRule
+CLASS_RULE
+
+
 (package private) 
org.apache.hadoop.hbase.HRegionInfo
 info0
 
-
+
 (package private) 
org.apache.hadoop.hbase.HRegionInfo
 info1
 
-
+
 (package private) 
org.apache.hadoop.hbase.HRegionInfo
 info2
 
-
+
 (package private) 
org.apache.hadoop.hbase.HRegionInfo
 info9
 
-
+
 (package private) long
 regionId1
 
-
+
 (package private) long
 regionId2
 
-
+
 (package private) 
org.apache.hadoop.hbase.ServerName
 sn0
 
-
+
 (package private) 
org.apache.hadoop.hbase.ServerName
 sn1
 
-
+
 (package private) 
org.apache.hadoop.hbase.ServerName
 sn2
 
-
+
 (package private) 
org.apache.hadoop.hbase.ServerName
 sn3
 
@@ -279,13 +283,22 @@ extends Object
 
 
 Field Detail
+
+
+
+
+
+CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
+
+
 
 
 
 
 
 sn0
-org.apache.hadoop.hbase.ServerName sn0
+org.apache.hadoop.hbase.ServerName sn0
 
 
 
@@ -294,7 +307,7 @@ extends Object
 
 
 sn1
-org.apache.hadoop.hbase.ServerName sn1
+org.apache.hadoop.hbase.ServerName sn1
 
 
 
@@ -303,7 +316,7 @@ extends Object
 
 
 sn2
-org.apache.hadoop.hbase.ServerName sn2
+org.apache.hadoop.hbase.ServerName sn2
 
 
 
@@ -312,7 +325,7 @@ extends Object
 
 
 sn3
-org.apache.hadoop.hbase.ServerName sn3
+org.apache.hadoop.hbase.ServerName sn3
 
 
 
@@ -321,7 +334,7 @@ extends Object
 
 
 info0
-org.apache.hadoop.hbase.HRegionInfo info0
+org.apache.hadoop.hbase.HRegionInfo info0
 
 
 
@@ -330,7 +343,7 @@ extends Object
 
 
 info1
-org.apache.hadoop.hbase.HRegionInfo info1
+org.apache.hadoop.hbase.HRegionInfo info1
 
 
 
@@ -339,7 +352,7 @@ extends Object
 
 
 info2
-org.apache.hadoop.hbase.HRegionInfo info2
+org.apache.hadoop.hbase.HRegionInfo info2
 
 
 
@@ -348,7 +361,7 @@ extends Object
 
 
 info9
-org.apache.hadoop.hbase.HRegionInfo info9
+org.apache.hadoop.hbase.HRegionInfo info9
 
 
 
@@ -357,7 +370,7 @@ extends Object
 
 
 regionId1
-long regionId1
+long regionId1
 
 
 
@@ -366,7 +379,7 @@ extends Object
 
 
 regionId2
-long regionId2
+long regionId2
 
 
 
@@ -400,7 +413,7 @@ extends Object
 
 
 testSizeMethods
-public void testSizeMethods()
+public void testSizeMethods()
 
 
 
@@ -409,7 +422,7 @@ extends Object
 
 
 hri
-private org.apache.hadoop.hbase.HRegionInfo hri(int replicaId)
+private org.apache.hadoop.hbase.HRegionInfo hri(int replicaId)
 
 
 
@@ -418,7 +431,7 @@ extends Object
 
 
 hri
-private org.apache.hadoop.hbase.HRegionInfo hri(long regionId,
+private org.apache.hadoop.hbase.HRegionInfo hri(long regionId,
 int replicaId)
 
 
@@ -428,7 +441,7 @@ extends Object
 
 
 hrl
-private org.apache.hadoop.hbase.HRegionLocation hrl(org.apache.hadoop.hbase.HRegionInfo hri,
+private org.apache.hadoop.hbase.HRegionLocation hrl(org.apache.hadoop.hbase.HRegionInfo hri,
 org.apache.hadoop.hbase.ServerName sn)
 
 
@@ -438,7 +451,7 @@ extends Object
 
 
 hrl
-private org.apache.hadoop.hbase.HRegionLocation hrl(org.apache.hadoop.hbase.HRegionInfo hri,
+private org.apache.hadoop.hbase.HRegionLocation hrl(org.apache.hadoop.hbase.HRegionInfo hri,
 org.apache.hadoop.hbase.ServerName sn,
 long seqNum)
 
@@ -449,7 +462,7 @@ extends Object
 
 
 hrll
-private org.apache.hadoop.hbase.RegionLocations hrll(org.apache.hadoop.hbase.HRegionLocation... locations)

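The CLASS_RULE field added here follows the standard HBaseClassTestRule pattern; roughly (a sketch; the test category annotation is illustrative):

  import org.apache.hadoop.hbase.HBaseClassTestRule;
  import org.apache.hadoop.hbase.testclassification.SmallTests;
  import org.junit.ClassRule;
  import org.junit.experimental.categories.Category;

  @Category(SmallTests.class) // category illustrative
  public class TestRegionLocations {
    // Every HBase test class carries a CLASS_RULE like the one this diff adds;
    // among other things it enforces a per-class timeout.
    @ClassRule
    public static final HBaseClassTestRule CLASS_RULE =
        HBaseClassTestRule.forClass(TestRegionLocations.class);
  }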
[28/51] [partial] hbase-site git commit: Published site at .

2018-01-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.CreateCommand.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.CreateCommand.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.CreateCommand.html
index eb9e252..667152a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.CreateCommand.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.CreateCommand.html
@@ -28,22 +28,22 @@
 020
 021import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH;
 022import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC;
-023import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
-024import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
-025import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
-026import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
-027import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
-028import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_BACKUP_DESC;
-029import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
-030import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
-031import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
-032import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_LIST_DESC;
-033import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
-034import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
-035import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME;
-036import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_DESC;
-037import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
-038import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
+023import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
+024import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
+025import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
+026import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
+027import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
+028import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
+029import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
+030import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_BACKUP_DESC;
+031import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
+032import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
+033import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
+034import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_LIST_DESC;
+035import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
+036import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
+037import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME;
+038import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_DESC;
 039
 040import java.io.IOException;
 041import java.net.URI;
@@ -70,194 +70,194 @@
 062import org.apache.hadoop.hbase.backup.util.BackupUtils;
 063import org.apache.hadoop.hbase.client.Connection;
 064import org.apache.hadoop.hbase.client.ConnectionFactory;
-065import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-066import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-067import org.apache.yetus.audience.InterfaceAudience;
-068
-069/**
-070 * General backup commands, options and usage messages
-071 */
-072
+065import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+066import org.apache.yetus.audience.InterfaceAudience;
+067
+068import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+069
+070/**
+071 * General backup commands, options and usage messages
+072 */
 073@InterfaceAudience.Private
 074public final class BackupCommands {
-075
-076  public final static String INCORRECT_USAGE = "Incorrect usage";
-077
-078  public final static String TOP_LEVEL_NOT_ALLOWED =
-079      "Top level (root) folder is not allowed to be a backup destination";
-080
-081  public static final String USAGE = "Usage: hbase
[28/51] [partial] hbase-site git commit: Published site at .

2018-01-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
index ca8be5e..b8e6dfa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -6398,514 +6398,514 @@
 6390      int initialBatchProgress = scannerContext.getBatchProgress();
 6391      long initialSizeProgress = scannerContext.getDataSizeProgress();
 6392      long initialHeapSizeProgress = scannerContext.getHeapSizeProgress();
-6393      long initialTimeProgress = scannerContext.getTimeProgress();
-6394
-6395      // The loop here is used only when at some point during the next we determine
-6396      // that due to effects of filters or otherwise, we have an empty row in the result.
-6397      // Then we loop and try again. Otherwise, we must get out on the first iteration via return,
-6398      // "true" if there's more data to read, "false" if there isn't (storeHeap is at a stop row,
-6399      // and joinedHeap has no more data to read for the last row (if set, joinedContinuationRow).
-6400      while (true) {
-6401        // Starting to scan a new row. Reset the scanner progress according to whether or not
-6402        // progress should be kept.
-6403        if (scannerContext.getKeepProgress()) {
-6404          // Progress should be kept. Reset to initial values seen at start of method invocation.
-6405          scannerContext.setProgress(initialBatchProgress, initialSizeProgress,
-6406              initialHeapSizeProgress, initialTimeProgress);
-6407        } else {
-6408          scannerContext.clearProgress();
-6409        }
-6410        if (rpcCall.isPresent()) {
-6411          // If a user specifies a too-restrictive or too-slow scanner, the
-6412          // client might time out and disconnect while the server side
-6413          // is still processing the request. We should abort aggressively
-6414          // in that case.
-6415          long afterTime = rpcCall.get().disconnectSince();
-6416          if (afterTime >= 0) {
-6417            throw new CallerDisconnectedException(
-6418                "Aborting on region " + getRegionInfo().getRegionNameAsString() + ", call " +
-6419                this + " after " + afterTime + " ms, since " +
-6420                "caller disconnected");
-6421          }
-6422        }
-6423
-6424        // Let's see what we have in the storeHeap.
-6425        Cell current = this.storeHeap.peek();
-6426
-6427        boolean shouldStop = shouldStop(current);
-6428        // When has filter row is true it means that the all the cells for a particular row must be
-6429        // read before a filtering decision can be made. This means that filters where hasFilterRow
-6430        // run the risk of enLongAddering out of memory errors in the case that they are applied to a
-6431        // table that has very large rows.
-6432        boolean hasFilterRow = this.filter != null && this.filter.hasFilterRow();
-6433
-6434        // If filter#hasFilterRow is true, partial results are not allowed since allowing them
-6435        // would prevent the filters from being evaluated. Thus, if it is true, change the
-6436        // scope of any limits that could potentially create partial results to
-6437        // LimitScope.BETWEEN_ROWS so that those limits are not reached mid-row
-6438        if (hasFilterRow) {
-6439          if (LOG.isTraceEnabled()) {
-6440            LOG.trace("filter#hasFilterRow is true which prevents partial results from being "
-6441                + " formed. Changing scope of limits that may create partials");
-6442          }
-6443          scannerContext.setSizeLimitScope(LimitScope.BETWEEN_ROWS);
-6444          scannerContext.setTimeLimitScope(LimitScope.BETWEEN_ROWS);
-6445        }
-6446
-6447        // Check if we were getting data from the joinedHeap and hit the limit.
-6448        // If not, then it's main path - getting results from storeHeap.
-6449        if (joinedContinuationRow == null) {
-6450          // First, check if we are at a stop row. If so, there are no more results.
-6451          if (shouldStop) {
-6452            if (hasFilterRow) {
-6453              filter.filterRowCells(results);
-6454            }
-6455            return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
-6456          }
-6457
-6458          // Check if rowkey filter wants to exclude this row. If so, loop to next.
-6459          // Technically, if we hit limits before on this row, we don't need this call.
-6460          if (filterRowKey(current)) {
-6461
[28/51] [partial] hbase-site git commit: Published site at .

2018-01-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
index 792ce9b..2304927 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
@@ -990,7 +990,7 @@ implements 
 
 DESERIALIZER_IDENTIFIER
-private static finalint DESERIALIZER_IDENTIFIER
+private static finalint DESERIALIZER_IDENTIFIER
 
 
 
@@ -1007,7 +1007,7 @@ implements 
 
 HFileBlock
-privateHFileBlock(HFileBlockthat)
+privateHFileBlock(HFileBlockthat)
 Copy constructor. Creates a shallow copy of 
that's buffer.
 
 
@@ -1017,7 +1017,7 @@ implements 
 
 HFileBlock
-privateHFileBlock(HFileBlockthat,
+privateHFileBlock(HFileBlockthat,
booleanbufCopy)
 Copy constructor. Creates a shallow/deep copy of 
that's buffer as per the boolean
  param.
@@ -1029,7 +1029,7 @@ implements 
 
 HFileBlock
-HFileBlock(BlockTypeblockType,
+HFileBlock(BlockTypeblockType,
intonDiskSizeWithoutHeader,
intuncompressedSizeWithoutHeader,
longprevBlockOffset,
@@ -1067,7 +1067,7 @@ implements 
 
 HFileBlock
-HFileBlock(ByteBuffbuf,
+HFileBlock(ByteBuffbuf,
booleanusesHBaseChecksum,
Cacheable.MemoryTypememType,
longoffset,
@@ -1100,7 +1100,7 @@ implements 
 
 init
-privatevoidinit(BlockTypeblockType,
+privatevoidinit(BlockTypeblockType,
   intonDiskSizeWithoutHeader,
   intuncompressedSizeWithoutHeader,
   longprevBlockOffset,
@@ -1117,7 +1117,7 @@ implements 
 
 getOnDiskSizeWithHeader
-private staticintgetOnDiskSizeWithHeader(http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferheaderBuf,
+private staticintgetOnDiskSizeWithHeader(http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferheaderBuf,
booleanverifyChecksum)
 Parse total on disk size including header and 
checksum.
 
@@ -1135,7 +1135,7 @@ implements 
 
 getNextBlockOnDiskSize
-intgetNextBlockOnDiskSize()
+intgetNextBlockOnDiskSize()
 
 Returns:
 the on-disk size of the next block (including the header size and any 
checksums if
@@ -1150,7 +1150,7 @@ implements 
 
 getBlockType
-publicBlockTypegetBlockType()
+publicBlockTypegetBlockType()
 
 Specified by:
 getBlockTypein
 interfaceCacheable
@@ -1165,7 +1165,7 @@ implements 
 
 getDataBlockEncodingId
-shortgetDataBlockEncodingId()
+shortgetDataBlockEncodingId()
 
 Returns:
 get data block encoding id that was used to encode this block
@@ -1178,7 +1178,7 @@ implements 
 
 getOnDiskSizeWithHeader
-publicintgetOnDiskSizeWithHeader()
+publicintgetOnDiskSizeWithHeader()
 
 Returns:
 the on-disk size of header + data part + checksum.
@@ -1191,7 +1191,7 @@ implements 
 
 getOnDiskSizeWithoutHeader
-intgetOnDiskSizeWithoutHeader()
+intgetOnDiskSizeWithoutHeader()
 
 Returns:
 the on-disk size of the data part + checksum (header excluded).
@@ -1204,7 +1204,7 @@ implements 
 
 getUncompressedSizeWithoutHeader
-intgetUncompressedSizeWithoutHeader()
+intgetUncompressedSizeWithoutHeader()
 
 Returns:
 the uncompressed size of data part (header and checksum excluded).
@@ -1217,7 +1217,7 @@ implements 
 
 getPrevBlockOffset
-longgetPrevBlockOffset()
+longgetPrevBlockOffset()
 
 Returns:
 the offset of the previous block of the same type in the file, or
@@ -1231,7 +1231,7 @@ implements 
 
 overwriteHeader
-private void overwriteHeader()
+private void overwriteHeader()
 Rewinds buf and writes first 4 header fields. buf position is modified as side-effect.
 
@@ -1242,7 +1242,7 @@ implements 
 
 getBufferWithoutHeader
-public ByteBuff getBufferWithoutHeader()
+public ByteBuff getBufferWithoutHeader()
 Returns a buffer that does not include the header or checksum.
 
 Returns:
@@ -1256,7 +1256,7 @@ implements 
 
 getBufferReadOnly
-public ByteBuff getBufferReadOnly()
+public ByteBuff getBufferReadOnly()
 Returns a read-only duplicate of the buffer this block stores internally ready to be read.
 Clients must not modify the buffer object though they may set position and limit on the
 returned buffer since we pass back a duplicate. This method has to be public because it is used
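
The duplicate contract described above is the standard java.nio one: a duplicate shares the underlying bytes but keeps its own position and limit. A minimal, self-contained sketch of that behaviour (plain java.nio.ByteBuffer, not the ByteBuff wrapper this method actually returns):

import java.nio.ByteBuffer;

public class ReadOnlyDuplicateDemo {
  public static void main(String[] args) {
    ByteBuffer original = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4 });
    // A read-only duplicate shares the bytes but has independent cursors,
    // so callers may set position/limit without disturbing the owner.
    ByteBuffer readOnly = original.asReadOnlyBuffer();
    readOnly.position(2).limit(3);
    System.out.println(original.position()); // 0, owner untouched
    System.out.println(readOnly.get());      // 3
    // readOnly.put((byte) 9);               // would throw ReadOnlyBufferException
  }
}
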
@@ -1275,7 +1275,7 @@ implements 
 
 sanityCheckAssertion
-private void sanityCheckAssertion(long valueFromBuf,
+private void sanityCheckAssertion(long valueFromBuf,
   long valueFromField,
   String fieldName)
throws 

[28/51] [partial] hbase-site git commit: Published site at .

2018-01-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.RSGroupStartupWorker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.RSGroupStartupWorker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.RSGroupStartupWorker.html
index e05510e..2e114d8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.RSGroupStartupWorker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.RSGroupStartupWorker.html
@@ -54,891 +54,884 @@
 046import 
org.apache.hadoop.hbase.HTableDescriptor;
 047import 
org.apache.hadoop.hbase.MetaTableAccessor;
 048import 
org.apache.hadoop.hbase.MetaTableAccessor.DefaultVisitorBase;
-049import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-050import 
org.apache.hadoop.hbase.ServerName;
-051import 
org.apache.hadoop.hbase.TableName;
-052import 
org.apache.hadoop.hbase.client.ClusterConnection;
-053import 
org.apache.hadoop.hbase.client.Delete;
-054import 
org.apache.hadoop.hbase.client.Get;
-055import 
org.apache.hadoop.hbase.client.Mutation;
-056import 
org.apache.hadoop.hbase.client.Put;
-057import 
org.apache.hadoop.hbase.client.RegionInfo;
-058import 
org.apache.hadoop.hbase.client.Result;
-059import 
org.apache.hadoop.hbase.client.Scan;
-060import 
org.apache.hadoop.hbase.client.Table;
-061import 
org.apache.hadoop.hbase.client.TableState;
-062import 
org.apache.hadoop.hbase.constraint.ConstraintException;
-063import 
org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
-064import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-065import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-066import 
org.apache.hadoop.hbase.master.MasterServices;
-067import 
org.apache.hadoop.hbase.master.ServerListener;
-068import 
org.apache.hadoop.hbase.master.TableStateManager;
-069import 
org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
-070import 
org.apache.hadoop.hbase.net.Address;
-071import 
org.apache.hadoop.hbase.procedure2.Procedure;
-072import 
org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-073import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-074import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
-075import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
-076import 
org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
-077import 
org.apache.hadoop.hbase.security.access.AccessControlLists;
-078import 
org.apache.hadoop.hbase.util.Bytes;
-079import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-080import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-081import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-082import 
org.apache.yetus.audience.InterfaceAudience;
-083import 
org.apache.zookeeper.KeeperException;
-084import org.slf4j.Logger;
-085import org.slf4j.LoggerFactory;
-086
-087import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-088import 
org.apache.hbase.thirdparty.com.google.common.collect.Maps;
-089import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-092
-093/**
-094 * This is an implementation of {@link RSGroupInfoManager} which makes
-095 * use of an HBase table as the persistence store for the group information.
-096 * It also makes use of zookeeper to store group information needed
-097 * for bootstrapping during offline mode.
-098 *
-099 * <h2>Concurrency</h2>
-100 * RSGroup state is kept locally in Maps. There is a rsgroup name to cached
-101 * RSGroupInfo Map at {@link #rsGroupMap} and a Map of tables to the name of the
-102 * rsgroup they belong to (in {@link #tableMap}). These Maps are persisted to the
-103 * hbase:rsgroup table (and cached in zk) on each modification.
-104 *
-105 * <p>Mutations on state are synchronized but reads can continue without having
-106 * to wait on an instance monitor, mutations do wholesale replace of the Maps on
-107 * update -- Copy-On-Write; the local Maps of state are read-only, just-in-case
-108 * (see flushConfig).
-109 *
-110 * <p>Reads must not block else there is a danger we'll deadlock.
-111 *
-112 * <p>Clients of this class, the {@link RSGroupAdminEndpoint} for example, want to query and
-113 * then act on the results of the query modifying cache in zookeeper without another thread
-114 * making intermediate modifications. These clients synchronize on the 'this' instance so
-115 * no other has access concurrently. Reads must be able to continue concurrently.
-116 */
-117@InterfaceAudience.Private
-118final class RSGroupInfoManagerImpl 
implements RSGroupInfoManager {
-119  private static final Logger LOG = 
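
The concurrency scheme that class comment describes, serialized writers that wholesale-replace an immutable map while readers stay lock-free, reduces to a small idiom. A simplified, hypothetical sketch (not the actual RSGroupInfoManagerImpl code; class and field names here are invented):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

// Simplified copy-on-write registry: reads never block, writes replace the map.
class GroupRegistry {
  // volatile so readers always see the latest published (immutable) map
  private volatile Map<String, String> groupMap = Collections.emptyMap();

  // Reads go straight to the current snapshot: no monitor, no deadlock risk.
  String lookup(String group) {
    return groupMap.get(group);
  }

  // Mutations are serialized and publish a brand-new read-only map,
  // mirroring the "wholesale replace ... Copy-On-Write" note above.
  synchronized void put(String group, String value) {
    Map<String, String> next = new HashMap<>(groupMap);
    next.put(group, value);
    groupMap = Collections.unmodifiableMap(next);
  }
}
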

[28/51] [partial] hbase-site git commit: Published site at .

2018-01-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKMainServer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKMainServer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKMainServer.html
index c725ebc..85096e8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKMainServer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKMainServer.html
@@ -6,7 +6,7 @@
 
 
 
-001/**
+001/*
 002 * Licensed to the Apache Software 
Foundation (ASF) under one
 003 * or more contributor license 
agreements.  See the NOTICE file
 004 * distributed with this work for 
additional information
@@ -27,115 +27,107 @@
 019package 
org.apache.hadoop.hbase.zookeeper;
 020
 021import java.io.IOException;
-022import java.util.concurrent.TimeUnit;
-023
-024import 
org.apache.hadoop.conf.Configuration;
-025import 
org.apache.hadoop.hbase.HBaseConfiguration;
-026import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-027import 
org.apache.yetus.audience.InterfaceAudience;
-028import 
org.apache.zookeeper.KeeperException;
-029import 
org.apache.zookeeper.ZooKeeperMain;
+022
+023import 
org.apache.hadoop.conf.Configuration;
+024import 
org.apache.hadoop.hbase.HBaseConfiguration;
+025import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
+026import 
org.apache.yetus.audience.InterfaceAudience;
+027import 
org.apache.zookeeper.KeeperException;
+028import 
org.apache.zookeeper.ZooKeeperMain;
+029
 030
-031import 
org.apache.hbase.thirdparty.com.google.common.base.Stopwatch;
-032
-033/**
-034 * Tool for running ZookeeperMain from 
HBase by reading a ZooKeeper server
-035 * from HBase XML configuration.
-036 */
-037@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-038public class ZKMainServer {
-039  private static final String SERVER_ARG 
= "-server";
-040
-041  public String parse(final Configuration 
c) {
-042return 
ZKConfig.getZKQuorumServersString(c);
-043  }
-044
-045  /**
-046   * ZooKeeper 3.4.6 broke being able to 
pass commands on command line.
-047   * See ZOOKEEPER-1897.  This class is a 
hack to restore this faclity.
-048   */
-049  private static class 
HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain extends ZooKeeperMain {
-050public 
HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain(String[] args)
-051throws IOException, 
InterruptedException {
-052  super(args);
-053  // Make sure we are connected 
before we proceed. Can take a while on some systems. If we
-054  // run the command without being 
connected, we get ConnectionLoss KeeperErrorConnection...
-055  Stopwatch stopWatch = 
Stopwatch.createStarted();
-056  while 
(!this.zk.getState().isConnected()) {
-057Thread.sleep(1);
-058if 
(stopWatch.elapsed(TimeUnit.SECONDS)  10) {
-059  throw new 
InterruptedException("Failed connect after waiting " +
-060  
stopWatch.elapsed(TimeUnit.SECONDS) + "seconds; state=" + this.zk.getState() 
+
-061  "; " + this.zk);
-062}
-063  }
-064}
-065
-066/**
-067 * Run the command-line args passed.  
Calls System.exit when done.
-068 * @throws KeeperException
-069 * @throws IOException
-070 * @throws InterruptedException
-071 */
-072void runCmdLine() throws 
KeeperException, IOException, InterruptedException {
-073  processCmd(this.cl);
-074  System.exit(0);
-075}
+031/**
+032 * Tool for running ZookeeperMain from 
HBase by reading a ZooKeeper server
+033 * from HBase XML configuration.
+034 */
+035@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
+036public class ZKMainServer {
+037  private static final String SERVER_ARG 
= "-server";
+038
+039  public String parse(final Configuration 
c) {
+040return 
ZKConfig.getZKQuorumServersString(c);
+041  }
+042
+043  /**
+044   * ZooKeeper 3.4.6 broke being able to 
pass commands on command line.
+045   * See ZOOKEEPER-1897.  This class is a 
hack to restore this facility.
+046   */
+047  private static class 
HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain extends ZooKeeperMain {
+048public 
HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain(String[] args)
+049throws IOException, 
InterruptedException {
+050  super(args);
+051  // Make sure we are connected 
before we proceed. Can take a while on some systems. If we
+052  // run the command without being 
connected, we get ConnectionLoss KeeperErrorConnection...
+053  // Make it 30seconds. We dont' have 
a config in this context and zk doesn't have
+054  // a timeout until after 
connection. 3ms is default for zk.
+055  
ZooKeeperHelper.ensureConnectedZooKeeper(this.zk, 3);
+056}
+057
+058/**
+059 * Run the command-line args passed.  
Calls System.exit when done.
+060 * @throws KeeperException
+061 * @throws IOException
+062 * @throws 
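
Old and new versions implement the same guard: poll the client's state until it reports connected or a deadline passes. A standalone sketch of that wait (mirroring the removed Stopwatch loop; ZooKeeperHelper's actual internals are not shown in this diff):

import org.apache.zookeeper.ZooKeeper;

final class ConnectWait {
  private ConnectWait() {}

  // Spin until the ZooKeeper handle reports a connected state, or give up
  // after timeoutMs; the same guard the constructor above installs before
  // any command is run.
  static void awaitConnected(ZooKeeper zk, long timeoutMs) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!zk.getState().isConnected()) {
      if (System.currentTimeMillis() > deadline) {
        throw new InterruptedException(
            "Failed to connect within " + timeoutMs + "ms; state=" + zk.getState());
      }
      Thread.sleep(1);
    }
  }
}
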

[28/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.html
index 972d795..d4f4a3d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.html
@@ -39,489 +39,490 @@
 031import org.apache.hadoop.fs.Path;
 032import 
org.apache.hadoop.hbase.HConstants;
 033import 
org.apache.hadoop.hbase.ServerName;
-034import 
org.apache.yetus.audience.InterfaceAudience;
-035import 
org.apache.yetus.audience.InterfaceStability;
-036import org.slf4j.Logger;
-037import org.slf4j.LoggerFactory;
-038import 
org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL;
-039import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-040import 
org.apache.hadoop.hbase.util.CancelableProgressable;
-041import 
org.apache.hadoop.hbase.util.FSUtils;
-042import 
org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
-043import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-044
-045/**
-046 * Base class of a WAL Provider that 
returns a single thread safe WAL that writes to Hadoop FS. By
-047 * default, this implementation picks a 
directory in Hadoop FS based on a combination of
-048 * <ul>
-049 * <li>the HBase root directory
-050 * <li>HConstants.HREGION_LOGDIR_NAME
-051 * <li>the given factory's factoryId (usually identifying the regionserver by host:port)
-052 * </ul>
-053 * It also uses the providerId to 
differentiate among files.
-054 */
-055@InterfaceAudience.Private
-056@InterfaceStability.Evolving
-057public abstract class AbstractFSWALProvider<T extends AbstractFSWAL<?>> implements WALProvider {
-058
-059  private static final Logger LOG = 
LoggerFactory.getLogger(AbstractFSWALProvider.class);
-060
-061  /** Separate old log into different dir 
by regionserver name **/
-062  public static final String 
SEPARATE_OLDLOGDIR = "hbase.separate.oldlogdir.by.regionserver";
-063  public static final boolean 
DEFAULT_SEPARATE_OLDLOGDIR = false;
-064
-065  // Only public so classes back in 
regionserver.wal can access
-066  public interface Reader extends 
WAL.Reader {
-067/**
-068 * @param fs File system.
-069 * @param path Path.
-070 * @param c Configuration.
-071 * @param s Input stream that may 
have been pre-opened by the caller; may be null.
-072 */
-073void init(FileSystem fs, Path path, 
Configuration c, FSDataInputStream s) throws IOException;
-074  }
-075
-076  protected volatile T wal;
-077  protected WALFactory factory = null;
-078  protected Configuration conf = null;
-079  protected List<WALActionsListener> listeners = null;
-080  protected String providerId = null;
-081  protected AtomicBoolean initialized = 
new AtomicBoolean(false);
-082  // for default wal provider, logPrefix 
won't change
-083  protected String logPrefix = null;
-084
-085  /**
-086   * we synchronized on walCreateLock to 
prevent wal recreation in different threads
-087   */
-088  private final Object walCreateLock = 
new Object();
-089
-090  /**
-091   * @param factory factory that made us, 
identity used for FS layout. may not be null
-092   * @param conf may not be null
-093   * @param listeners may be null
-094   * @param providerId differentiate 
between providers from one factory, used for FS layout. may be
-095   *  null
-096   */
-097  @Override
-098  public void init(WALFactory factory, 
Configuration conf, List<WALActionsListener> listeners,
-099  String providerId) throws 
IOException {
-100if (!initialized.compareAndSet(false, 
true)) {
-101  throw new 
IllegalStateException("WALProvider.init should only be called once.");
-102}
-103this.factory = factory;
-104this.conf = conf;
-105this.listeners = listeners;
-106this.providerId = providerId;
-107// get log prefix
-108StringBuilder sb = new 
StringBuilder().append(factory.factoryId);
-109if (providerId != null) {
-110  if 
(providerId.startsWith(WAL_FILE_NAME_DELIMITER)) {
-111sb.append(providerId);
-112  } else {
-113
sb.append(WAL_FILE_NAME_DELIMITER).append(providerId);
-114  }
-115}
-116logPrefix = sb.toString();
-117doInit(conf);
-118  }
-119
-120  @Override
-121  public List<WAL> getWALs() {
-122if (wal == null) {
-123  return Collections.emptyList();
-124}
-125List<WAL> wals = new ArrayList<>(1);
-126wals.add(wal);
-127return wals;
-128  }
-129
-130  @Override
-131  public T getWAL(byte[] identifier, 
byte[] namespace) throws IOException {
-132T walCopy = wal;
-133if (walCopy == null) {
-134  // only lock when need to create 
wal, and need to lock since
-135  // creating hlog on fs is 

[28/51] [partial] hbase-site git commit: Published site at .

2018-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
index 89f9554..45e4434 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i
 
109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i
 
109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-class AsyncHBaseAdmin
+class AsyncHBaseAdmin
 extends Object
 implements AsyncAdmin
 Just a wrapper of RawAsyncHBaseAdmin. The 
difference is that users need to provide a
@@ -246,26 +246,32 @@ implements 
+CompletableFuture<CacheEvictionStats>
+clearBlockCache(TableName tableName)
+Clear all the blocks corresponding to this table from 
BlockCache.
+
+
+
 CompletableFuture<Void>
 clearCompactionQueues(ServerName serverName,
  Set<String> queues)
 Clear compacting queues on a region server.
 
 
-
+
 CompletableFuture<List<ServerName>>
 clearDeadServers(List<ServerName> servers)
 Clear dead region 
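
The newly listed clearBlockCache follows the AsyncAdmin convention of returning a CompletableFuture. A hedged usage sketch (only the method name and return type come from the table above; the surrounding handling is illustrative):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;

class ClearBlockCacheDemo {
  // Evict a table's blocks and report the eviction stats asynchronously.
  static void evict(AsyncAdmin admin) {
    admin.clearBlockCache(TableName.valueOf("t1"))
        .whenComplete((stats, err) -> {
          if (err != null) {
            System.err.println("clearBlockCache failed: " + err);
          } else {
            System.out.println("evicted: " + stats);
          }
        });
  }
}
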

[28/51] [partial] hbase-site git commit: Published site at .

2018-01-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
index f2b5705..f9f3d54 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private class RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
+private class RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
 extends RawAsyncHBaseAdmin.TableProcedureBiConsumer
 
 
@@ -232,7 +232,7 @@ extends 
 
 AddColumnFamilyProcedureBiConsumer
-AddColumnFamilyProcedureBiConsumer(TableName tableName)
+AddColumnFamilyProcedureBiConsumer(TableName tableName)
 
 
 
@@ -249,7 +249,7 @@ extends 
 
 getOperationType
-String getOperationType()
+String getOperationType()
 
 Specified by:
 getOperationType in class RawAsyncHBaseAdmin.TableProcedureBiConsumer

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
index 7d6b664..5479838 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @FunctionalInterface
-private static interface RawAsyncHBaseAdmin.AdminRpcCall<RESP,REQ>
+private static interface RawAsyncHBaseAdmin.AdminRpcCall<RESP,REQ>
 
 
 
@@ -159,7 +159,7 @@ private static interface 
 
 call
-void call(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.Interface stub,
+void call(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.Interface stub,
   HBaseRpcController controller,
   REQ req,
   org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback<RESP> done)
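
Being a @FunctionalInterface, AdminRpcCall is meant to be supplied as a lambda that forwards to one generated stub method. The interface itself is private to RawAsyncHBaseAdmin, so here is a self-contained mirror of its shape (Consumer stands in for protobuf's RpcCallback; all names in this sketch are illustrative):

import java.util.function.Consumer;

public class AdminRpcCallDemo {
  // Mirror of the AdminRpcCall<RESP, REQ> idea: "invoke one async stub
  // method and hand the response to a callback".
  @FunctionalInterface
  interface RpcCall<RESP, REQ, STUB> {
    void call(STUB stub, REQ req, Consumer<RESP> done);
  }

  public static void main(String[] args) {
    // Toy instantiation: the "stub" echoes the request length back.
    RpcCall<Integer, String, Object> lenCall =
        (stub, req, done) -> done.accept(req.length());
    lenCall.call(new Object(), "flush-region", n -> System.out.println("resp=" + n));
  }
}
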

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
index 572d7cc..cd4daf6 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @FunctionalInterface
-private static interface RawAsyncHBaseAdmin.Converter<D,S>
+private static interface RawAsyncHBaseAdmin.Converter<D,S>
 
 
 
@@ -156,7 +156,7 @@ private static interface 
 
 convert
-D convert(S src)
+D convert(S src)
throws IOException
 
 Throws:
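
Converter is the same trick applied to response translation: a one-method adapter that, unlike java.util.function.Function, may throw IOException. A self-contained mirror of the shape (the real interface is private to RawAsyncHBaseAdmin):

import java.io.IOException;

public class ConverterDemo {
  // Checked-exception-friendly Function: turn a raw response S into a D.
  @FunctionalInterface
  interface Converter<D, S> {
    D convert(S src) throws IOException;
  }

  public static void main(String[] args) throws IOException {
    Converter<Integer, String> parse = Integer::valueOf; // method ref fits
    System.out.println(parse.convert("42"));
  }
}
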

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
index 5942eea..01f186e 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private class RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer
+private class RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer
 

[28/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;

[28/51] [partial] hbase-site git commit: Published site at .

2018-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
index 70183cf..80e0772 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
@@ -381,6 +381,6 @@ extends 
-Copyright © 2007-2017 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
+Copyright © 2007-2018 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
index d6b4c83..520a90b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
@@ -325,6 +325,6 @@ extends 
-Copyright © 2007-2017 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
+Copyright © 2007-2018 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
index 278bbb1..23721d2 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
@@ -4505,6 +4505,6 @@ implements 
-Copyright © 2007-2017 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
+Copyright © 2007-2018 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CheckAndMutateBuilderImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CheckAndMutateBuilderImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CheckAndMutateBuilderImpl.html
index 266abdc..cd0db5f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CheckAndMutateBuilderImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CheckAndMutateBuilderImpl.html
@@ -501,6 +501,6 @@ implements 
-Copyright © 2007-2017 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
+Copyright © 2007-2018 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.Converter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.Converter.html 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.Converter.html
index de0b4fd..66fb3a9 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.Converter.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.Converter.html
@@ -235,6 +235,6 @@ private static interface 
-Copyright © 2007-2017 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
+Copyright © 2007-2018 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CoprocessorServiceBuilderImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CoprocessorServiceBuilderImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CoprocessorServiceBuilderImpl.html
index ed3a835..328da43 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CoprocessorServiceBuilderImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CoprocessorServiceBuilderImpl.html
@@ -454,6 +454,6 @@ implements Copyright © 2007-2017 

[28/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionInputFormat.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionInputFormat.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionInputFormat.html
index 91eec45..d1cd185 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionInputFormat.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionInputFormat.html
@@ -88,400 +88,396 @@
 080  private final static String 
CONF_COMPACT_ONCE = "hbase.compactiontool.compact.once";
 081  private final static String 
CONF_COMPACT_MAJOR = "hbase.compactiontool.compact.major";
 082  private final static String 
CONF_DELETE_COMPACTED = "hbase.compactiontool.delete";
-083  private final static String 
CONF_COMPLETE_COMPACTION = "hbase.hstore.compaction.complete";
-084
-085  /**
-086   * Class responsible to execute the 
Compaction on the specified path.
-087   * The path can be a table, region or 
family directory.
-088   */
-089  private static class CompactionWorker 
{
-090private final boolean 
keepCompactedFiles;
-091private final boolean 
deleteCompacted;
-092private final Configuration conf;
-093private final FileSystem fs;
-094private final Path tmpDir;
-095
-096public CompactionWorker(final 
FileSystem fs, final Configuration conf) {
-097  this.conf = conf;
-098  this.keepCompactedFiles = 
!conf.getBoolean(CONF_COMPLETE_COMPACTION, true);
-099  this.deleteCompacted = 
conf.getBoolean(CONF_DELETE_COMPACTED, false);
-100  this.tmpDir = new 
Path(conf.get(CONF_TMP_DIR));
-101  this.fs = fs;
-102}
-103
-104/**
-105 * Execute the compaction on the 
specified path.
-106 *
-107 * @param path Directory path on 
which to run compaction.
-108 * @param compactOnce Execute just a 
single step of compaction.
-109 * @param major Request major 
compaction.
-110 */
-111public void compact(final Path path, 
final boolean compactOnce, final boolean major) throws IOException {
-112  if (isFamilyDir(fs, path)) {
-113Path regionDir = 
path.getParent();
-114Path tableDir = 
regionDir.getParent();
-115TableDescriptor htd = 
FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-116RegionInfo hri = 
HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
-117compactStoreFiles(tableDir, htd, 
hri,
-118path.getName(), compactOnce, 
major);
-119  } else if (isRegionDir(fs, path)) 
{
-120Path tableDir = 
path.getParent();
-121TableDescriptor htd = 
FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-122compactRegion(tableDir, htd, 
path, compactOnce, major);
-123  } else if (isTableDir(fs, path)) 
{
-124compactTable(path, compactOnce, 
major);
-125  } else {
-126throw new IOException(
-127  "Specified path is not a table, 
region or family directory. path=" + path);
-128  }
-129}
-130
-131private void compactTable(final Path 
tableDir, final boolean compactOnce, final boolean major)
-132throws IOException {
-133  TableDescriptor htd = 
FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-134  for (Path regionDir: 
FSUtils.getRegionDirs(fs, tableDir)) {
-135compactRegion(tableDir, htd, 
regionDir, compactOnce, major);
-136  }
-137}
-138
-139private void compactRegion(final Path 
tableDir, final TableDescriptor htd,
-140final Path regionDir, final 
boolean compactOnce, final boolean major)
-141throws IOException {
-142  RegionInfo hri = 
HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
-143  for (Path familyDir: 
FSUtils.getFamilyDirs(fs, regionDir)) {
-144compactStoreFiles(tableDir, htd, 
hri, familyDir.getName(), compactOnce, major);
-145  }
-146}
-147
-148/**
-149 * Execute the actual compaction 
job.
-150 * If the compact once flag is not 
specified, execute the compaction until
-151 * no more compactions are needed. 
Uses the Configuration settings provided.
-152 */
-153private void compactStoreFiles(final 
Path tableDir, final TableDescriptor htd,
-154final RegionInfo hri, final 
String familyName, final boolean compactOnce,
-155final boolean major) throws 
IOException {
-156  HStore store = getStore(conf, fs, 
tableDir, htd, hri, familyName, tmpDir);
-157  LOG.info("Compact table=" + 
htd.getTableName() +
-158" region=" + 
hri.getRegionNameAsString() +
-159" family=" + familyName);
-160  if (major) {
-161store.triggerMajorCompaction();
-162  }
-163  do {
-164Optional<CompactionContext> compaction =
-165
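
The listing stops inside compactStoreFiles' do/while, but the javadoc above fixes its shape: request compaction work and execute it until the store offers none, or after a single pass when compactOnce is set. A generic sketch under that reading (Store here is a stand-in interface, not HBase's HStore):

import java.util.Optional;

public class CompactUntilDone {
  // Stand-in for the store being compacted.
  interface Store {
    Optional<Runnable> requestCompaction(); // empty when nothing to do
    boolean needsCompaction();
  }

  static void compactStore(Store store, boolean compactOnce) {
    do {
      Optional<Runnable> compaction = store.requestCompaction();
      if (!compaction.isPresent()) {
        break; // no more work to schedule
      }
      compaction.get().run();
    } while (store.needsCompaction() && !compactOnce);
  }
}
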

[28/51] [partial] hbase-site git commit: Published site at .

2017-12-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/client/RowMutations.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RowMutations.html 
b/devapidocs/org/apache/hadoop/hbase/client/RowMutations.html
index f594075..771948f 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RowMutations.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RowMutations.html
@@ -18,8 +18,8 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var methods = 
{"i0":10,"i1":10,"i2":42,"i3":42,"i4":10,"i5":10,"i6":10,"i7":42,"i8":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
@@ -122,7 +122,7 @@ implements We compare and equate mutations based off their row so be careful putting 
RowMutations
  into Sets or using them as keys in Maps.
 
@@ -152,6 +152,13 @@ implements row
 
 
+
+
+
+
+Fields inherited from interface org.apache.hadoop.hbase.client.Row
+COMPARATOR
+
 
 
 
@@ -184,7 +191,7 @@ implements 
 Method Summary
 
-All Methods | Instance Methods | Concrete Methods
+All Methods | Instance Methods | Concrete Methods | Deprecated Methods
 
 Modifier and Type
 Method and Description
@@ -203,11 +210,21 @@ implements 
 
 int
-compareTo(Row i)
+compareTo(Row i)
+Deprecated.
+As of release 2.0.0, this 
will be removed in HBase 3.0.0.
+ Use Row.COMPARATOR
 instead
+
+
 
 
 boolean
-equals(Object obj)
+equals(Object obj)
+Deprecated.
+As of release 2.0.0, this 
will be removed in HBase 3.0.0.
+ No replacement
+
+
 
 
 int
@@ -223,7 +240,12 @@ implements 
 
 int
-hashCode()
+hashCode()
+Deprecated.
+As of release 2.0.0, this 
will be removed in HBase 3.0.0.
+ No replacement
+
+
 
 
 private void
@@ -364,10 +386,15 @@ implements 
 
 compareTo
-public int compareTo(Row i)
+@Deprecated
+public int compareTo(Row i)
+Deprecated.As of release 2.0.0, this will be removed in HBase 
3.0.0.
+ Use Row.COMPARATOR
 instead
 
 Specified by:
 compareTo in interface Comparable<Row>
+Specified by:
+compareTo in interface Row
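
With compareTo deprecated, ordering is meant to flow through the shared Row.COMPARATOR constant named in the notice above. A hedged usage sketch (the list handling around the constant is illustrative):

import java.util.List;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.RowMutations;

class RowComparatorDemo {
  // Sort a batch by row bytes via the interface constant rather than the
  // deprecated per-instance compareTo.
  static void sortByRow(List<RowMutations> batch) {
    batch.sort(Row.COMPARATOR);
  }
}
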
 
 
 
@@ -377,7 +404,10 @@ implements 
 
 equals
-public boolean equals(Object obj)
+@Deprecated
+public boolean equals(Object obj)
+Deprecated.As of release 2.0.0, this will be removed in HBase 
3.0.0.
+ No replacement
 
 Overrides:
 equals in class Object
@@ -390,7 +420,10 @@ implements 
 
 hashCode
-public int hashCode()
+@Deprecated
+public int hashCode()
+Deprecated.As of release 2.0.0, this will be removed in HBase 
3.0.0.
+ No replacement
 
 Overrides:
 hashCode in class Object
@@ -403,7 +436,7 @@ implements 
 
 getRow
-public byte[] getRow()
+public byte[] getRow()
 
 Specified by:
 getRow in interface Row
@@ -418,7 +451,7 @@ implements 
 
 getMutations
-public List<Mutation> getMutations()
+public List<Mutation> getMutations()

[28/51] [partial] hbase-site git commit: Published site at .

2017-12-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.TagRewriteByteBufferCell.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.TagRewriteByteBufferCell.html
 
b/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.TagRewriteByteBufferCell.html
deleted file mode 100644
index a8c2ef3..000
--- 
a/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.TagRewriteByteBufferCell.html
+++ /dev/null
@@ -1,1180 +0,0 @@
-PrivateCellUtil.TagRewriteByteBufferCell (Apache HBase 3.0.0-SNAPSHOT API)
-org.apache.hadoop.hbase
-Class PrivateCellUtil.TagRewriteByteBufferCell
-
-
-
-java.lang.Object
-  org.apache.hadoop.hbase.ByteBufferCell
-    org.apache.hadoop.hbase.PrivateCellUtil.TagRewriteByteBufferCell
-
-
-
-
-
-
-
-
-
-All Implemented Interfaces:
-Cloneable, Cell, ExtendedCell, HeapSize, RawCell
-
-
-Direct Known Subclasses:
-PrivateCellUtil.ValueAndTagRewriteByteBufferCell
-
-
-Enclosing class:
-PrivateCellUtil
-
-
-
-static class PrivateCellUtil.TagRewriteByteBufferCell
-extends ByteBufferCell
-implements ExtendedCell
-
-
-
-
-
-
-
-
-
-
-
-Nested Class Summary
-
-
-
-
-Nested classes/interfaces inherited from 
interface org.apache.hadoop.hbase.Cell
-Cell.DataType
-
-
-
-
-
-
-
-
-Field Summary
-
-Fields
-
-Modifier and Type
-Field and Description
-
-
-protected ByteBufferCell
-cell
-
-
-private static int
-HEAP_SIZE_OVERHEAD
-
-
-protected byte[]
-tags
-
-
-
-
-
-
-Fields inherited from interface org.apache.hadoop.hbase.ExtendedCell
-CELL_NOT_BASED_ON_CHUNK
-
-
-
-
-
-Fields inherited from interface org.apache.hadoop.hbase.RawCell
-MAX_TAGS_LENGTH
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors
-
-Constructor and Description
-
-
-TagRewriteByteBufferCell(ByteBufferCell cell,
-byte[] tags)
-
-
-
-
-
-
-
-
-
-Method Summary
-
-All Methods | Instance Methods | Concrete Methods
-
-Modifier and Type
-Method and Description
-
-
-ExtendedCell
-deepClone()
-Does a deep copy of the contents to a new memory area and 
returns it as a new cell.
-
-
-
-byte[]
-getFamilyArray()
-Contiguous bytes composed of legal HDFS filename characters 
which may start at any index in the
- containing array.
-
-
-
-ByteBuffer
-getFamilyByteBuffer()
-
-
-byte
-getFamilyLength()
-
-
-int
-getFamilyOffset()
-
-
-int
-getFamilyPosition()
-
-
-byte[]
-getQualifierArray()
-Contiguous raw bytes that may start at any index in the 
containing array.
-
-
-
-ByteBuffer
-getQualifierByteBuffer()
-
-
-int
-getQualifierLength()
-
-
-int
-getQualifierOffset()
-
-
-int
-getQualifierPosition()
-
-
-byte[]
-getRowArray()
-Contiguous raw bytes that may start at any index in the 
containing array.
-
-
-
-ByteBuffer
-getRowByteBuffer()
-
-
-short
-getRowLength()
-
-
-int
-getRowOffset()
-
-
-int
-getRowPosition()
-
-
-long
-getSequenceId()
-A region-specific unique monotonically increasing sequence 
ID given to 

[28/51] [partial] hbase-site git commit: Published site at .

2017-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html 
b/devapidocs/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
index 975d227..d14d073 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static final class Mutation.CellWrapper
+private static final class Mutation.CellWrapper
 extends Object
 implements ExtendedCell
 
@@ -277,7 +277,7 @@ implements 
 
 
-List<Tag>
+Iterator<Tag>
 getTags()
 Creates a list of tags in the current cell
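
The return-type change above (List<Tag> to Iterator<Tag>) turns tag access into plain iteration rather than a materialized list. A hedged caller-side sketch (the printing is illustrative):

import java.util.Iterator;
import org.apache.hadoop.hbase.Tag;

class TagIterationDemo {
  // After the change, callers walk tags instead of indexing a list.
  static void dump(Iterator<Tag> tags) {
    while (tags.hasNext()) {
      Tag tag = tags.next();
      System.out.println("tag type=" + tag.getType());
    }
  }
}
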
 
@@ -388,7 +388,7 @@ implements 
 
 FIXED_OVERHEAD
-private static final long FIXED_OVERHEAD
+private static final long FIXED_OVERHEAD
 
 
 
@@ -397,7 +397,7 @@ implements 
 
 cell
-private final Cell cell
+private final Cell cell
 
 
 
@@ -406,7 +406,7 @@ implements 
 
 sequenceId
-private long sequenceId
+private long sequenceId
 
 
 
@@ -415,7 +415,7 @@ implements 
 
 timestamp
-private long timestamp
+private long timestamp
 
 
 
@@ -432,7 +432,7 @@ implements 
 
 CellWrapper
-CellWrapper(Cell cell)
+CellWrapper(Cell cell)
 
 
 
@@ -449,7 +449,7 @@ implements 
 
 setSequenceId
-public void setSequenceId(long seqId)
+public void setSequenceId(long seqId)
 Description copied from interface: ExtendedCell
 Sets with the given seqId.
 
@@ -466,7 +466,7 @@ implements 
 
 setTimestamp
-public void setTimestamp(long ts)
+public void setTimestamp(long ts)
 Description copied from interface: ExtendedCell
 Sets with the given timestamp.
 
@@ -483,7 +483,7 @@ implements 
 
 setTimestamp
-public void setTimestamp(byte[] ts)
+public void setTimestamp(byte[] ts)
 Description copied from interface: ExtendedCell
 Sets with the given timestamp.
 
@@ -500,7 +500,7 @@ implements 
 
 getSequenceId
-public long getSequenceId()
+public long getSequenceId()
 Description copied from interface: ExtendedCell
 A region-specific unique monotonically increasing sequence 
ID given to each Cell. It always
  exists for cells in the memstore but is not retained forever. It will be kept 
for
@@ -522,7 +522,7 @@ implements 
 
 getValueArray
-public byte[] getValueArray()
+public byte[] getValueArray()
 Description copied from interface: Cell
 Contiguous raw bytes that may start at any index in the 
containing array. Max length is
  Integer.MAX_VALUE which is 2,147,483,647 bytes.
@@ -540,7 +540,7 @@ implements 
 
 getValueOffset
-public int getValueOffset()
+public int getValueOffset()
 
 Specified by:
 getValueOffset in interface Cell
@@ -555,7 +555,7 @@ implements 
 
 getValueLength
-public int getValueLength()
+public int getValueLength()
 
 Specified by:
 getValueLength in interface Cell
@@ -570,7 +570,7 @@ implements 
 
 getTagsArray
-public byte[] getTagsArray()
+public byte[] getTagsArray()
 Description copied from interface: ExtendedCell
 Contiguous raw bytes representing tags that may start at 
any index in the containing array.
 
@@ -589,7 +589,7 @@ implements 
 
 getTagsOffset
-public int getTagsOffset()
+public int getTagsOffset()
 
 Specified by:
 getTagsOffset in interface Cell
@@ -606,7 +606,7 @@ implements 
 
 getTagsLength
-public int getTagsLength()
+public int getTagsLength()
 Description copied from interface: ExtendedCell
 HBase internally uses 2 bytes to store tags length in Cell. 
As the tags length is always a
  non-negative number, to make good use of the sign bit, the max of tags length 
is defined 2 *
@@ -629,7 +629,7 @@ implements 
 
 getRowArray
-public byte[] getRowArray()
+public byte[] getRowArray()
 Description copied from interface: Cell
 Contiguous raw bytes that may start at any index in the 
containing array. Max length is
  Short.MAX_VALUE which is 32,767 bytes.
@@ -647,7 +647,7 @@ implements 
 
 getRowOffset
-public int getRowOffset()
+public int getRowOffset()
 
 Specified by:
 getRowOffset in interface Cell
@@ -662,7 +662,7 @@ implements 
 
 getRowLength
-public short getRowLength()
+public short getRowLength()
 
 Specified by:
 getRowLength in interface Cell
@@ -677,7 +677,7 @@ implements 
 
 getFamilyArray
-public byte[] getFamilyArray()
+public byte[] getFamilyArray()
 Description copied from interface: Cell
 Contiguous bytes composed of legal HDFS filename characters 
which may start at any index in the
  containing array. Max length is Byte.MAX_VALUE, which is 127 bytes.
@@ -695,7 +695,7 @@ implements 
 
 getFamilyOffset
-public int getFamilyOffset()

[28/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/io/HeapSize.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/HeapSize.html 
b/devapidocs/org/apache/hadoop/hbase/io/HeapSize.html
index ac67944..398945e 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/HeapSize.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/HeapSize.html
@@ -105,7 +105,7 @@ var activeTableTab = "activeTableTab";
 
 
 All Known Implementing Classes:
-Append, BlockCacheKey, BucketCache, BufferedDataBlockEncoder.OffheapDecodedCell, BufferedDataBlockEncoder.OnheapDecodedCell, ByteBufferChunkCell, ByteBufferKeyValue, CombinedBlockCache, Delete, HFileBlock, HFileBlockIndex.BlockIndexReader, HFileBlockIndex.ByteArrayKeyBlockIndexReader, HFileBlockIndex.CellBasedKeyBlockIndexReader, HFileContext, HMobStore, HRegion, HStore, InclusiveCombinedBlockCache, Increment, IndividualBytesFieldCell, KeyValue, KeyValue.KeyOnlyKeyValue, LruBlockCache, LruCachedBlock, LruCachedBlockQueue, MapReduceCell, Mutation, NoTagByteBufferChunkCell, NoTagsByteBufferKeyValue, NoTagsKeyValue, PrivateCellUtil.EmptyByteBufferCell, PrivateCellUtil.EmptyCell, PrivateCellUtil.FirstOnRowByteBufferCell, PrivateCellUtil.FirstOnRowCell, PrivateCellUtil.FirstOnRowColByteBufferCell, PrivateCellUtil.FirstOnRowColCell, PrivateCellUtil.FirstOnRowColTSByteBufferCell, PrivateCellUtil.FirstOnRowColTSCell, PrivateCellUtil.FirstOnRowDeleteFamilyCell, PrivateCellUtil.LastOnRowByteBufferCell, PrivateCellUtil.LastOnRowCell, PrivateCellUtil.LastOnRowColByteBufferCell, PrivateCellUtil.LastOnRowColCell, PrivateCellUtil.TagRewriteByteBufferCell, PrivateCellUtil.TagRewriteCell, PrivateCellUtil.ValueAndTagRewriteByteBufferCell, PrivateCellUtil.ValueAndTagRewriteCell, Put, SizeCachedKeyValue, SizeCachedNoTagsKeyValue, WALEdit, WALSplitter.RegionEntryBuffer
+Append, BlockCacheKey, BucketCache, BufferedDataBlockEncoder.OffheapDecodedCell, BufferedDataBlockEncoder.OnheapDecodedCell, ByteBufferChunkCell, ByteBufferKeyValue, CombinedBlockCache, Delete, HFileBlock, HFileBlockIndex.BlockIndexReader, HFileBlockIndex.ByteArrayKeyBlockIndexReader, HFileBlockIndex.CellBasedKeyBlockIndexReader, HFileContext, HMobStore, HRegion, HStore, InclusiveCombinedBlockCache, Increment, IndividualBytesFieldCell, KeyValue, KeyValue.KeyOnlyKeyValue, LruBlockCache, LruCachedBlock, LruCachedBlockQueue, MapReduceCell, Mutation, Mutation.CellWrapper, NoTagByteBufferChunkCell, NoTagsByteBufferKeyValue, NoTagsKeyValue, PrivateCellUtil.EmptyByteBufferCell, PrivateCellUtil.EmptyCell, PrivateCellUtil.FirstOnRowByteBufferCell, PrivateCellUtil.FirstOnRowCell, PrivateCellUtil.FirstOnRowColByteBufferCell, PrivateCellUtil.FirstOnRowColCell, PrivateCellUtil.FirstOnRowColTSByteBufferCell, PrivateCellUtil.FirstOnRowColTSCell, PrivateCellUtil.FirstOnRowDeleteFamilyCell, PrivateCellUtil.LastOnRowByteBufferCell, PrivateCellUtil.LastOnRowCell, PrivateCellUtil.LastOnRowColByteBufferCell, PrivateCellUtil.LastOnRowColCell, PrivateCellUtil.TagRewriteByteBufferCell, PrivateCellUtil.TagRewriteCell, PrivateCellUtil.ValueAndTagRewriteByteBufferCell, PrivateCellUtil.ValueAndTagRewriteCell, Put, SizeCachedKeyValue, SizeCachedNoTagsKeyValue, WALEdit, WALSplitter.RegionEntryBuffer
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/io/class-use/HeapSize.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/class-use/HeapSize.html 
b/devapidocs/org/apache/hadoop/hbase/io/class-use/HeapSize.html
index c4899b8..4817a35 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/class-use/HeapSize.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/class-use/HeapSize.html
@@ -316,6 +316,10 @@
 Mutation
 
 
+private static class
+Mutation.CellWrapper
+
+
 class
 Put
 Used to perform Put operations for a single row.


[28/51] [partial] hbase-site git commit: Published site at .

2017-12-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerConfig.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerConfig.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerConfig.html
index a7fd0b8..e8e84fe 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerConfig.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerConfig.html
@@ -283,9 +283,14 @@
 
 
 static ReplicationPeerConfig
-ReplicationPeerConfigUtil.convert(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer peer)
+ReplicationPeerConfigUtil.appendTableCFsToReplicationPeerConfig(Map<TableName, List<String>> tableCfs,
+ ReplicationPeerConfig peerConfig)
 
 
+static ReplicationPeerConfig
+ReplicationPeerConfigUtil.convert(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationPeer peer)
+
+
 ReplicationPeerConfig
 ReplicationAdmin.getPeerConfig(String id)
 Deprecated.
@@ -294,10 +299,16 @@
 
 
 
-
+
 static ReplicationPeerConfig
 ReplicationPeerConfigUtil.parsePeerFrom(byte[] bytes)
 
+
+static ReplicationPeerConfig
+ReplicationPeerConfigUtil.removeTableCFsFromReplicationPeerConfig(Map<TableName, List<String>> tableCfs,
+   ReplicationPeerConfig peerConfig,
+   String id)
+
 
 
 
@@ -347,8 +358,8 @@
 
 
 
-static void
-ReplicationPeerConfigUtil.appendTableCFsToReplicationPeerConfig(Map<TableName, ? extends Collection<String>> tableCfs,
+static ReplicationPeerConfig
+ReplicationPeerConfigUtil.appendTableCFsToReplicationPeerConfig(Map<TableName, List<String>> tableCfs,
  ReplicationPeerConfig peerConfig)
 
 
@@ -356,8 +367,8 @@
 ReplicationPeerConfigUtil.convert(ReplicationPeerConfig peerConfig)
 
 
-static void
-ReplicationPeerConfigUtil.removeTableCFsFromReplicationPeerConfig(Map<TableName, ? extends Collection<String>> tableCfs,
+static ReplicationPeerConfig
+ReplicationPeerConfigUtil.removeTableCFsFromReplicationPeerConfig(Map<TableName, List<String>> tableCfs,
 ReplicationPeerConfig peerConfig,
 String id)
 
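A hedged sketch of the rebuilt helper shown in this hunk: the new variants return the updated ReplicationPeerConfig instead of returning void and mutating the argument. PeerConfigExample is an illustrative name, and the ReplicationPeerConfigUtil import path is assumed from the surrounding Javadoc:

import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class PeerConfigExample {
  // Append one table/CF mapping and keep the returned, rebuilt config.
  static ReplicationPeerConfig addCf(ReplicationPeerConfig peerConfig) {
    Map<TableName, List<String>> tableCfs = Collections.singletonMap(
        TableName.valueOf("t1"), Collections.singletonList("cf1"));
    return ReplicationPeerConfigUtil
        .appendTableCFsToReplicationPeerConfig(tableCfs, peerConfig);
  }
}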
@@ -601,6 +612,14 @@
 
 
 

[28/51] [partial] hbase-site git commit: Published site at .

2017-12-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/apidocs/src-html/org/apache/hadoop/hbase/ServerMetrics.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/ServerMetrics.html 
b/apidocs/src-html/org/apache/hadoop/hbase/ServerMetrics.html
new file mode 100644
index 000..0e66fe5
--- /dev/null
+++ b/apidocs/src-html/org/apache/hadoop/hbase/ServerMetrics.html
@@ -0,0 +1,162 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/**
+002 * Copyright The Apache Software 
Foundation
+003 * Licensed to the Apache Software 
Foundation (ASF) under one
+004 * or more contributor license 
agreements.  See the NOTICE file
+005 * distributed with this work for 
additional information
+006 * regarding copyright ownership.  The 
ASF licenses this file
+007 * to you under the Apache License, 
Version 2.0 (the
+008 * "License"); you may not use this file 
except in compliance
+009 * with the License.  You may obtain a 
copy of the License at
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 * Unless required by applicable law or 
agreed to in writing, software
+012 * distributed under the License is 
distributed on an "AS IS" BASIS,
+013 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+014 * See the License for the specific 
language governing permissions and
+015 * limitations under the License.
+016 */
+017
+018package org.apache.hadoop.hbase;
+019
+020import 
edu.umd.cs.findbugs.annotations.Nullable;
+021import java.util.List;
+022import java.util.Map;
+023import 
org.apache.hadoop.hbase.replication.ReplicationLoadSink;
+024import 
org.apache.hadoop.hbase.replication.ReplicationLoadSource;
+025import 
org.apache.yetus.audience.InterfaceAudience;
+026
+027/**
+028 * This class is used for exporting 
current state of load on a RegionServer.
+029 */
+030@InterfaceAudience.Public
+031public interface ServerMetrics {
+032
+033  ServerName getServerName();
+034  /**
+035   * @return the number of requests per 
second.
+036   */
+037  long getRequestCountPerSecond();
+038
+039  /**
+040   * @return total Number of requests 
from the start of the region server.
+041   */
+042  long getRequestCount();
+043
+044  /**
+045   * @return the amount of used heap
+046   */
+047  Size getUsedHeapSize();
+048
+049  /**
+050   * @return the maximum allowable size 
of the heap
+051   */
+052  Size getMaxHeapSize();
+053
+054  int getInfoServerPort();
+055
+056  /**
+057   * Call directly from client such as 
hbase shell
+058   * @return the list of 
ReplicationLoadSource
+059   */
+060  List<ReplicationLoadSource> getReplicationLoadSourceList();
+061
+062  /**
+063   * Call directly from client such as 
hbase shell
+064   * @return ReplicationLoadSink
+065   */
+066  @Nullable
+067  ReplicationLoadSink getReplicationLoadSink();
+068
+069  /**
+070   * @return region load metrics
+071   */
+072  Map<byte[], RegionMetrics> getRegionMetrics();
+073
+074  /**
+075   * Return the RegionServer-level and 
Region-level coprocessors
+076   * @return string list of loaded 
RegionServer-level and Region-level coprocessors
+077   */
+078  List<String> getCoprocessorNames();
+079
+080  /**
+081   * @return the timestamp (server side) 
of generating this metrics
+082   */
+083  long getReportTimestamp();
+084
+085  /**
+086   * @return the last timestamp (server 
side) of generating this metrics
+087   */
+088  long getLastReportTimestamp();
+089
+090}
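A hedged sketch of how a client might read these per-server metrics, assuming the companion ClusterMetrics/Admin API and an already-open Connection; ServerMetricsExample and printLoad are illustrative names:

import java.util.Map;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class ServerMetricsExample {
  // Print request rate and heap usage for every live region server.
  static void printLoad(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      ClusterMetrics cluster = admin.getClusterMetrics();
      for (Map.Entry<ServerName, ServerMetrics> e
          : cluster.getLiveServerMetrics().entrySet()) {
        ServerMetrics sm = e.getValue();
        System.out.printf("%s: %d req/s, heap %s of %s%n",
            e.getKey(), sm.getRequestCountPerSecond(),
            sm.getUsedHeapSize(), sm.getMaxHeapSize());
      }
    }
  }
}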

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/apidocs/src-html/org/apache/hadoop/hbase/Size.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/Size.html 
b/apidocs/src-html/org/apache/hadoop/hbase/Size.html
new file mode 100644
index 000..5975512
--- /dev/null
+++ b/apidocs/src-html/org/apache/hadoop/hbase/Size.html
@@ -0,0 +1,230 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/**
+002 * Copyright The Apache Software 
Foundation
+003 * Licensed to the Apache Software 
Foundation (ASF) under one
+004 * or more contributor license 
agreements.  See the NOTICE file
+005 * distributed with this work for 
additional information
+006 * regarding copyright ownership.  The 
ASF licenses this file
+007 * to you under the Apache License, 
Version 2.0 (the
+008 * "License"); you may not use this file 
except in compliance
+009 * with the License.  You may obtain a 
copy of the License at
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 * Unless required by applicable law or 
agreed to in writing, software
+012 * distributed under the License is 
distributed on an "AS IS" BASIS,
+013 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+014 * See the License for the specific 
language governing 

[28/51] [partial] hbase-site git commit: Published site at .

2017-12-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/apidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html 
b/apidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html
index add30e1..594966b 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html
@@ -40,1084 +40,1083 @@
 032
 033import 
org.apache.commons.cli.CommandLine;
 034import org.apache.commons.cli.Option;
-035import org.apache.commons.logging.Log;
-036import 
org.apache.commons.logging.LogFactory;
-037import 
org.apache.hadoop.conf.Configuration;
-038import 
org.apache.hadoop.fs.FSDataInputStream;
-039import 
org.apache.hadoop.fs.FSDataOutputStream;
-040import 
org.apache.hadoop.fs.FileChecksum;
-041import org.apache.hadoop.fs.FileStatus;
-042import org.apache.hadoop.fs.FileSystem;
-043import org.apache.hadoop.fs.FileUtil;
-044import org.apache.hadoop.fs.Path;
-045import 
org.apache.hadoop.fs.permission.FsPermission;
-046import 
org.apache.hadoop.hbase.HBaseConfiguration;
-047import 
org.apache.hadoop.hbase.HConstants;
-048import 
org.apache.hadoop.hbase.TableName;
-049import 
org.apache.hadoop.hbase.client.RegionInfo;
-050import 
org.apache.hadoop.hbase.io.FileLink;
-051import 
org.apache.hadoop.hbase.io.HFileLink;
-052import 
org.apache.hadoop.hbase.io.WALLink;
-053import 
org.apache.hadoop.hbase.io.hadoopbackport.ThrottledInputStream;
-054import 
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-055import 
org.apache.hadoop.hbase.mob.MobUtils;
-056import 
org.apache.hadoop.hbase.util.AbstractHBaseTool;
-057import 
org.apache.hadoop.hbase.util.FSUtils;
-058import 
org.apache.hadoop.hbase.util.HFileArchiveUtil;
-059import 
org.apache.hadoop.hbase.util.Pair;
-060import 
org.apache.hadoop.io.BytesWritable;
-061import org.apache.hadoop.io.IOUtils;
-062import 
org.apache.hadoop.io.NullWritable;
-063import org.apache.hadoop.io.Writable;
-064import 
org.apache.hadoop.mapreduce.InputFormat;
-065import 
org.apache.hadoop.mapreduce.InputSplit;
-066import org.apache.hadoop.mapreduce.Job;
-067import 
org.apache.hadoop.mapreduce.JobContext;
-068import 
org.apache.hadoop.mapreduce.Mapper;
-069import 
org.apache.hadoop.mapreduce.RecordReader;
-070import 
org.apache.hadoop.mapreduce.TaskAttemptContext;
-071import 
org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
-072import 
org.apache.hadoop.mapreduce.security.TokenCache;
-073import 
org.apache.hadoop.util.StringUtils;
-074import org.apache.hadoop.util.Tool;
-075import 
org.apache.yetus.audience.InterfaceAudience;
-076
-077import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-078import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo;
-079import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
-080
-081/**
-082 * Export the specified snapshot to a 
given FileSystem.
-083 *
-084 * The .snapshot/name folder is copied to 
the destination cluster
-085 * and then all the hfiles/wals are 
copied using a Map-Reduce Job in the .archive/ location.
-086 * When everything is done, the second 
cluster can restore the snapshot.
-087 */
-088@InterfaceAudience.Public
-089public class ExportSnapshot extends 
AbstractHBaseTool implements Tool {
-090  public static final String NAME = 
"exportsnapshot";
-091  /** Configuration prefix for overrides 
for the source filesystem */
-092  public static final String 
CONF_SOURCE_PREFIX = NAME + ".from.";
-093  /** Configuration prefix for overrides 
for the destination filesystem */
-094  public static final String 
CONF_DEST_PREFIX = NAME + ".to.";
-095
-096  private static final Log LOG = 
LogFactory.getLog(ExportSnapshot.class);
-097
-098  private static final String MR_NUM_MAPS 
= "mapreduce.job.maps";
-099  private static final String 
CONF_NUM_SPLITS = "snapshot.export.format.splits";
-100  private static final String 
CONF_SNAPSHOT_NAME = "snapshot.export.format.snapshot.name";
-101  private static final String 
CONF_SNAPSHOT_DIR = "snapshot.export.format.snapshot.dir";
-102  private static final String 
CONF_FILES_USER = "snapshot.export.files.attributes.user";
-103  private static final String 
CONF_FILES_GROUP = "snapshot.export.files.attributes.group";
-104  private static final String 
CONF_FILES_MODE = "snapshot.export.files.attributes.mode";
-105  private static final String 
CONF_CHECKSUM_VERIFY = "snapshot.export.checksum.verify";
-106  private static final String 
CONF_OUTPUT_ROOT = "snapshot.export.output.root";
-107  private static final String 
CONF_INPUT_ROOT = "snapshot.export.input.root";
-108  private static final String 
CONF_BUFFER_SIZE = "snapshot.export.buffer.size";
-109  private static final String 
CONF_MAP_GROUP = 
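Since ExportSnapshot implements Tool (see the class declaration above), it can be driven programmatically as well as from the shell; a hedged sketch, with ExportSnapshotDriver, the snapshot name, and the destination URI as illustrative values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copy the named snapshot's manifest plus hfiles/wals to the target cluster.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "mySnapshot",
        "-copy-to", "hdfs://dest-cluster:8020/hbase" });
    System.exit(rc);
  }
}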

[28/51] [partial] hbase-site git commit: Published site at .

2017-12-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
index 4dedbc2..ffeadbf 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
@@ -902,350 +902,351 @@
 894return allowPartialResults;
 895  }
 896
-897  public Scan 
setLoadColumnFamiliesOnDemand(boolean value) {
-898return (Scan) 
super.setLoadColumnFamiliesOnDemand(value);
-899  }
-900
-901  /**
-902   * Compile the table and column family 
(i.e. schema) information
-903   * into a String. Useful for parsing 
and aggregation by debugging,
-904   * logging, and administration tools.
-905   * @return Map
-906   */
-907  @Override
-908  public Map<String, Object> getFingerprint() {
-909    Map<String, Object> map = new HashMap<>();
-910    List<String> families = new ArrayList<>();
-911if(this.familyMap.isEmpty()) {
-912  map.put("families", "ALL");
-913  return map;
-914} else {
-915  map.put("families", families);
-916}
-917    for (Map.Entry<byte[], NavigableSet<byte[]>> entry :
-918        this.familyMap.entrySet()) {
-919      families.add(Bytes.toStringBinary(entry.getKey()));
-920}
-921return map;
-922  }
-923
-924  /**
-925   * Compile the details beyond the scope 
of getFingerprint (row, columns,
-926   * timestamps, etc.) into a Map along 
with the fingerprinted information.
-927   * Useful for debugging, logging, and 
administration tools.
-928   * @param maxCols a limit on the number 
of columns output prior to truncation
-929   * @return Map
-930   */
-931  @Override
-932  public Map<String, Object> toMap(int maxCols) {
-933    // start with the fingerprint map and build on top of it
-934    Map<String, Object> map = getFingerprint();
-935    // map from families to column list replaces fingerprint's list of families
-936    Map<String, List<String>> familyColumns = new HashMap<>();
-937map.put("families", familyColumns);
-938// add scalar information first
-939map.put("startRow", 
Bytes.toStringBinary(this.startRow));
-940map.put("stopRow", 
Bytes.toStringBinary(this.stopRow));
-941map.put("maxVersions", 
this.maxVersions);
-942map.put("batch", this.batch);
-943map.put("caching", this.caching);
-944map.put("maxResultSize", 
this.maxResultSize);
-945map.put("cacheBlocks", 
this.cacheBlocks);
-946map.put("loadColumnFamiliesOnDemand", 
this.loadColumnFamiliesOnDemand);
-947    List<Long> timeRange = new ArrayList<>(2);
-948timeRange.add(this.tr.getMin());
-949timeRange.add(this.tr.getMax());
-950map.put("timeRange", timeRange);
-951int colCount = 0;
-952// iterate through affected families 
and list out up to maxCols columns
-953    for (Map.Entry<byte[], NavigableSet<byte[]>> entry :
-954        this.familyMap.entrySet()) {
-955      List<String> columns = new ArrayList<>();
-956  
familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns);
-957  if(entry.getValue() == null) {
-958colCount++;
-959--maxCols;
-960columns.add("ALL");
-961  } else {
-962colCount += 
entry.getValue().size();
-963        if (maxCols <= 0) {
-964  continue;
-965}
-966for (byte [] column : 
entry.getValue()) {
-967          if (--maxCols <= 0) {
-968continue;
-969  }
-970  
columns.add(Bytes.toStringBinary(column));
-971}
-972  }
-973}
-974map.put("totalColumns", colCount);
-975if (this.filter != null) {
-976  map.put("filter", 
this.filter.toString());
-977}
-978// add the id if set
-979if (getId() != null) {
-980  map.put("id", getId());
-981}
-982return map;
-983  }
-984
-985  /**
-986   * Enable/disable "raw" mode for this 
scan.
-987   * If "raw" is enabled the scan will 
return all
-988   * delete marker and deleted rows that 
have not
-989   * been collected, yet.
-990   * This is mostly useful for Scan on 
column families
-991   * that have KEEP_DELETED_ROWS 
enabled.
-992   * It is an error to specify any column 
when "raw" is set.
-993   * @param raw True/False to 
enable/disable "raw" mode.
-994   */
-995  public Scan setRaw(boolean raw) {
-996setAttribute(RAW_ATTR, 
Bytes.toBytes(raw));
-997return this;
-998  }
-999
-1000  /**
-1001   * @return True if this Scan is in 
"raw" mode.
-1002   */
-1003  public boolean isRaw() {
-1004byte[] attr = 
getAttribute(RAW_ATTR);
-1005return attr == null ? false : 
Bytes.toBoolean(attr);
-1006  }
-1007
-1008  /**
-1009   * Set whether this scan is a small 
scan
-1010   * p
-1011   * Small scan should use pread and big 
scan can use seek + read seek + read is fast but can cause
-1012   * 
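A hedged sketch of the "raw" mode described above: the scan also surfaces delete markers and not-yet-collected deleted rows. RawScanExample is an illustrative name, and the Table handle is assumed to come from an existing Connection:

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class RawScanExample {
  static void scanRaw(Table table) throws Exception {
    // Per the Javadoc above, no columns may be specified in raw mode.
    Scan scan = new Scan().setRaw(true);
    try (ResultScanner rs = table.getScanner(scan)) {
      for (Result r : rs) {
        System.out.println(r);   // includes delete markers
      }
    }
  }
}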

[28/51] [partial] hbase-site git commit: Published site at .

2017-12-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.html
index 252bafd..72294e8 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class DefaultVisibilityLabelServiceImpl
+public class DefaultVisibilityLabelServiceImpl
 extends Object
 implements VisibilityLabelService
 
@@ -383,7 +383,7 @@ implements 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -392,7 +392,7 @@ implements 
 
 SYSTEM_LABEL_ORDINAL
-private static final int SYSTEM_LABEL_ORDINAL
+private static final int SYSTEM_LABEL_ORDINAL
 
 See Also:
 Constant
 Field Values
@@ -405,7 +405,7 @@ implements 
 
 LABELS_TABLE_TAGS
-private static final Tag[] LABELS_TABLE_TAGS
+private static final Tag[] LABELS_TABLE_TAGS
 
 
 
@@ -414,7 +414,7 @@ implements 
 
 DUMMY_VALUE
-private static final byte[] DUMMY_VALUE
+private static final byte[] DUMMY_VALUE
 
 
 
@@ -423,7 +423,7 @@ implements 
 
 ordinalCounter
-private AtomicInteger ordinalCounter
+private AtomicInteger ordinalCounter
 
 
 
@@ -432,7 +432,7 @@ implements 
 
 conf
-private org.apache.hadoop.conf.Configuration conf
+private org.apache.hadoop.conf.Configuration conf
 
 
 
@@ -441,7 +441,7 @@ implements 
 
 labelsRegion
-private Region labelsRegion
+private Region labelsRegion
 
 
 
@@ -450,7 +450,7 @@ implements 
 
 labelsCache
-private VisibilityLabelsCache labelsCache
+private VisibilityLabelsCache labelsCache
 
 
 
@@ -459,7 +459,7 @@ implements 
 
 scanLabelGenerators
-private List<ScanLabelGenerator> scanLabelGenerators
+private List<ScanLabelGenerator> scanLabelGenerators
 
 
 
@@ -476,7 +476,7 @@ implements 
 
 DefaultVisibilityLabelServiceImpl
-public DefaultVisibilityLabelServiceImpl()
+public DefaultVisibilityLabelServiceImpl()
 
 
 
@@ -493,7 +493,7 @@ implements 
 
 setConf
-public void setConf(org.apache.hadoop.conf.Configuration conf)
+public void setConf(org.apache.hadoop.conf.Configuration conf)
 
 Specified by:
 setConf in interface org.apache.hadoop.conf.Configurable
@@ -506,7 +506,7 @@ implements 
 
 getConf
-public org.apache.hadoop.conf.Configuration getConf()
+public org.apache.hadoop.conf.Configuration getConf()
 
 Specified by:
 getConf in interface org.apache.hadoop.conf.Configurable
@@ -519,7 +519,7 @@ implements 
 
 init
-public void init(RegionCoprocessorEnvironment e)
+public void init(RegionCoprocessorEnvironment e)
   throws IOException
 Description copied from 
interface: VisibilityLabelService
 System calls this after opening of regions. Gives a chance 
for the VisibilityLabelService to so
@@ -540,7 +540,7 @@ implements 
 
 getExistingLabelsWithAuths
-protected List<List<Cell>> getExistingLabelsWithAuths()
+protected List<List<Cell>> getExistingLabelsWithAuths()
    throws IOException
 
 Throws:
@@ -554,7 +554,7 @@ implements 
 
 extractLabelsAndAuths
-protectedPairhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface 

[28/51] [partial] hbase-site git commit: Published site at .

2017-12-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
index 7c59e27..c904c56 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
@@ -119,4048 +119,4054 @@
 111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
 113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-151import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-152import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-153import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-154import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-155import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
-156import 

[28/51] [partial] hbase-site git commit: Published site at .

2017-12-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index c9a0991..e165861 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -2983,9 +2983,7 @@
 append(Append)
 - Method in class org.apache.hadoop.hbase.client.AsyncTableImpl
 
 append(Append)
 - Method in class org.apache.hadoop.hbase.client.HTable
-
-Appends values to one or more columns within a single 
row.
-
+
 append(Append)
 - Method in class org.apache.hadoop.hbase.client.RawAsyncTableImpl
 
 append(Append)
 - Method in interface org.apache.hadoop.hbase.client.Table
@@ -3551,13 +3549,6 @@
 
 arrayOffset()
 - Method in class org.apache.hadoop.hbase.nio.SingleByteBuff
 
-ArrayUtils 
- Class in org.apache.hadoop.hbase.util
-
-A set of array utility functions that return reasonable 
values in cases where an array is
- allocated or if it is null
-
-ArrayUtils()
 - Constructor for class org.apache.hadoop.hbase.util.ArrayUtils
-
 ArrayValueCollection(CopyOnWriteArrayMap.ArrayHolderK,
 V) - Constructor for class org.apache.hadoop.hbase.types.CopyOnWriteArrayMap.ArrayValueCollection
 
 ArrayValueIterator(CopyOnWriteArrayMap.ArrayHolderK,
 V) - Constructor for class org.apache.hadoop.hbase.types.CopyOnWriteArrayMap.ArrayValueIterator
@@ -5502,9 +5493,7 @@
 Batch()
 - Constructor for class org.apache.hadoop.hbase.client.coprocessor.Batch
 
 batch(List<? extends Row>, Object[]) - Method in class org.apache.hadoop.hbase.client.HTable
-
-Method that does a batch call on Deletes, Gets, Puts, 
Increments, Appends, RowMutations.
-
+
 batch(List<? extends Row>, Object[], int) - Method in class org.apache.hadoop.hbase.client.HTable
 
 batch(List<? extends Row>) - Method in class org.apache.hadoop.hbase.client.RawAsyncTableImpl
@@ -5565,9 +5554,7 @@
 A simple version of batch.
 
 batchCallback(List<? extends Row>, Object[], Batch.Callback<R>) - Method in class org.apache.hadoop.hbase.client.HTable
-
-Same as Table.batch(List,
 Object[]), but with a callback.
-
+
 batchCallback(List<? extends Row>, Object[], Batch.Callback<R>) - Method in interface org.apache.hadoop.hbase.client.Table
 
 Same as Table.batch(List,
 Object[]), but with a callback.
@@ -5579,11 +5566,7 @@
 batchCoprocessorService(Descriptors.MethodDescriptor,
 Message, byte[], byte[], R) - Method in class 
org.apache.hadoop.hbase.client.HTable
 
 batchCoprocessorService(Descriptors.MethodDescriptor,
 Message, byte[], byte[], R, Batch.CallbackR) - Method in 
class org.apache.hadoop.hbase.client.HTable
-
-Creates an instance of the given Service 
subclass for each table
- region spanning the range from the startKey row to 
endKey row (inclusive), all
- the invocations to the same region server will be batched into one call.
-
+
 batchCoprocessorService(Descriptors.MethodDescriptor,
 Message, byte[], byte[], R) - Method in interface 
org.apache.hadoop.hbase.client.Table
 
 Creates an instance of the given Service 
subclass for each table
@@ -9816,8 +9799,7 @@
 
 checkAndDelete(byte[],
 byte[], byte[], byte[], Delete) - Method in class 
org.apache.hadoop.hbase.client.HTable
 
-Atomically checks if a row/family/qualifier value matches 
the expected
- value.
+Deprecated.
 
 checkAndDelete(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], Delete) - Method 
in class org.apache.hadoop.hbase.client.HTable
 
@@ -9825,25 +9807,25 @@
 
 checkAndDelete(byte[],
 byte[], byte[], CompareOperator, byte[], Delete) - Method in class 
org.apache.hadoop.hbase.client.HTable
 
-Atomically checks if a row/family/qualifier value matches 
the expected
- value.
+Deprecated.
 
 checkAndDelete(byte[],
 byte[], byte[], byte[], Delete) - Method in interface 
org.apache.hadoop.hbase.client.Table
 
-Atomically checks if a row/family/qualifier value matches 
the expected
- value.
+Deprecated.
+Since 2.0.0. Will be 
removed in 3.0.0. Use Table.checkAndMutate(byte[],
 byte[])
+
 
 checkAndDelete(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], Delete) - Method 
in interface org.apache.hadoop.hbase.client.Table
 
 Deprecated.
-Since 2.0.0. Will be 
removed in 3.0.0. Use
-  Table.checkAndDelete(byte[],
 byte[], byte[], byte[], Delete)
+Since 2.0.0. Will be 
removed in 3.0.0. Use Table.checkAndMutate(byte[],
 byte[])
 
 
 checkAndDelete(byte[],
 byte[], byte[], CompareOperator, byte[], Delete) - Method in 
interface org.apache.hadoop.hbase.client.Table
 
-Atomically checks if a row/family/qualifier value matches 
the expected
- value.
+Deprecated.
+Since 2.0.0. Will be 
removed in 3.0.0. Use Table.checkAndMutate(byte[],
 byte[])
+
 
 checkAndDelete(byte[],
 byte[], byte[], byte[], Delete) - Method in class 
org.apache.hadoop.hbase.rest.client.RemoteHTable
 
@@ -9852,7 +9834,9 @@
 Deprecated.
 
 checkAndDelete(byte[],
 byte[], byte[], CompareOperator, byte[], Delete) - Method in class 
org.apache.hadoop.hbase.rest.client.RemoteHTable
-
+
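A hedged sketch of the replacement named in the deprecation notes above, Table.checkAndMutate(byte[], byte[]); CheckAndMutateExample and the row/family/qualifier values are illustrative:

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndMutateExample {
  // Atomically delete the row only if cf:q still holds the expected value.
  static boolean deleteIfMatches(Table table) throws Exception {
    byte[] row = Bytes.toBytes("row1");
    byte[] cf = Bytes.toBytes("cf");
    return table.checkAndMutate(row, cf)
        .qualifier(Bytes.toBytes("q"))
        .ifEquals(Bytes.toBytes("expected"))
        .thenDelete(new Delete(row));
  }
}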

[28/51] [partial] hbase-site git commit: Published site at .

2017-12-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html 
b/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html
index 5066209..c481f4d 100644
--- a/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html
+++ b/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":6,"i1":18,"i2":6,"i3":6,"i4":6};
+var methods = {"i0":18,"i1":18,"i2":18,"i3":6,"i4":6,"i5":6,"i6":18,"i7":18};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -101,24 +101,19 @@ var activeTableTab = "activeTableTab";
 
 
 All Superinterfaces:
-Cell, Cloneable, HeapSize, RawCell, SettableSequenceId, SettableTimestamp
+Cell, Cloneable, HeapSize, RawCell
 
 
 All Known Implementing Classes:
-BufferedDataBlockEncoder.OffheapDecodedCell,
 BufferedDataBlockEncoder.OnheapDecodedCell,
 ByteBufferChunkCell, 
ByteBufferKeyValue, IndividualBytesFieldCell, KeyValue, KeyValue.KeyOnlyKeyValue, MapReduceCell, NoTagByteBufferChunkCell, NoTagsByteBufferKeyValue, NoTagsKeyValue, PrivateCellUtil.TagRewriteByteBufferCell, PrivateCellUtil.TagRewriteCell, PrivateCellUtil.ValueAndTagRewriteByteBufferCell, 
PrivateCellUtil.ValueAndTagRewriteCell, SizeCachedKeyValue, SizeCachedNoTagsKeyValue
+BufferedDataBlockEncoder.OffheapDecodedCell,
 BufferedDataBlockEncoder.OnheapDecodedCell,
 ByteBufferChunkCell, 
ByteBufferKeyValue, IndividualBytesFieldCell, KeyValue, KeyValue.KeyOnlyKeyValue, MapReduceCell, NoTagByteBufferChunkCell, NoTagsByteBufferKeyValue, NoTagsKeyValue, PrivateCellUtil.EmptyByteBufferCell, PrivateCellUtil.EmptyCell, PrivateCellUtil.FirstOnRowByteBufferCell, PrivateCellUtil.FirstOnRowCell, PrivateCellUtil.FirstOnRowColByteBufferCell, PrivateCellUtil.FirstOnRowColCell, PrivateCellUtil.FirstOnRowColTSByteBufferCell, PrivateCellUtil.FirstOnRowColTSCell, PrivateCellUtil.FirstOnRowDeleteFamilyCell, PrivateCellUtil.LastOnRowByteBufferCell, PrivateCellUtil.LastOnRowCell, PrivateCellUtil.LastOnRowColByteBufferCell, PrivateCellUtil.LastOnRowColCell, 
PrivateCellUtil.TagRewriteByteBufferCell, PrivateCellUtil.TagRewriteCell, PrivateCellUtil.ValueAndTagRewriteByteBufferCell, 
PrivateCellUtil.ValueAndTagRewriteCell, SizeCachedKeyValue, SizeCachedNoTagsKeyValue
 
 
 
 @InterfaceAudience.Private
-public interface ExtendedCell
-extends RawCell, SettableSequenceId, SettableTimestamp, HeapSize, Cloneable
+public interface ExtendedCell
+extends RawCell, HeapSize, Cloneable
 Extension to Cell with server side required 
functions. Server side Cell implementations
  must implement this.
-
-See Also:
-SettableSequenceId, 
-SettableTimestamp
-
 
 
 
@@ -164,7 +159,7 @@ extends Method and Description
 
 
-ExtendedCell
+default ExtendedCell
 deepClone()
 Does a deep copy of the contents to a new memory area and 
returns it as a new cell.
 
@@ -177,18 +172,36 @@ extends 
-int
+default int
 getSerializedSize(boolean withTags)
 
 
 void
+setSequenceId(long seqId)
+Sets with the given seqId.
+
+
+
+void
+setTimestamp(byte[] ts)
+Sets with the given timestamp.
+
+
+
+void
+setTimestamp(long ts)
+Sets with the given timestamp.
+
+
+
+default void
 write(ByteBuffer buf,
  int offset)
 Write this Cell into the given buf's offset in a KeyValue format.
 
 
-
-int
+
+default int
 write(OutputStream out,
  boolean withTags)
 Write this cell to an OutputStream in a KeyValue format.
@@ -210,20 +223,6 @@ extends getFamilyArray,
 getFamilyLength,
 getFamilyOffset,
 getQualifierArray,
 getQualifierLength,
 getQualifierOffset,
 getRowArray,
 getRowLength,
 getRowOffset,
 getSequenceId,
 getTagsArray, getTagsLength,
 getTagsOffset,
 getTimestamp,
 getTypeByte,
 getValueArray,
 getValueLength,
 getValueOffset
 
 
-
-
-
-Methods inherited from interfaceorg.apache.hadoop.hbase.SettableSequenceId
-setSequenceId
-
-
-
-
-
-Methods inherited from interfaceorg.apache.hadoop.hbase.SettableTimestamp
-setTimestamp,
 setTimestamp
-
-
 
 
 
@@ -250,7 +249,7 @@ extends 
 
 

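KeyValue appears in the implementing-classes list above, so the setters that moved into ExtendedCell can be exercised through it; a hedged sketch with illustrative row/family/qualifier/value bytes:

import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class ExtendedCellExample {
  public static void main(String[] args) throws Exception {
    ExtendedCell cell = new KeyValue(Bytes.toBytes("row"),
        Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    cell.setSequenceId(42L);          // server-side MVCC sequence id
    cell.setTimestamp(1234567890L);   // one of the two setTimestamp overloads
    System.out.println(cell);
  }
}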
[28/51] [partial] hbase-site git commit: Published site at .

2017-12-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
index 312947a..b283abc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
@@ -32,317 +32,322 @@
 024import java.util.NavigableMap;
 025import java.util.TreeMap;
 026import java.util.UUID;
-027
-028import org.apache.hadoop.hbase.Cell;
-029import 
org.apache.hadoop.hbase.CellUtil;
-030import 
org.apache.hadoop.hbase.KeyValue;
-031import 
org.apache.yetus.audience.InterfaceAudience;
-032import 
org.apache.hadoop.hbase.io.TimeRange;
-033import 
org.apache.hadoop.hbase.security.access.Permission;
-034import 
org.apache.hadoop.hbase.security.visibility.CellVisibility;
-035import 
org.apache.hadoop.hbase.util.Bytes;
-036import 
org.apache.hadoop.hbase.util.ClassSize;
-037
-038/**
-039 * Used to perform Increment operations 
on a single row.
-040 * p
-041 * This operation ensures atomicity to 
readers. Increments are done
-042 * under a single row lock, so write 
operations to a row are synchronized, and
-043 * readers are guaranteed to see this 
operation fully completed.
-044 * p
-045 * To increment columns of a row, 
instantiate an Increment object with the row
-046 * to increment.  At least one column to 
increment must be specified using the
-047 * {@link #addColumn(byte[], byte[], 
long)} method.
-048 */
-049@InterfaceAudience.Public
-050public class Increment extends Mutation 
implements Comparable<Row> {
-051  private static final long HEAP_OVERHEAD 
=  ClassSize.REFERENCE + ClassSize.TIMERANGE;
-052  private TimeRange tr = new 
TimeRange();
-053
-054  /**
-055   * Create a Increment operation for the 
specified row.
-056   * p
-057   * At least one column must be 
incremented.
-058   * @param row row key (we will make a 
copy of this).
-059   */
-060  public Increment(byte [] row) {
-061this(row, 0, row.length);
-062  }
-063
-064  /**
-065   * Create a Increment operation for the 
specified row.
-066   * p
-067   * At least one column must be 
incremented.
-068   * @param row row key (we will make a 
copy of this).
-069   */
-070  public Increment(final byte [] row, 
final int offset, final int length) {
-071checkRow(row, offset, length);
-072this.row = Bytes.copy(row, offset, 
length);
-073  }
-074  /**
-075   * Copy constructor
-076   * @param i
-077   */
-078  public Increment(Increment i) {
-079this.row = i.getRow();
-080this.ts = i.getTimeStamp();
-081this.tr = i.getTimeRange();
-082
this.familyMap.putAll(i.getFamilyCellMap());
-083    for (Map.Entry<String, byte[]> entry : i.getAttributesMap().entrySet()) {
-084  this.setAttribute(entry.getKey(), 
entry.getValue());
-085}
-086super.setPriority(i.getPriority());
-087  }
-088
-089  /**
-090   * Add the specified KeyValue to this 
operation.
-091   * @param cell individual Cell
-092   * @return this
-093   * @throws java.io.IOException e
-094   */
-095  public Increment add(Cell cell) throws 
IOException{
-096byte [] family = 
CellUtil.cloneFamily(cell);
-097    List<Cell> list = getCellList(family);
-098//Checking that the row of the kv is 
the same as the put
-099if (!CellUtil.matchingRows(cell, 
this.row)) {
-100  throw new WrongRowIOException("The 
row in " + cell +
-101" doesn't match the original one 
" +  Bytes.toStringBinary(this.row));
-102}
-103list.add(cell);
-104return this;
-105  }
-106
-107  /**
-108   * Increment the column from the 
specific family with the specified qualifier
-109   * by the specified amount.
-110   * p
-111   * Overrides previous calls to 
addColumn for this family and qualifier.
-112   * @param family family name
-113   * @param qualifier column qualifier
-114   * @param amount amount to increment 
by
-115   * @return the Increment object
-116   */
-117  public Increment addColumn(byte [] 
family, byte [] qualifier, long amount) {
-118if (family == null) {
-119  throw new 
IllegalArgumentException("family cannot be null");
-120}
-121    List<Cell> list = getCellList(family);
-122KeyValue kv = 
createPutKeyValue(family, qualifier, ts, Bytes.toBytes(amount));
-123list.add(kv);
-124return this;
-125  }
-126
-127  /**
-128   * Gets the TimeRange used for this 
increment.
-129   * @return TimeRange
-130   */
-131  public TimeRange getTimeRange() {
-132return this.tr;
-133  }
-134
-135  /**
-136   * Sets the TimeRange to be used on the 
Get for this increment.
-137   * p
-138   * This is useful for when you have 
counters that only last for specific
-139   * periods of time (ie. counters that 
are partitioned by time).  By setting
-140   * the range of valid times for this 
increment, 
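A hedged sketch of the usage pattern the class Javadoc above prescribes (instantiate with the row, then addColumn at least once); IncrementExample and the column names are illustrative:

import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementExample {
  // Bump cf:hits by one under the row lock and return the new value.
  static long bumpCounter(Table table) throws Exception {
    Increment inc = new Increment(Bytes.toBytes("row1"));
    inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);
    Result r = table.increment(inc);
    return Bytes.toLong(r.getValue(Bytes.toBytes("cf"), Bytes.toBytes("hits")));
  }
}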

[28/51] [partial] hbase-site git commit: Published site at .

2017-12-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
index 3edfbef..9707b2c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
@@ -2459,5936 +2459,5935 @@
 2451  }
 2452
 2453  for (HStore s : storesToFlush) {
-2454MemStoreSize flushableSize = 
s.getFlushableSize();
-2455
totalSizeOfFlushableStores.incMemStoreSize(flushableSize);
-2456
storeFlushCtxs.put(s.getColumnFamilyDescriptor().getName(),
-2457  
s.createFlushContext(flushOpSeqId, tracker));
-2458// for writing stores to WAL
-2459
committedFiles.put(s.getColumnFamilyDescriptor().getName(), null);
-2460
storeFlushableSize.put(s.getColumnFamilyDescriptor().getName(), 
flushableSize);
-2461  }
-2462
-2463  // write the snapshot start to 
WAL
-2464      if (wal != null && !writestate.readOnly) {
-2465FlushDescriptor desc = 
ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH,
-2466getRegionInfo(), 
flushOpSeqId, committedFiles);
-2467// No sync. Sync is below where 
no updates lock and we do FlushAction.COMMIT_FLUSH
-2468WALUtil.writeFlushMarker(wal, 
this.getReplicationScope(), getRegionInfo(), desc, false,
-2469mvcc);
-2470  }
-2471
-2472  // Prepare flush (take a 
snapshot)
-2473  for (StoreFlushContext flush : 
storeFlushCtxs.values()) {
-2474flush.prepare();
-2475  }
-2476} catch (IOException ex) {
-2477  doAbortFlushToWAL(wal, 
flushOpSeqId, committedFiles);
-2478  throw ex;
-2479} finally {
-2480  
this.updatesLock.writeLock().unlock();
-2481}
-2482String s = "Finished memstore 
snapshotting " + this + ", syncing WAL and waiting on mvcc, " +
-2483"flushsize=" + 
totalSizeOfFlushableStores;
-2484status.setStatus(s);
-2485doSyncOfUnflushedWALChanges(wal, 
getRegionInfo());
-2486return new 
PrepareFlushResult(storeFlushCtxs, committedFiles, storeFlushableSize, 
startTime,
-2487flushOpSeqId, flushedSeqId, 
totalSizeOfFlushableStores);
-2488  }
-2489
-2490  /**
-2491   * Utility method broken out of 
internalPrepareFlushCache so that method is smaller.
-2492   */
-2493  private void logFatLineOnFlush(Collection<HStore> storesToFlush, long sequenceId) {
-2494if (!LOG.isInfoEnabled()) {
-2495  return;
-2496}
-2497// Log a fat line detailing what is 
being flushed.
-2498StringBuilder perCfExtras = null;
-2499if (!isAllFamilies(storesToFlush)) 
{
-2500  perCfExtras = new 
StringBuilder();
-2501  for (HStore store: storesToFlush) 
{
-2502perCfExtras.append("; 
").append(store.getColumnFamilyName());
-2503perCfExtras.append("=")
-2504
.append(StringUtils.byteDesc(store.getFlushableSize().getDataSize()));
-2505  }
-2506}
-2507LOG.info("Flushing " + + 
storesToFlush.size() + "/" + stores.size() +
-2508" column families, memstore=" + 
StringUtils.byteDesc(this.memstoreDataSize.get()) +
-2509        ((perCfExtras != null && perCfExtras.length() > 0)? perCfExtras.toString(): "") +
-2510((wal != null) ? "" : "; WAL is 
null, using passed sequenceid=" + sequenceId));
-2511  }
-2512
-2513  private void doAbortFlushToWAL(final 
WAL wal, final long flushOpSeqId,
-2514      final Map<byte[], List<Path>> committedFiles) {
-2515if (wal == null) return;
-2516try {
-2517  FlushDescriptor desc = 
ProtobufUtil.toFlushDescriptor(FlushAction.ABORT_FLUSH,
-2518  getRegionInfo(), flushOpSeqId, 
committedFiles);
-2519  WALUtil.writeFlushMarker(wal, 
this.getReplicationScope(), getRegionInfo(), desc, false,
-2520  mvcc);
-2521} catch (Throwable t) {
-2522  LOG.warn("Received unexpected 
exception trying to write ABORT_FLUSH marker to WAL:" +
-2523  
StringUtils.stringifyException(t));
-2524  // ignore this since we will be 
aborting the RS with DSE.
-2525}
-2526// we have called 
wal.startCacheFlush(), now we have to abort it
-2527
wal.abortCacheFlush(this.getRegionInfo().getEncodedNameAsBytes());
-2528  }
-2529
-2530  /**
-2531   * Sync unflushed WAL changes. See 
HBASE-8208 for details
-2532   */
-2533  private static void 
doSyncOfUnflushedWALChanges(final WAL wal, final RegionInfo hri)
-2534  throws IOException {
-2535if (wal == null) {
-2536  return;
-2537}
-2538try {
-2539  wal.sync(); // ensure that flush 
marker is sync'ed
-2540} catch (IOException ioe) {
-2541  

[28/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html
index cc13777..a2313bc 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class RSProcedureDispatcher
+public class RSProcedureDispatcher
 extends RemoteProcedureDispatcher<MasterProcedureEnv, ServerName>
 implements ServerListener
 A remote procedure dispatcher for regionservers.
@@ -148,11 +148,17 @@ implements 
 private class
-RSProcedureDispatcher.CloseRegionRemoteCall
+RSProcedureDispatcher.CloseRegionRemoteCall
+Compatibility class used by RSProcedureDispatcher.CompatRemoteProcedureResolver
 to close regions using old
+ AdminService#closeRegion(RpcController, CloseRegionRequest, 
RpcCallback) rpc.
+
 
 
 protected class
-RSProcedureDispatcher.CompatRemoteProcedureResolver
+RSProcedureDispatcher.CompatRemoteProcedureResolver
+Compatibility class to open and close regions using old 
endpoints (openRegion/closeRegion) in
+ AdminProtos.AdminService.
+
 
 
 protected class
@@ -160,7 +166,10 @@ implements 
 private class
-RSProcedureDispatcher.OpenRegionRemoteCall
+RSProcedureDispatcher.OpenRegionRemoteCall
+Compatibility class used by RSProcedureDispatcher.CompatRemoteProcedureResolver
 to open regions using old
+ AdminService#openRegion(RpcController, OpenRegionRequest, 
RpcCallback) rpc.
+
 
 
 static class
@@ -178,10 +187,6 @@ implements private static interface
 RSProcedureDispatcher.RemoteProcedureResolver
 
-
-static class
-RSProcedureDispatcher.ServerOperation
-
 
 
 
@@ -217,15 +222,19 @@ implements master
 
 
+private MasterProcedureEnv
+procedureEnv
+
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 RS_RPC_STARTUP_WAIT_TIME_CONF_KEY
 
-
+
 private static int
 RS_VERSION_WITH_EXEC_PROCS
 
-
-protected long
+
+private long
 rsStartupWaitTime
 
 
@@ -281,7 +290,7 @@ implements 
 protected void
 remoteDispatch(ServerName serverName,
-  Set<RemoteProcedureDispatcher.RemoteProcedure> operations)
+  Set<RemoteProcedureDispatcher.RemoteProcedure> remoteProcedures)
 
 
 void
@@ -298,8 +307,11 @@ implements 
 void
 splitAndResolveOperation(ServerNameserverName,
-http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetRemoteProcedureDispatcher.RemoteProcedureoperations,
-RSProcedureDispatcher.RemoteProcedureResolverresolver)
+http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetRemoteProcedureDispatcher.RemoteProcedureremoteProcedures,
+RSProcedureDispatcher.RemoteProcedureResolverresolver)
+Fetches RemoteProcedureDispatcher.RemoteOperations
+ from the given remoteProcedures and groups them by class of the 
returned operation.
+
 
 
 boolean
@@ -351,7 +363,7 @@ implements 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -360,7 +372,7 @@ implements 
 
 RS_RPC_STARTUP_WAIT_TIME_CONF_KEY
-public static final String RS_RPC_STARTUP_WAIT_TIME_CONF_KEY
+public static final String RS_RPC_STARTUP_WAIT_TIME_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -373,7 +385,7 @@ implements 
 
 DEFAULT_RS_RPC_STARTUP_WAIT_TIME
-private static final int DEFAULT_RS_RPC_STARTUP_WAIT_TIME
+private static final int DEFAULT_RS_RPC_STARTUP_WAIT_TIME
 
 See Also:
 Constant
 Field Values
@@ -386,7 +398,7 @@ implements 
 
 RS_VERSION_WITH_EXEC_PROCS
-private static final int RS_VERSION_WITH_EXEC_PROCS
+private static final int RS_VERSION_WITH_EXEC_PROCS
 
 See Also:
 Constant
 Field Values
@@ -399,16 +411,25 @@ implements 
 
 master
-protected final MasterServices master
+protected final MasterServices master
 
 
 
 
 
-
+
 
 rsStartupWaitTime
-protected final long rsStartupWaitTime
+private final long rsStartupWaitTime
+
+
+
+
+
+
+
+procedureEnv
+privateMasterProcedureEnv 

[28/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
index 675c3af..4f79024 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
@@ -453,25 +453,27 @@ implements 
-CompletableFuture<Void>
-compact(TableName tableName)
-Compact a table.
+private CompletableFuture<Void>
+compact(TableName tableName,
+   byte[] columnFamily,
+   boolean major,
+   CompactType compactType)
+Compact column family of a table, Asynchronous operation even if CompletableFuture.get()
 
 
 
 CompletableFuture<Void>
-compact(TableName tableName,
-        byte[] columnFamily)
+compact(TableName tableName,
+        byte[] columnFamily,
+        CompactType compactType)
 Compact a column family within a table.
 
 
 
-private CompletableFuture<Void>
-compact(TableName tableName,
-        byte[] columnFamily,
-        boolean major,
+CompletableFuture<Void>
+compact(TableName tableName,
         CompactType compactType)
-Compact column family of a table, Asynchronous operation even if CompletableFuture.get()
+Compact a table.
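For orientation, a hedged usage sketch of the new CompactType-aware compact overload listed above; the table and family names are illustrative, and connection setup is elided.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.CompactType;
import org.apache.hadoop.hbase.util.Bytes;

class CompactUsageSketch {
  // Request a compaction of one column family. The returned future completes
  // when the request is accepted; the compaction itself runs asynchronously.
  static void compactFamily(AsyncAdmin admin) {
    admin.compact(TableName.valueOf("demo_table"), Bytes.toBytes("cf"), CompactType.NORMAL)
        .join();
  }
}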
 
 
 
@@ -711,7 +713,8 @@ implements
 CompletableFuture<CompactionState>
-getCompactionState(TableName tableName)
+getCompactionState(TableName tableName,
+                   CompactType compactType)
 Get the current compaction state of a table.
 
 
@@ -1052,15 +1055,17 @@ implements
 CompletableFuture<Void>
-majorCompact(TableName tableName)
-Major compact a table.
+majorCompact(TableName tableName,
+             byte[] columnFamily,
+             CompactType compactType)
+Major compact a column family within a table.

 CompletableFuture<Void>
-majorCompact(TableName tableName,
-             byte[] columnFamily)
-Major compact a column family within a table.
+majorCompact(TableName tableName,
+             CompactType compactType)
+Major compact a table.
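Similarly, a sketch combining the new majorCompact and getCompactionState overloads, assuming an AsyncAdmin instance is already at hand:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.CompactType;
import org.apache.hadoop.hbase.client.CompactionState;

class MajorCompactSketch {
  // Kick off a major compaction of the whole table, then read back the
  // table's current compaction state via the CompactType-aware overload.
  static CompactionState majorCompactAndCheck(AsyncAdmin admin, TableName table) {
    admin.majorCompact(table, CompactType.NORMAL).join();
    return admin.getCompactionState(table, CompactType.NORMAL).join();
  }
}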
 
 
 
@@ -1374,7 +1379,7 @@ implements AsyncAdmin
-addReplicationPeer,
 balance,
 getBackupMasters,
 getMaster,
 getMasterCoprocessors,
 getMasterInfoPort,
 getRegionServers,
 listDeadServers,
 listTableDescriptors,
 listTableNames,
 snapshot,
 snapshot
+addReplicationPeer,
 balance,
 compact,
 compact,
 getBackupMasters,
 getCompactionState,
 getMaster,
 getMasterCoprocessors, getMasterInfoPort,
 getRegionServers,
 listDeadServers,
 listTableDescriptors,
 listTableNames,
 majorCompact,
 majorCompact,
 snapshot, snapshot
 
 
 
@@ -2218,43 +2223,46 @@ implements

compact
-public CompletableFuture

[28/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
index 166fc15..07e4dd5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
@@ -831,235 +831,236 @@
       org.apache.commons.lang3.ArrayUtils.class,
       com.fasterxml.jackson.databind.ObjectMapper.class,
       com.fasterxml.jackson.core.Versioned.class,
-      com.fasterxml.jackson.annotation.JsonView.class);
-  }
-
-  /**
-   * Returns a classpath string built from the content of the "tmpjars" value in {@code conf}.
-   * Also exposed to shell scripts via `bin/hbase mapredcp`.
-   */
-  public static String buildDependencyClasspath(Configuration conf) {
-    if (conf == null) {
-      throw new IllegalArgumentException("Must provide a configuration object.");
-    }
-    Set<String> paths = new HashSet<>(conf.getStringCollection("tmpjars"));
-    if (paths.isEmpty()) {
-      throw new IllegalArgumentException("Configuration contains no tmpjars.");
-    }
-    StringBuilder sb = new StringBuilder();
-    for (String s : paths) {
-      // entries can take the form 'file:/path/to/file.jar'.
-      int idx = s.indexOf(":");
-      if (idx != -1) s = s.substring(idx + 1);
-      if (sb.length() > 0) sb.append(File.pathSeparator);
-      sb.append(s);
-    }
-    return sb.toString();
-  }
-
-  /**
-   * Add the HBase dependency jars as well as jars for any of the configured
-   * job classes to the job configuration, so that JobClient will ship them
-   * to the cluster and add them to the DistributedCache.
-   */
-  public static void addDependencyJars(Job job) throws IOException {
-    addHBaseDependencyJars(job.getConfiguration());
-    try {
-      addDependencyJarsForClasses(job.getConfiguration(),
-          // when making changes here, consider also mapred.TableMapReduceUtil
-          // pull job classes
-          job.getMapOutputKeyClass(),
-          job.getMapOutputValueClass(),
-          job.getInputFormatClass(),
-          job.getOutputKeyClass(),
-          job.getOutputValueClass(),
-          job.getOutputFormatClass(),
-          job.getPartitionerClass(),
-          job.getCombinerClass());
-    } catch (ClassNotFoundException e) {
-      throw new IOException(e);
-    }
-  }
-
-  /**
-   * Add the jars containing the given classes to the job's configuration
-   * such that JobClient will ship them to the cluster and add them to
-   * the DistributedCache.
-   * @deprecated rely on {@link #addDependencyJars(Job)} instead.
-   */
-  @Deprecated
-  public static void addDependencyJars(Configuration conf,
-      Class<?>... classes) throws IOException {
-    LOG.warn("The addDependencyJars(Configuration, Class<?>...) method has been deprecated since it"
-        + " is easy to use incorrectly. Most users should rely on addDependencyJars(Job) " +
-        "instead. See HBASE-8386 for more details.");
-    addDependencyJarsForClasses(conf, classes);
-  }
-
-  /**
-   * Add the jars containing the given classes to the job's configuration
-   * such that JobClient will ship them to the cluster and add them to
-   * the DistributedCache.
-   *
-   * N.B. that this method at most adds one jar per class given. If there is more than one
-   * jar available containing a class with the same name as a given class, we don't define
-   * which of those jars might be chosen.
-   *
-   * @param conf The Hadoop Configuration to modify
-   * @param classes will add just those dependencies needed to find the given classes
-   * @throws IOException if an underlying library call fails.
-   */
-  @InterfaceAudience.Private
-  public static void addDependencyJarsForClasses(Configuration conf,
-      Class<?>... classes) throws IOException {
-
-    FileSystem localFs = FileSystem.getLocal(conf);
-    Set<String> jars = new HashSet<>();
-    // Add jars that are already in the tmpjars variable
-    jars.addAll(conf.getStringCollection("tmpjars"));
-
-    // add jars as we find them to a map of contents jar name so that we can avoid
-    // creating new jars for classes that have already been packaged.
-    Map<String, String> packagedClasses = new HashMap<>();
-
-    // Add jars containing the specified classes
-    for (Class<?> clazz : classes) {
-      if (clazz == null) continue;
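A hedged usage sketch for the two public entry points above; the job name is illustrative, and cluster configuration is assumed to come from the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

class DependencyJarsSketch {
  static Job configureJob() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "demo-job");
    // Ships HBase dependency jars plus jars for the job's configured
    // classes to the cluster via the "tmpjars" configuration value.
    TableMapReduceUtil.addDependencyJars(job);
    // The same "tmpjars" content backs `bin/hbase mapredcp`.
    System.out.println(TableMapReduceUtil.buildDependencyClasspath(job.getConfiguration()));
    return job;
  }
}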

[28/51] [partial] hbase-site git commit: Published site at .

2017-11-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/org/apache/hadoop/hbase/class-use/ExtendedCellBuilder.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/ExtendedCellBuilder.html b/devapidocs/org/apache/hadoop/hbase/class-use/ExtendedCellBuilder.html
index 851f61b..1ee400c 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/ExtendedCellBuilder.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/ExtendedCellBuilder.html
@@ -91,6 +91,16 @@

+org.apache.hadoop.hbase.coprocessor
+Table of Contents
+
+org.apache.hadoop.hbase.regionserver
+
 org.apache.hadoop.hbase.replication
 Multi Cluster Replication
@@ -146,132 +156,141 @@
 ExtendedCellBuilderFactory.create(CellBuilderType type)

+static ExtendedCellBuilder
+ExtendedCellBuilderFactory.create(CellBuilderType type,
+                                  boolean allowSeqIdUpdate)
+Allows creating a cell with the given CellBuilderType.
+
 ExtendedCellBuilder
 ExtendedCellBuilder.setFamily(byte[] family)

 ExtendedCellBuilder
 ExtendedCellBuilderImpl.setFamily(byte[] family)

 ExtendedCellBuilder
 ExtendedCellBuilder.setFamily(byte[] family,
                               int fOffset,
                               int fLength)

 ExtendedCellBuilder
 ExtendedCellBuilderImpl.setFamily(byte[] family,
                                   int fOffset,
                                   int fLength)

 ExtendedCellBuilder
 ExtendedCellBuilder.setQualifier(byte[] qualifier)

 ExtendedCellBuilder
 ExtendedCellBuilderImpl.setQualifier(byte[] qualifier)

 ExtendedCellBuilder
 ExtendedCellBuilder.setQualifier(byte[] qualifier,
                                  int qOffset,
                                  int qLength)

 ExtendedCellBuilder
 ExtendedCellBuilderImpl.setQualifier(byte[] qualifier,
                                      int qOffset,
                                      int qLength)

 ExtendedCellBuilder
 ExtendedCellBuilder.setRow(byte[] row)

 ExtendedCellBuilder
 ExtendedCellBuilderImpl.setRow(byte[] row)

 ExtendedCellBuilder
 ExtendedCellBuilder.setRow(byte[] row,
                            int rOffset,
                            int rLength)

 ExtendedCellBuilder
 ExtendedCellBuilderImpl.setRow(byte[] row,
                                int rOffset,
                                int rLength)

 ExtendedCellBuilder
-ExtendedCellBuilder.setSequenceId(long seqId)
+ExtendedCellBuilder.setSequenceId(long seqId)
+Internal usage.
+
 ExtendedCellBuilder
 ExtendedCellBuilderImpl.setSequenceId(long seqId)

 ExtendedCellBuilder
 ExtendedCellBuilder.setTags(byte[] tags)

 ExtendedCellBuilder
 ExtendedCellBuilderImpl.setTags(byte[] tags)

 ExtendedCellBuilder
 ExtendedCellBuilder.setTags(byte[] tags,
                             int tagsOffset,
                             int tagsLength)

 ExtendedCellBuilder
 ExtendedCellBuilderImpl.setTags(byte[] tags,
                                 int tagsOffset,
                                 int tagsLength)

 ExtendedCellBuilder
 ExtendedCellBuilder.setTimestamp(long timestamp)

 ExtendedCellBuilder
 ExtendedCellBuilderImpl.setTimestamp(long timestamp)

 ExtendedCellBuilder
 ExtendedCellBuilder.setType(byte type)

 ExtendedCellBuilder
 ExtendedCellBuilderImpl.setType(byte type)

 ExtendedCellBuilder
 ExtendedCellBuilder.setType(CellBuilder.DataType type)

 ExtendedCellBuilder
 ExtendedCellBuilderImpl.setType(CellBuilder.DataType type)

 ExtendedCellBuilder
 ExtendedCellBuilder.setValue(byte[] value)

 ExtendedCellBuilder
 ExtendedCellBuilderImpl.setValue(byte[] value)

 ExtendedCellBuilder
 ExtendedCellBuilder.setValue(byte[] value,
                              int vOffset,
                              int vLength)

 ExtendedCellBuilder
 ExtendedCellBuilderImpl.setValue(byte[] value,
                                  int vOffset,
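A one-line sketch of the new factory overload added above; note this is an internal builder API, and the SHALLOW_COPY choice is illustrative.

import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.ExtendedCellBuilder;
import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;

class FactorySketch {
  // A builder whose cells may later have their sequence id updated; the
  // boolean corresponds to the allowSeqIdUpdate parameter listed above.
  static ExtendedCellBuilder newBuilder() {
    return ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY, true);
  }
}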
@@ -306,6 +325,44 @@
 
 
 
+Uses of ExtendedCellBuilder in org.apache.hadoop.hbase.coprocessor
+
+Methods in org.apache.hadoop.hbase.coprocessor that return ExtendedCellBuilder
+
+Modifier and Type
+Method and Description
+
+ExtendedCellBuilder
+RegionCoprocessorEnvironment.getCellBuilder()
+Returns a CellBuilder so that coprocessors can build cells.
+
+Uses of ExtendedCellBuilder in org.apache.hadoop.hbase.regionserver
+
+Methods in org.apache.hadoop.hbase.regionserver that return ExtendedCellBuilder
+
+Modifier and Type
+Method and Description
+
+ExtendedCellBuilder
+RegionCoprocessorHost.RegionEnvironment.getCellBuilder()
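To illustrate the new coprocessor hook-up, a hedged sketch of building a cell from the environment's builder; the family, qualifier, and value are placeholders, and the signatures follow the 3.0.0-SNAPSHOT javadoc rendered above, so they may differ in released versions.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilder;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.util.Bytes;

class CoprocessorCellSketch {
  // Build a Put-type cell through the builder handed out by the environment.
  static Cell buildCell(RegionCoprocessorEnvironment env, byte[] row) {
    return env.getCellBuilder()
        .setRow(row)
        .setFamily(Bytes.toBytes("cf"))
        .setQualifier(Bytes.toBytes("q"))
        .setTimestamp(System.currentTimeMillis())
        .setType(CellBuilder.DataType.Put)
        .setValue(Bytes.toBytes("v"))
        .build();
  }
}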
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/org/apache/hadoop/hbase/class-use/RawCell.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/RawCell.html b/devapidocs/org/apache/hadoop/hbase/class-use/RawCell.html
new file mode 100644
index 000..897bbb5
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/RawCell.html
@@ -0,0 +1,325 @@
+Uses of Interface org.apache.hadoop.hbase.RawCell (Apache HBase 3.0.0-SNAPSHOT API)