[25/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index 68302bf..a5a8905 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -2197,1768 +2197,1775 @@
 2189  
warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
 2190}
 2191
-2192for (ColumnFamilyDescriptor hcd : 
htd.getColumnFamilies()) {
-2193  if (hcd.getTimeToLive() <= 0) {
-2194String message = "TTL for column 
family " + hcd.getNameAsString() + " must be positive.";
-2195
warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
-2196  }
-2197
-2198  // check blockSize
-2199  if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
-2200String message = "Block size for 
column family " + hcd.getNameAsString()
-2201+ "  must be between 1K and 
16MB.";
+2192// check that we have minimum 1 
region replicas
+2193int regionReplicas = 
htd.getRegionReplication();
+2194if (regionReplicas < 1) {
+2195  String message = "Table region 
replication should be at least one.";
+2196  
warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
+2197}
+2198
+2199for (ColumnFamilyDescriptor hcd : 
htd.getColumnFamilies()) {
+2200  if (hcd.getTimeToLive() <= 0) {
+2201String message = "TTL for column 
family " + hcd.getNameAsString() + " must be positive.";
 2202
warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
 2203  }
 2204
-2205  // check versions
-2206  if (hcd.getMinVersions() < 0) {
-2207String message = "Min versions 
for column family " + hcd.getNameAsString()
-2208  + "  must be positive.";
+2205  // check blockSize
+2206  if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
+2207String message = "Block size for 
column family " + hcd.getNameAsString()
+2208+ "  must be between 1K and 
16MB.";
 2209
warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
 2210  }
-2211  // max versions already being 
checked
-2212
-2213  // HBASE-13776 Setting illegal 
versions for ColumnFamilyDescriptor
-2214  //  does not throw 
IllegalArgumentException
-2215  // check minVersions <= maxVersions
-2216  if (hcd.getMinVersions() > hcd.getMaxVersions()) {
-2217String message = "Min versions 
for column family " + hcd.getNameAsString()
-2218+ " must be less than the 
Max versions.";
-2219
warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
-2220  }
-2221
-2222  // check replication scope
-2223  checkReplicationScope(hcd);
-2224  // check bloom filter type
-2225  checkBloomFilterType(hcd);
-2226
-2227  // check data replication factor, 
it can be 0(default value) when user has not explicitly
-2228  // set the value, in this case we 
use default replication factor set in the file system.
-2229  if (hcd.getDFSReplication() < 0) {
-2230String message = "HFile 
Replication for column family " + hcd.getNameAsString()
-2231+ "  must be greater than 
zero.";
-2232
warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
-2233  }
-2234
-2235  // TODO: should we check 
coprocessors and encryption ?
-2236}
-2237  }
-2238
-2239  private void 
checkReplicationScope(ColumnFamilyDescriptor hcd) throws IOException{
-2240// check replication scope
-2241WALProtos.ScopeType scop = 
WALProtos.ScopeType.valueOf(hcd.getScope());
-2242if (scop == null) {
-2243  String message = "Replication 
scope for column family "
-2244  + hcd.getNameAsString() + " is 
" + hcd.getScope() + " which is invalid.";
+2211
+2212  // check versions
+2213  if (hcd.getMinVersions() < 0) {
+2214String message = "Min versions 
for column family " + hcd.getNameAsString()
+2215  + "  must be positive.";
+2216
warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
+2217  }
+2218  // max versions already being 
checked
+2219
+2220  // HBASE-13776 Setting illegal 
versions for ColumnFamilyDescriptor
+2221  //  does not throw 
IllegalArgumentException
+2222  // check minVersions <= maxVersions
+2223  if (hcd.getMinVersions() > hcd.getMaxVersions()) {
+2224String message = "Min versions 
for column family " + hcd.getNameAsString()
+2225+ " must be less than the 
Max versions.";
+2226
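The diff above shows HMaster's table-descriptor sanity checks: each rule either logs a warning or throws, depending on a configuration flag. A minimal sketch of that warn-or-throw pattern follows; the TableChecks class and the flattened parameters are hypothetical stand-ins, not the real HMaster API.

    import java.io.IOException;

    // Minimal sketch of the warn-or-throw sanity checks quoted above.
    // TableChecks and the simplified parameters are hypothetical stand-ins.
    final class TableChecks {
        static void warnOrThrow(boolean logWarn, String message) throws IOException {
            if (logWarn) {
                System.err.println("Warning: " + message); // HMaster logs via slf4j here
                return;
            }
            throw new IOException(message);
        }

        static void sanityCheck(boolean logWarn, int regionReplicas, long ttl, int blockSize,
                                int minVersions, int maxVersions) throws IOException {
            if (regionReplicas < 1) {
                warnOrThrow(logWarn, "Table region replication should be at least one.");
            }
            if (ttl <= 0) {
                warnOrThrow(logWarn, "TTL must be positive.");
            }
            if (blockSize < 1024 || blockSize > 16 * 1024 * 1024) {
                warnOrThrow(logWarn, "Block size must be between 1K and 16MB.");
            }
            if (minVersions < 0) {
                warnOrThrow(logWarn, "Min versions must be positive.");
            }
            if (minVersions > maxVersions) {
                warnOrThrow(logWarn, "Min versions must be less than the Max versions.");
            }
        }
    }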

[25/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/executor/EventType.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/executor/EventType.html 
b/devapidocs/org/apache/hadoop/hbase/executor/EventType.html
index c3ff4e0..67d3baf 100644
--- a/devapidocs/org/apache/hadoop/hbase/executor/EventType.html
+++ b/devapidocs/org/apache/hadoop/hbase/executor/EventType.html
@@ -296,128 +296,135 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?i
 
 
 
+M_RS_SWITCH_RPC_THROTTLE
+Messages originating from Master to RS.
+ M_RS_SWITCH_RPC_THROTTLE
+ Master asking RS to switch rpc throttle state.
+
+
+
 M_SERVER_SHUTDOWN
 Master controlled events to be executed on the master
  M_SERVER_SHUTDOWN
  Master is processing shutdown of a RS
 
 
-
+
 M_ZK_REGION_CLOSING
 M_ZK_REGION_CLOSING
  Master adds this region as closing in ZK
 
 
-
+
 M_ZK_REGION_OFFLINE
 M_ZK_REGION_OFFLINE
  Master adds this region as offline in ZK
 
 
-
+
 RS_COMPACTED_FILES_DISCHARGER
 RS compacted files discharger 
 
  RS_COMPACTED_FILES_DISCHARGER
 
 
-
+
 RS_LOG_REPLAY
 RS wal recovery work items (splitting wals) to be executed 
on the RS.
 
  RS_LOG_REPLAY
 
 
-
+
 RS_PARALLEL_SEEK
 RS controlled events to be executed on the RS.
 
  RS_PARALLEL_SEEK
 
 
-
+
 RS_REFRESH_PEER
 RS refresh peer.
 
  RS_REFRESH_PEER
 
 
-
+
 RS_REGION_REPLICA_FLUSH
 RS flush triggering from secondary region replicas to 
primary region replica.
 
 
-
+
 RS_REPLAY_SYNC_REPLICATION_WAL
 RS replay sync replication wal.
 
  RS_REPLAY_SYNC_REPLICATION_WAL
 
 
-
+
 RS_ZK_REGION_CLOSED
 RS_ZK_REGION_CLOSED
 
  RS has finished closing a region.
 
 
-
+
 RS_ZK_REGION_FAILED_OPEN
 RS_ZK_REGION_FAILED_OPEN
 
  RS failed to open a region.
 
 
-
+
 RS_ZK_REGION_MERGED
 RS_ZK_REGION_MERGE
 
  RS region merge has completed and is notifying the master.
 
 
-
+
 RS_ZK_REGION_MERGING
 RS_ZK_REGION_MERGING
 
  RS has started merging regions after master says it's ok to move on.
 
 
-
+
 RS_ZK_REGION_OPENED
 RS_ZK_REGION_OPENED
 
  RS has finished opening a region.
 
 
-
+
 RS_ZK_REGION_OPENING
 RS_ZK_REGION_OPENING
 
  RS is in process of opening a region.
 
 
-
+
 RS_ZK_REGION_SPLIT
 RS_ZK_REGION_SPLIT
 
  RS split has completed and is notifying the master.
 
 
-
+
 RS_ZK_REGION_SPLITTING
 RS_ZK_REGION_SPLITTING
 
  RS has started a region split after master says it's ok to move on.
 
 
-
+
 RS_ZK_REQUEST_REGION_MERGE
 RS_ZK_REQUEST_REGION_MERGE
 
  RS has requested to merge two regions.
 
 
-
+
 RS_ZK_REQUEST_REGION_SPLIT
 RS_ZK_REQUEST_REGION_SPLIT
 
@@ -726,13 +733,25 @@ the order they are declared.
  Master asking RS to open a  priority region.
 
 
+
+
+
+
+
+M_RS_SWITCH_RPC_THROTTLE
+public static final EventType M_RS_SWITCH_RPC_THROTTLE
+Messages originating from Master to RS.
+ M_RS_SWITCH_RPC_THROTTLE
+ Master asking RS to switch rpc throttle state.
+
+
 
 
 
 
 
 C_M_MERGE_REGION
-public static final EventType C_M_MERGE_REGION
+public static final EventType C_M_MERGE_REGION
 Messages originating from Client to Master.
  C_M_MERGE_REGION
  Client asking Master to merge regions.
@@ -744,7 +763,7 @@ the order they are declared.
 
 
 C_M_DELETE_TABLE
-public static final EventType C_M_DELETE_TABLE
+public static final EventType C_M_DELETE_TABLE
 Messages originating from Client to Master.
  C_M_DELETE_TABLE
  Client asking Master to delete a table.
@@ -756,7 +775,7 @@ the order they are declared.
 
 
 C_M_DISABLE_TABLE
-public static final EventType C_M_DISABLE_TABLE
+public static final EventType C_M_DISABLE_TABLE
 Messages originating from Client to Master.
  C_M_DISABLE_TABLE
  Client asking Master to disable a table.
@@ -768,7 +787,7 @@ the order they are declared.
 
 
 C_M_ENABLE_TABLE
-public static final EventType C_M_ENABLE_TABLE
+public static final EventType C_M_ENABLE_TABLE
 Messages originating from Client to Master.
  C_M_ENABLE_TABLE
  Client asking Master to enable a table.
@@ -780,7 +799,7 @@ the order they are declared.
 
 
 C_M_MODIFY_TABLE
-public static final EventType C_M_MODIFY_TABLE
+public static final EventType C_M_MODIFY_TABLE
 Messages originating from Client to Master.
  C_M_MODIFY_TABLE
  Client asking Master to modify a table.
@@ -792,7 +811,7 @@ the order they are declared.
 
 
 C_M_ADD_FAMILY
-public static final EventType C_M_ADD_FAMILY
+public static final EventType C_M_ADD_FAMILY
 Messages originating from Client to Master.
  C_M_ADD_FAMILY
  Client asking Master to add family to table.
@@ -804,7 +823,7 @@ the order they are declared.
 
 
 C_M_DELETE_FAMILY
-public static final EventType C_M_DELETE_FAMILY
+public static final EventType C_M_DELETE_FAMILY
 Messages originating from Client to Master.
  C_M_DELETE_FAMILY
  Client asking Master to delete family of table.
@@ -816,7 +835,7 @@ the order they are declared.
 
 
 C_M_MODIFY_FAMILY
-public static final EventType C_M_MODIFY_FAMILY
+public static final EventType 
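The listing above follows a naming convention: M_RS_* events originate from the Master to a RegionServer, C_M_* from a Client to the Master, and RS_ZK_* record RegionServer state transitions in ZooKeeper. An illustrative subset of that convention as a plain enum (not the full HBase EventType):

    // Illustrative subset of the EventType prefix convention described above.
    enum EventTypeSketch {
        // M_RS_*: Master asking a RegionServer to do something
        M_RS_SWITCH_RPC_THROTTLE,
        // C_M_*: Client asking the Master to do something
        C_M_MERGE_REGION,
        C_M_DELETE_TABLE,
        // RS_ZK_*: RegionServer state changes recorded in ZooKeeper
        RS_ZK_REGION_OPENED,
        RS_ZK_REGION_SPLIT
    }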

[25/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.LocateRequest.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.LocateRequest.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.LocateRequest.html
index 9aa9b59..ac7e0ea 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.LocateRequest.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.LocateRequest.html
@@ -28,520 +28,565 @@
 020import static 
org.apache.hadoop.hbase.HConstants.NINES;
 021import static 
org.apache.hadoop.hbase.HConstants.ZEROES;
 022import static 
org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-023import static 
org.apache.hadoop.hbase.client.ConnectionUtils.createClosestRowAfter;
-024import static 
org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
-025import static 
org.apache.hadoop.hbase.client.RegionInfo.createRegionName;
-026import static 
org.apache.hadoop.hbase.util.Bytes.BYTES_COMPARATOR;
-027import static 
org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
-028
-029import java.io.IOException;
-030import java.util.Arrays;
-031import java.util.HashSet;
-032import java.util.Iterator;
-033import java.util.LinkedHashMap;
-034import java.util.Map;
-035import java.util.Optional;
-036import java.util.Set;
-037import 
java.util.concurrent.CompletableFuture;
-038import 
java.util.concurrent.ConcurrentHashMap;
-039import 
java.util.concurrent.ConcurrentMap;
-040import 
java.util.concurrent.ConcurrentNavigableMap;
-041import 
java.util.concurrent.ConcurrentSkipListMap;
-042import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-043import 
org.apache.hadoop.hbase.HConstants;
-044import 
org.apache.hadoop.hbase.HRegionLocation;
-045import 
org.apache.hadoop.hbase.MetaTableAccessor;
-046import 
org.apache.hadoop.hbase.RegionLocations;
-047import 
org.apache.hadoop.hbase.TableName;
-048import 
org.apache.hadoop.hbase.TableNotFoundException;
-049import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-050import 
org.apache.hadoop.hbase.util.Bytes;
-051import 
org.apache.yetus.audience.InterfaceAudience;
-052import org.slf4j.Logger;
-053import org.slf4j.LoggerFactory;
-054
-055import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-056
-057/**
-058 * The asynchronous locator for regions 
other than meta.
-059 */
-060@InterfaceAudience.Private
-061class AsyncNonMetaRegionLocator {
-062
-063  private static final Logger LOG = 
LoggerFactory.getLogger(AsyncNonMetaRegionLocator.class);
+023import static 
org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.canUpdateOnError;
+024import static 
org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.createRegionLocations;
+025import static 
org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.isGood;
+026import static 
org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.mergeRegionLocations;
+027import static 
org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.removeRegionLocation;
+028import static 
org.apache.hadoop.hbase.client.ConnectionUtils.createClosestRowAfter;
+029import static 
org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
+030import static 
org.apache.hadoop.hbase.client.RegionInfo.createRegionName;
+031import static 
org.apache.hadoop.hbase.util.Bytes.BYTES_COMPARATOR;
+032import static 
org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
+033
+034import java.io.IOException;
+035import java.util.Arrays;
+036import java.util.HashSet;
+037import java.util.Iterator;
+038import java.util.LinkedHashMap;
+039import java.util.Map;
+040import java.util.Optional;
+041import java.util.Set;
+042import 
java.util.concurrent.CompletableFuture;
+043import 
java.util.concurrent.ConcurrentHashMap;
+044import 
java.util.concurrent.ConcurrentMap;
+045import 
java.util.concurrent.ConcurrentNavigableMap;
+046import 
java.util.concurrent.ConcurrentSkipListMap;
+047import 
org.apache.commons.lang3.ObjectUtils;
+048import 
org.apache.hadoop.hbase.DoNotRetryIOException;
+049import 
org.apache.hadoop.hbase.HBaseIOException;
+050import 
org.apache.hadoop.hbase.HConstants;
+051import 
org.apache.hadoop.hbase.HRegionLocation;
+052import 
org.apache.hadoop.hbase.MetaTableAccessor;
+053import 
org.apache.hadoop.hbase.RegionLocations;
+054import 
org.apache.hadoop.hbase.TableName;
+055import 
org.apache.hadoop.hbase.TableNotFoundException;
+056import 
org.apache.hadoop.hbase.client.Scan.ReadType;
+057import 
org.apache.hadoop.hbase.util.Bytes;
+058import 
org.apache.yetus.audience.InterfaceAudience;
+059import org.slf4j.Logger;
+060import org.slf4j.LoggerFactory;
+061
+062import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+063import 

[25/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html
index c4e8c8b..aa58108 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html
@@ -82,602 +82,613 @@
 074  public static final String 
USER_COPROCESSORS_ENABLED_CONF_KEY =
 075"hbase.coprocessor.user.enabled";
 076  public static final boolean 
DEFAULT_USER_COPROCESSORS_ENABLED = true;
-077
-078  private static final Logger LOG = 
LoggerFactory.getLogger(CoprocessorHost.class);
-079  protected Abortable abortable;
-080  /** Ordered set of loaded coprocessors 
with lock */
-081  protected final SortedList<E> coprocEnvironments =
-082      new SortedList<>(new EnvironmentPriorityComparator());
-083  protected Configuration conf;
-084  // unique file prefix to use for local 
copies of jars when classloading
-085  protected String pathPrefix;
-086  protected AtomicInteger loadSequence = 
new AtomicInteger();
-087
-088  public CoprocessorHost(Abortable 
abortable) {
-089this.abortable = abortable;
-090this.pathPrefix = 
UUID.randomUUID().toString();
-091  }
-092
-093  /**
-094   * Not to be confused with the 
per-object _coprocessors_ (above),
-095   * coprocessorNames is static and 
stores the set of all coprocessors ever
-096   * loaded by any thread in this JVM. It 
is strictly additive: coprocessors are
-097   * added to coprocessorNames, by 
checkAndLoadInstance() but are never removed, since
-098   * the intention is to preserve a 
history of all loaded coprocessors for
-099   * diagnosis in case of server crash 
(HBASE-4014).
-100   */
-101  private static Set<String> coprocessorNames =
-102      Collections.synchronizedSet(new HashSet<String>());
-103
-104  public static Set<String> getLoadedCoprocessors() {
-105synchronized (coprocessorNames) {
-106  return new HashSet<>(coprocessorNames);
-107}
-108  }
-109
-110  /**
-111   * Used to create a parameter to the 
HServerLoad constructor so that
-112   * HServerLoad can provide information 
about the coprocessors loaded by this
-113   * regionserver.
-114   * (HBASE-4070: Improve region server 
metrics to report loaded coprocessors
-115   * to master).
-116   */
-117  public Set<String> getCoprocessors() {
-118Set<String> returnValue = new TreeSet<>();
-119for (E e: coprocEnvironments) {
-120  
returnValue.add(e.getInstance().getClass().getSimpleName());
-121}
-122return returnValue;
-123  }
-124
-125  /**
-126   * Load system coprocessors once only. 
Read the class names from configuration.
-127   * Called by constructor.
-128   */
-129  protected void 
loadSystemCoprocessors(Configuration conf, String confKey) {
-130boolean coprocessorsEnabled = 
conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY,
-131  DEFAULT_COPROCESSORS_ENABLED);
-132if (!coprocessorsEnabled) {
-133  return;
-134}
-135
-136Class<?> implClass;
-137
-138// load default coprocessors from 
configure file
-139String[] defaultCPClasses = 
conf.getStrings(confKey);
-140if (defaultCPClasses == null || 
defaultCPClasses.length == 0)
-141  return;
-142
-143int priority = 
Coprocessor.PRIORITY_SYSTEM;
-144for (String className : 
defaultCPClasses) {
-145  className = className.trim();
-146  if (findCoprocessor(className) != 
null) {
-147// If already loaded will just 
continue
-148LOG.warn("Attempted duplicate 
loading of " + className + "; skipped");
-149continue;
-150  }
-151  ClassLoader cl = 
this.getClass().getClassLoader();
-152  
Thread.currentThread().setContextClassLoader(cl);
-153  try {
-154implClass = 
cl.loadClass(className);
-155// Add coprocessors as we go to 
guard against case where a coprocessor is specified twice
-156// in the configuration
-157E env = 
checkAndLoadInstance(implClass, priority, conf);
-158if (env != null) {
-159  
this.coprocEnvironments.add(env);
-160  LOG.info("System coprocessor {} 
loaded, priority={}.", className, priority);
-161  ++priority;
-162}
-163  } catch (Throwable t) {
-164// We always abort if system 
coprocessors cannot be loaded
-165abortServer(className, t);
-166  }
-167}
-168  }
-169
-170  /**
-171   * Load a coprocessor implementation 
into the host
-172   * @param path path to implementation 
jar
-173   * @param className the main class 
name
-174   * @param priority 
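The quoted loadSystemCoprocessors body shows the loading pattern: read class names from configuration, skip ones already loaded, instantiate each one as you go (so duplicates in the config are caught), and abort the server if any system coprocessor fails to load. A minimal, self-contained sketch of that loop; the class and its helpers are simplified stand-ins, not the HBase CoprocessorHost API:

    import java.util.ArrayList;
    import java.util.List;

    // Sketch of the system-coprocessor load loop quoted above.
    // CoprocessorLoaderSketch and alreadyLoaded() are simplified stand-ins.
    class CoprocessorLoaderSketch {
        final List<Object> coprocEnvironments = new ArrayList<>();

        void loadSystemCoprocessors(String[] classNames, int startPriority) {
            int priority = startPriority;
            for (String name : classNames) {
                name = name.trim();
                if (alreadyLoaded(name)) {
                    System.err.println("Attempted duplicate loading of " + name + "; skipped");
                    continue; // already loaded: skip, as the real code does
                }
                try {
                    Class<?> implClass = getClass().getClassLoader().loadClass(name);
                    // Add as we go, so a class listed twice in the config hits the check above.
                    coprocEnvironments.add(implClass.getDeclaredConstructor().newInstance());
                    priority++; // the real host hands each instance the next priority slot
                } catch (Throwable t) {
                    // The real host always aborts the server if a system coprocessor fails.
                    throw new RuntimeException("Aborting: could not load " + name, t);
                }
            }
        }

        boolean alreadyLoaded(String name) {
            return coprocEnvironments.stream()
                .anyMatch(e -> e.getClass().getName().equals(name));
        }
    }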

[25/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/class-use/ScannerCallable.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/ScannerCallable.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/ScannerCallable.html
index 1f18a46..23527cc 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/ScannerCallable.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/ScannerCallable.html
@@ -299,6 +299,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/class-use/ScannerCallableWithReplicas.RetryingRPC.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/ScannerCallableWithReplicas.RetryingRPC.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/ScannerCallableWithReplicas.RetryingRPC.html
index 9a2ae67..33aa682 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/ScannerCallableWithReplicas.RetryingRPC.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/ScannerCallableWithReplicas.RetryingRPC.html
@@ -120,6 +120,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/class-use/ScannerCallableWithReplicas.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/ScannerCallableWithReplicas.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/ScannerCallableWithReplicas.html
index 500c736..1650e8f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/ScannerCallableWithReplicas.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/ScannerCallableWithReplicas.html
@@ -178,6 +178,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/class-use/SecureBulkLoadClient.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/SecureBulkLoadClient.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/SecureBulkLoadClient.html
index 77a7156..45bc20c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/SecureBulkLoadClient.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/SecureBulkLoadClient.html
@@ -173,6 +173,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/class-use/ServerStatisticTracker.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/ServerStatisticTracker.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/ServerStatisticTracker.html
index 504b2ab..6fbcc73 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/ServerStatisticTracker.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/ServerStatisticTracker.html
@@ -228,6 +228,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/class-use/ServiceCaller.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/ServiceCaller.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/ServiceCaller.html
index db0e4cd..f27c44e 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/ServiceCaller.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/ServiceCaller.html
@@ -281,6 +281,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 


[25/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
index 7ec20da..659c8e5 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
@@ -142,9 +142,9 @@
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
 org.apache.hadoop.hbase.security.access.AccessControlFilter.Strategy
-org.apache.hadoop.hbase.security.access.AccessController.OpType
 org.apache.hadoop.hbase.security.access.Permission.Action
 org.apache.hadoop.hbase.security.access.Permission.Scope
+org.apache.hadoop.hbase.security.access.AccessController.OpType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/security/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
index 13f949c..6ef281d 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
@@ -192,8 +192,8 @@
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
 org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection
-org.apache.hadoop.hbase.security.AuthMethod
 org.apache.hadoop.hbase.security.SaslStatus
+org.apache.hadoop.hbase.security.AuthMethod
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.DeleteVersionVisibilityExpressionFilter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.DeleteVersionVisibilityExpressionFilter.html
 
b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.DeleteVersionVisibilityExpressionFilter.html
index 0019854..61c1e7c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.DeleteVersionVisibilityExpressionFilter.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.DeleteVersionVisibilityExpressionFilter.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class VisibilityController.DeleteVersionVisibilityExpressionFilter
+private static class VisibilityController.DeleteVersionVisibilityExpressionFilter
 extends FilterBase
 
 
@@ -268,7 +268,7 @@ extends 
 
 deleteCellVisTags
-private List<Tag> deleteCellVisTags
+private List<Tag> deleteCellVisTags
 
 
 
@@ -277,7 +277,7 @@ extends 
 
 deleteCellVisTagsFormat
-private Byte deleteCellVisTagsFormat
+private Byte deleteCellVisTagsFormat
 
 
 
@@ -294,7 +294,7 @@ extends 
 
 DeleteVersionVisibilityExpressionFilter
-public DeleteVersionVisibilityExpressionFilter(List<Tag> deleteCellVisTags,
+public DeleteVersionVisibilityExpressionFilter(List<Tag> deleteCellVisTags,
                                                Byte deleteCellVisTagsFormat)
 
 
@@ -312,7 +312,7 @@ 

[25/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.html
index 55ab978..21b0e6d 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.html
@@ -18,8 +18,8 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":9,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10};
-var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class HeapMemoryManager
+public class HeapMemoryManager
 extends java.lang.Object
 Manages tuning of heap memory using HeapMemoryTuner. Most of the heap memory is
 split between Memstores and BlockCache. This manager helps in tuning the sizes of both these
@@ -301,7 +301,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 Constructor and Description
 
 
-HeapMemoryManager(ResizableBlockCache blockCache,
+HeapMemoryManager(BlockCache blockCache,
  FlushRequester memStoreFlusher,
  Server server,
  RegionServerAccounting regionServerAccounting)
@@ -316,42 +316,39 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 Method Summary
 
-All Methods  Static Methods  Instance Methods  Concrete Methods
+All Methods  Instance Methods  Concrete Methods
 
 Modifier and Type
 Method and Description
 
 
-static HeapMemoryManager
-create(org.apache.hadoop.conf.Configuration conf,
-  FlushRequester memStoreFlusher,
-  Server server,
-  RegionServerAccounting regionServerAccounting)
-
-
 private boolean
 doInit(org.apache.hadoop.conf.Configuration conf)
 
-
+
 float
 getHeapOccupancyPercent()
 
-
+
 (package private) boolean
 isTunerOn()
 
-
+
 void
 registerTuneObserver(HeapMemoryManager.HeapMemoryTuneObserver observer)
 
-
+
 void
 start(ChoreService service)
 
-
+
 void
 stop()
 
+
+private ResizableBlockCache
+toResizableBlockCache(BlockCache blockCache)
+
 
 
 
@@ -380,7 +377,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -389,7 +386,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 CONVERT_TO_PERCENTAGE
-private static final int CONVERT_TO_PERCENTAGE
+private static final int CONVERT_TO_PERCENTAGE
 
 See Also:
 Constant
 Field Values
@@ -402,7 +399,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 CLUSTER_MINIMUM_MEMORY_THRESHOLD
-private static final int CLUSTER_MINIMUM_MEMORY_THRESHOLD
+private static final int CLUSTER_MINIMUM_MEMORY_THRESHOLD
 
 See Also:
 Constant
 Field Values
@@ -415,7 +412,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 BLOCK_CACHE_SIZE_MAX_RANGE_KEY
-public static final String BLOCK_CACHE_SIZE_MAX_RANGE_KEY
+public static final String BLOCK_CACHE_SIZE_MAX_RANGE_KEY
 
 See Also:
 Constant
 Field Values
@@ -428,7 +425,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 BLOCK_CACHE_SIZE_MIN_RANGE_KEY
-public static final String BLOCK_CACHE_SIZE_MIN_RANGE_KEY
+public static final String BLOCK_CACHE_SIZE_MIN_RANGE_KEY
 
 See Also:
 Constant
 Field Values
@@ -441,7 +438,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 MEMSTORE_SIZE_MAX_RANGE_KEY
-public static final String MEMSTORE_SIZE_MAX_RANGE_KEY
+public static final String MEMSTORE_SIZE_MAX_RANGE_KEY
 
 See 

[25/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
index ea05301..26a93dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
@@ -269,3590 +269,3574 @@
 261   */
 262  protected ClusterConnection 
clusterConnection;
 263
-264  /*
-265   * Long-living meta table locator, 
which is created when the server is started and stopped
-266   * when server shuts down. References 
to this locator shall be used to perform according
-267   * operations in EventHandlers. Primary 
reason for this decision is to make it mockable
-268   * for tests.
-269   */
-270  protected MetaTableLocator 
metaTableLocator;
-271
-272  /**
-273   * Go here to get table descriptors.
-274   */
-275  protected TableDescriptors 
tableDescriptors;
-276
-277  // Replication services. If no 
replication, this handler will be null.
-278  protected ReplicationSourceService 
replicationSourceHandler;
-279  protected ReplicationSinkService 
replicationSinkHandler;
-280
-281  // Compactions
-282  public CompactSplit 
compactSplitThread;
-283
-284  /**
-285   * Map of regions currently being 
served by this region server. Key is the
-286   * encoded region name.  All access 
should be synchronized.
-287   */
-288  protected final Map<String, HRegion> onlineRegions = new ConcurrentHashMap<>();
-289
-290  /**
-291   * Map of encoded region names to the 
DataNode locations they should be hosted on
-292   * We store the value as 
InetSocketAddress since this is used only in HDFS
-293   * API (create() that takes favored 
nodes as hints for placing file blocks).
-294   * We could have used ServerName here 
as the value class, but we'd need to
-295   * convert it to InetSocketAddress at 
some point before the HDFS API call, and
-296   * it seems a bit weird to store 
ServerName since ServerName refers to RegionServers
-297   * and here we really mean DataNode 
locations.
-298   */
-299  protected final Map<String, InetSocketAddress[]> regionFavoredNodesMap =
-300      new ConcurrentHashMap<>();
-301
-302  // Leases
-303  protected Leases leases;
+264  /**
+265   * Go here to get table descriptors.
+266   */
+267  protected TableDescriptors 
tableDescriptors;
+268
+269  // Replication services. If no 
replication, this handler will be null.
+270  protected ReplicationSourceService 
replicationSourceHandler;
+271  protected ReplicationSinkService 
replicationSinkHandler;
+272
+273  // Compactions
+274  public CompactSplit 
compactSplitThread;
+275
+276  /**
+277   * Map of regions currently being 
served by this region server. Key is the
+278   * encoded region name.  All access 
should be synchronized.
+279   */
+280  protected final Map<String, HRegion> onlineRegions = new ConcurrentHashMap<>();
+281
+282  /**
+283   * Map of encoded region names to the 
DataNode locations they should be hosted on
+284   * We store the value as 
InetSocketAddress since this is used only in HDFS
+285   * API (create() that takes favored 
nodes as hints for placing file blocks).
+286   * We could have used ServerName here 
as the value class, but we'd need to
+287   * convert it to InetSocketAddress at 
some point before the HDFS API call, and
+288   * it seems a bit weird to store 
ServerName since ServerName refers to RegionServers
+289   * and here we really mean DataNode 
locations.
+290   */
+291  protected final Map<String, InetSocketAddress[]> regionFavoredNodesMap =
+292      new ConcurrentHashMap<>();
+293
+294  // Leases
+295  protected Leases leases;
+296
+297  // Instance of the hbase executor 
executorService.
+298  protected ExecutorService 
executorService;
+299
+300  // If false, the file system has become 
unavailable
+301  protected volatile boolean fsOk;
+302  protected HFileSystem fs;
+303  protected HFileSystem walFs;
 304
-305  // Instance of the hbase executor 
executorService.
-306  protected ExecutorService 
executorService;
-307
-308  // If false, the file system has become 
unavailable
-309  protected volatile boolean fsOk;
-310  protected HFileSystem fs;
-311  protected HFileSystem walFs;
-312
-313  // Set when a report to the master 
comes back with a message asking us to
-314  // shutdown. Also set by call to stop 
when debugging or running unit tests
-315  // of HRegionServer in isolation.
-316  private volatile boolean stopped = 
false;
-317
-318  // Go down hard. Used if file system 
becomes unavailable and also in
-319  // debugging and unit tests.
-320  private volatile boolean 
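The field documentation above describes the RegionServer's core bookkeeping: onlineRegions is a ConcurrentHashMap keyed by the encoded region name, and regionFavoredNodesMap stores InetSocketAddress arrays because the HDFS favored-nodes API takes socket addresses rather than ServerNames. A tiny sketch of that keyed registration; Region is a reduced stand-in for HRegion:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Sketch of the onlineRegions bookkeeping described above; Region stands in for HRegion.
    class OnlineRegionsSketch {
        static final class Region {
            final String encodedName;
            Region(String encodedName) { this.encodedName = encodedName; }
        }

        // Keyed by encoded region name, as documented for HRegionServer.onlineRegions.
        private final Map<String, Region> onlineRegions = new ConcurrentHashMap<>();

        void register(Region r)           { onlineRegions.put(r.encodedName, r); }
        Region deregister(String encoded) { return onlineRegions.remove(encoded); } // null if absent
        Region get(String encoded)        { return onlineRegions.get(encoded); }
    }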

[25/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.html
index 89316ff..162edad 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":9,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":9,"i9":10,"i10":10,"i11":10,"i12":10,"i13":9,"i14":9,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10};
+var methods = 
{"i0":9,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":9,"i12":10,"i13":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -129,7 +129,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class DeleteNamespaceProcedure
+public class DeleteNamespaceProcedure
 extends AbstractStateMachineNamespaceProcedure<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteNamespaceState>
 The procedure to remove a namespace.
 
@@ -191,10 +191,6 @@ extends private NamespaceDescriptor
 nsDescriptor
 
-
-private Boolean
-traceEnabled
-
 
 
 
@@ -210,6 +206,13 @@ extends Procedure
 NO_PROC_ID,
 NO_TIMEOUT
 
+
+
+
+
+Fields inherited from 
interfaceorg.apache.hadoop.hbase.master.procedure.TableProcedureInterface
+DUMMY_NAMESPACE_TABLE_NAME
+
 
 
 
@@ -251,17 +254,17 @@ extends Method and Description
 
 
-protected static void
+private static void
 deleteDirectory(MasterProcedureEnvenv,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringnamespaceName)
 Delete the namespace directories from the file system
 
 
 
-protected static void
-deleteFromNSTable(MasterProcedureEnvenv,
- https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringnamespaceName)
-delete the row from namespace table
+private static void
+deleteNamespace(MasterProcedureEnvenv,
+   https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringnamespaceName)
+delete the row from the ns family in meta table.
 
 
 
@@ -301,93 +304,52 @@ extends 
-private static TableNamespaceManager
-getTableNamespaceManager(MasterProcedureEnv env)
-
-
 TableProcedureInterface.TableOperationType
 getTableOperationType()
 Given an operation type we can take decisions about what to 
do with pending operations.
 
 
-
+
 protected boolean
 isRollbackSupported(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteNamespaceState state)
 Used by the default implementation of abort() to know if 
the current state can be aborted
  and rollback can be triggered.
 
 
-
-private https://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">Boolean
-isTraceEnabled()
-The procedure could be restarted from a different 
machine.
-
-
-
+
 private boolean
 prepareDelete(MasterProcedureEnv env)
 Action before any real action of deleting namespace.
 
 
-
-protected static void
-removeFromZKNamespaceManager(MasterProcedureEnv env,
-String namespaceName)
-remove from ZooKeeper.
-
-
-
-protected static void
+
+private static void
 removeNamespaceQuota(MasterProcedureEnv env,
 String namespaceName)
 remove quota for the namespace
 
 
-
-private void
-rollbackDeleteDirectory(MasterProcedureEnv env)
-undo delete directory
-
-
-
+
 protected void
 rollbackState(MasterProcedureEnv env,
 org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteNamespaceState state)
 called to perform the rollback of the specified state
 
 
-
-private void
-rollbacRemoveNamespaceQuota(MasterProcedureEnv env)
-undo remove quota for the namespace
-
-
-
+
 protected void
 serializeStateData(ProcedureStateSerializer serializer)
 The user-level code of the procedure may have some state to
  persist (e.g.
 
 
-
-private void
-undoDeleteFromNSTable(MasterProcedureEnv env)
-undo the 

[25/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AccessControlLists.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AccessControlLists.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AccessControlLists.html
index e1f2ceb..78cac0b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AccessControlLists.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AccessControlLists.html
@@ -139,503 +139,503 @@
 131   * @throws IOException in the case of 
an error accessing the metadata table
 132   */
 133  static void 
addUserPermission(Configuration conf, UserPermission userPerm, Table t,
-134boolean 
mergeExistingPermissions) throws IOException {
-135Permission.Action[] actions = 
userPerm.getActions();
-136byte[] rowKey = 
userPermissionRowKey(userPerm);
-137Put p = new Put(rowKey);
-138byte[] key = 
userPermissionKey(userPerm);
-139
-140if ((actions == null) || 
(actions.length == 0)) {
-141  String msg = "No actions associated 
with user '" + Bytes.toString(userPerm.getUser()) + "'";
-142  LOG.warn(msg);
-143  throw new IOException(msg);
-144}
-145
-146Set<Permission.Action> actionSet = new TreeSet<Permission.Action>();
-147if(mergeExistingPermissions){
-148  List<UserPermission> perms = getUserPermissions(conf, rowKey, null, null, null, false);
-149  UserPermission currentPerm = 
null;
-150  for (UserPermission perm : perms) 
{
-151if (Bytes.equals(perm.getUser(), userPerm.getUser())
-152 && ((userPerm.isGlobal() && ACL_TABLE_NAME.equals(perm.getTableName()))
-153|| perm.tableFieldsEqual(userPerm))) {
-154  currentPerm = perm;
-155  break;
-156}
-157  }
-158
-159  if(currentPerm != null && currentPerm.getActions() != null){
-160
actionSet.addAll(Arrays.asList(currentPerm.getActions()));
-161  }
-162}
-163
-164// merge current action with new 
action.
-165
actionSet.addAll(Arrays.asList(actions));
-166
-167// serialize to byte array.
-168byte[] value = new 
byte[actionSet.size()];
-169int index = 0;
-170for (Permission.Action action : 
actionSet) {
-171  value[index++] = action.code();
-172}
-173
p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
-174.setRow(p.getRow())
-175.setFamily(ACL_LIST_FAMILY)
-176.setQualifier(key)
-177.setTimestamp(p.getTimestamp())
-178.setType(Type.Put)
-179.setValue(value)
-180.build());
-181if (LOG.isDebugEnabled()) {
-182  LOG.debug("Writing permission with 
rowKey "+
-183  Bytes.toString(rowKey)+" "+
-184  Bytes.toString(key)+": 
"+Bytes.toStringBinary(value)
-185  );
-186}
-187try {
-188  /**
-189   * TODO: Use Table.put(Put) 
instead. This Table.put() happens within the RS. We are already in
-190   * AccessController. Means already 
there was an RPC happened to server (Actual grant call from
-191   * client side). At RpcServer we 
have a ThreadLocal where we keep the CallContext and inside
-192   * that the current RPC called user 
info is set. The table on which put was called is created
-193   * via the RegionCP env and that 
uses a special Connection. The normal RPC channel will be by
-194   * passed here means there would 
have no further contact on to the RpcServer. So the
-195   * ThreadLocal is never getting 
reset. We ran the new put as a super user (User.runAsLoginUser
-196   * where the login user is the user 
who started RS process) but still as per the RPC context
-197   * it is the old user. When 
AsyncProcess was used, the execute happen via another thread from
-198   * pool and so old ThreadLocal 
variable is not accessible and so it looks as if no Rpc context
-199   * and we were relying on the super 
user who starts the RS process.
-200   */
-201  
t.put(Collections.singletonList(p));
-202} finally {
-203  t.close();
-204}
-205  }
-206
-207  static void 
addUserPermission(Configuration conf, UserPermission userPerm, Table t)
-208  throws IOException{
-209addUserPermission(conf, userPerm, t, 
false);
-210  }
-211
-212  /**
-213   * Removes a previously granted 
permission from the stored access control
-214   * lists.  The {@link TablePermission} 
being removed must exactly match what
-215   * is stored -- no wildcard matching is 
attempted.  Ie, if user "bob" has
-216   * been granted "READ" access to the 
"data" table, but only to column family
-217   * plus qualifier "info:colA", then 
trying to call this method with only
-218   * user "bob" and the table name "data" 
(but without specifying the
-219   * column qualifier "info:colA") will 
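The addUserPermission body above merges any existing grants for the user with the newly granted actions in a sorted set, then serializes each action's single-byte code as the cell value. A compact sketch of that merge-and-encode step; the Action enum and its codes are illustrative stand-ins, not the exact HBase values:

    import java.util.Arrays;
    import java.util.Set;
    import java.util.TreeSet;

    // Sketch of the merge-and-encode step in addUserPermission above.
    // The Action enum and its code characters are illustrative stand-ins.
    class PermissionMergeSketch {
        enum Action {
            READ('R'), WRITE('W'), CREATE('C'), ADMIN('A');
            final char code;
            Action(char code) { this.code = code; }
        }

        // Union existing and newly granted actions, then emit one code byte per action.
        static byte[] mergeAndEncode(Action[] existing, Action[] granted) {
            Set<Action> actionSet = new TreeSet<>();           // sorted, duplicate-free
            if (existing != null) actionSet.addAll(Arrays.asList(existing));
            actionSet.addAll(Arrays.asList(granted));
            byte[] value = new byte[actionSet.size()];
            int i = 0;
            for (Action a : actionSet) {
                value[i++] = (byte) a.code;                    // one byte per action, as above
            }
            return value;
        }
    }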

[25/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.

2018-10-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
index 0af8acd..c5f21ac 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
@@ -645,1615 +645,1597 @@
 637
proc.afterReplay(getEnvironment());
 638  }
 639});
-640
-641// 4. Push the procedures to the 
timeout executor
-642waitingTimeoutList.forEach(proc -> {
-643  proc.afterReplay(getEnvironment());
-644  timeoutExecutor.add(proc);
-645});
-646// 5. restore locks
-647restoreLocks();
-648// 6. Push the procedure to the 
scheduler
-649
failedList.forEach(scheduler::addBack);
-650runnableList.forEach(p -> {
-651  p.afterReplay(getEnvironment());
-652  if (!p.hasParent()) {
-653
sendProcedureLoadedNotification(p.getProcId());
-654  }
-655  // If the procedure holds the lock, 
put the procedure in front
-656  // If its parent holds the lock, 
put the procedure in front
-657  // TODO. Is that possible that its 
ancestor holds the lock?
-658  // For now, the deepest procedure 
hierarchy is:
-659  // ModifyTableProcedure -> ReopenTableProcedure ->
-660  // MoveTableProcedure -> Unassign/AssignProcedure
-661  // But ModifyTableProcedure and 
ReopenTableProcedure won't hold the lock
-662  // So, check parent lock is enough (a tricky case is resolved by HBASE-21384).
-663  // If some one change or add new 
procedures making 'grandpa' procedure
-664  // holds the lock, but parent 
procedure don't hold the lock, there will
-665  // be a problem here. We have to 
check one procedure's ancestors.
-666  // And we need to change LockAndQueue.hasParentLock(Procedure<?> proc) method
-667  // to check all ancestors too.
-668  if (p.isLockedWhenLoading() || (p.hasParent() && procedures
-669  .get(p.getParentProcId()).isLockedWhenLoading())) {
-670scheduler.addFront(p, false);
-671  } else {
-672// if it was not, it can wait.
-673scheduler.addBack(p, false);
-674  }
-675});
-676// After all procedures put into the 
queue, signal the worker threads.
-677// Otherwise, there is a race 
condition. See HBASE-21364.
-678scheduler.signalAll();
-679  }
+640// 4. restore locks
+641restoreLocks();
+642
+643// 5. Push the procedures to the 
timeout executor
+644waitingTimeoutList.forEach(proc -> {
+645  
proc.afterReplay(getEnvironment());
+646  timeoutExecutor.add(proc);
+647});
+648
+649// 6. Push the procedure to the 
scheduler
+650
failedList.forEach(scheduler::addBack);
+651runnableList.forEach(p -> {
+652  p.afterReplay(getEnvironment());
+653  if (!p.hasParent()) {
+654
sendProcedureLoadedNotification(p.getProcId());
+655  }
+656  scheduler.addBack(p);
+657});
+658// After all procedures put into the 
queue, signal the worker threads.
+659// Otherwise, there is a race 
condition. See HBASE-21364.
+660scheduler.signalAll();
+661  }
+662
+663  /**
+664   * Initialize the procedure executor, 
but do not start workers. We will start them later.
+665   * <p/>
+666   * It calls 
ProcedureStore.recoverLease() and ProcedureStore.load() to recover the lease, 
and
+667   * ensure a single executor, and start 
the procedure replay to resume and recover the previous
+668   * pending and in-progress 
procedures.
+669   * @param numThreads number of threads 
available for procedure execution.
+670   * @param abortOnCorruption true if you 
want to abort your service in case a corrupted procedure
+671   *  is found on replay. 
otherwise false.
+672   */
+673  public void init(int numThreads, 
boolean abortOnCorruption) throws IOException {
+674// We have numThreads executor + one 
timer thread used for timing out
+675// procedures and triggering periodic 
procedures.
+676this.corePoolSize = numThreads;
+677this.maxPoolSize = 10 * numThreads;
+678LOG.info("Starting {} core workers 
(bigger of cpus/4 or 16) with max (burst) worker count={}",
+679corePoolSize, maxPoolSize);
 680
-681  /**
-682   * Initialize the procedure executor, 
but do not start workers. We will start them later.
-683   * <p/>
-684   * It calls 
ProcedureStore.recoverLease() and ProcedureStore.load() to recover the lease, 
and
-685   * ensure a single executor, and start 
the procedure replay to resume and recover the previous
-686   * pending and in-progress 
procedures.
-687   * @param numThreads number of threads 
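The replay code above decides, for each recovered procedure, whether it goes to the front of the scheduler (it or its parent already holds a lock) or the back (it can wait), and only signals the worker threads once everything is queued. A minimal sketch of that decision; Proc is a simplified stand-in for the real Procedure class:

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.List;

    // Sketch of the replay scheduling decision above; Proc is a simplified stand-in.
    class ReplaySchedulerSketch {
        static class Proc {
            boolean lockedWhenLoading;
            Proc parent; // null when the procedure has no parent
        }

        final Deque<Proc> runQueue = new ArrayDeque<>();

        void scheduleRecovered(List<Proc> runnable) {
            for (Proc p : runnable) {
                // A procedure that holds the lock (or whose parent does) must run first,
                // or it could deadlock behind procedures waiting on that same lock.
                if (p.lockedWhenLoading || (p.parent != null && p.parent.lockedWhenLoading)) {
                    runQueue.addFirst(p);
                } else {
                    runQueue.addLast(p); // if it was not, it can wait
                }
            }
            // Workers are signalled once, after all procedures are queued (see HBASE-21364).
        }
    }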

[25/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
index c7d99b2..9d1542c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
@@ -382,1357 +382,1365 @@
 374for (int i = 0; i < this.curFunctionCosts.length; i++) {
 375  curFunctionCosts[i] = 
tempFunctionCosts[i];
 376}
-377LOG.info("start 
StochasticLoadBalancer.balancer, initCost=" + currentCost + ", functionCost="
-378+ functionCost());
+377double initCost = currentCost;
+378double newCost = currentCost;
 379
-380double initCost = currentCost;
-381double newCost = currentCost;
-382
-383long computedMaxSteps;
-384if (runMaxSteps) {
-385  computedMaxSteps = 
Math.max(this.maxSteps,
-386  ((long)cluster.numRegions * 
(long)this.stepsPerRegion * (long)cluster.numServers));
-387} else {
-388  computedMaxSteps = 
Math.min(this.maxSteps,
-389  ((long)cluster.numRegions * 
(long)this.stepsPerRegion * (long)cluster.numServers));
-390}
-391// Perform a stochastic walk to see 
if we can get a good fit.
-392long step;
-393
-394for (step = 0; step < computedMaxSteps; step++) {
-395  Cluster.Action action = 
nextAction(cluster);
-396
-397  if (action.type == Type.NULL) {
-398continue;
-399  }
-400
-401  cluster.doAction(action);
-402  updateCostsWithAction(cluster, 
action);
-403
-404  newCost = computeCost(cluster, 
currentCost);
-405
-406  // Should this be kept?
-407  if (newCost < currentCost) {
-408currentCost = newCost;
-409
-410// save for JMX
-411curOverallCost = currentCost;
-412for (int i = 0; i < this.curFunctionCosts.length; i++) {
-413  curFunctionCosts[i] = 
tempFunctionCosts[i];
-414}
-415  } else {
-416// Put things back the way they 
were before.
-417// TODO: undo by remembering old 
values
-418Action undoAction = 
action.undoAction();
-419cluster.doAction(undoAction);
-420updateCostsWithAction(cluster, 
undoAction);
-421  }
-422
-423  if (EnvironmentEdgeManager.currentTime() - startTime > maxRunningTime) {
-425break;
-426  }
-427}
-428long endTime = 
EnvironmentEdgeManager.currentTime();
-429
-430
metricsBalancer.balanceCluster(endTime - startTime);
-431
-432// update costs metrics
-433updateStochasticCosts(tableName, 
curOverallCost, curFunctionCosts);
-434if (initCost  currentCost) {
-435  plans = 
createRegionPlans(cluster);
-436  LOG.info("Finished computing new 
load balance plan. Computation took {}" +
-437" to try {} different iterations. 
 Found a solution that moves " +
-438"{} regions; Going from a 
computed cost of {}" +
-439" to a new cost of {}", 
java.time.Duration.ofMillis(endTime - startTime),
-440step, plans.size(), initCost, 
currentCost);
-441  return plans;
-442}
-443LOG.info("Could not find a better 
load balance plan.  Tried {} different configurations in " +
-444  "{}, and did not find anything with 
a computed cost less than {}", step,
-445  java.time.Duration.ofMillis(endTime 
- startTime), initCost);
-446return null;
-447  }
-448
-449  /**
-450   * update costs to JMX
-451   */
-452  private void 
updateStochasticCosts(TableName tableName, Double overall, Double[] subCosts) 
{
-453if (tableName == null) return;
-454
-455// check if the metricsBalancer is 
MetricsStochasticBalancer before casting
-456if (metricsBalancer instanceof 
MetricsStochasticBalancer) {
-457  MetricsStochasticBalancer balancer 
= (MetricsStochasticBalancer) metricsBalancer;
-458  // overall cost
-459  
balancer.updateStochasticCost(tableName.getNameAsString(),
-460"Overall", "Overall cost", 
overall);
-461
-462  // each cost function
-463  for (int i = 0; i  
costFunctions.length; i++) {
-464CostFunction costFunction = 
costFunctions[i];
-465String costFunctionName = 
costFunction.getClass().getSimpleName();
-466Double costPercent = (overall == 
0) ? 0 : (subCosts[i] / overall);
-467// TODO: cost function may need a 
specific description
-468
balancer.updateStochasticCost(tableName.getNameAsString(), costFunctionName,
-469  "The percent of " + 
costFunctionName, costPercent);
-470  }
-471}
-472  
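
The hunk above is the core of the balancer's stochastic walk: propose a random action, apply it, keep it only if the recomputed cluster cost drops, otherwise undo it, and stop once either the step budget (computedMaxSteps) or the time budget (maxRunningTime) is exhausted. A minimal sketch of that accept/undo search in plain Java follows; all names here are illustrative stand-ins, not the HBase classes.

import java.util.Random;
import java.util.function.DoubleSupplier;

/** Greedy stochastic search: keep a random move only if it lowers the cost. */
public final class StochasticWalk {

  /** Stand-in for Cluster.Action: a move that knows its inverse. */
  public interface Move { Move undo(); }

  /** Stand-in for the mutable cluster state. */
  public interface MutableState {
    Move randomMove(Random rng);
    void apply(Move move);
  }

  private final Random rng = new Random();

  /** Returns the best cost found within the step and time budgets. */
  public double walk(MutableState state, DoubleSupplier cost, long maxSteps, long maxMillis) {
    long start = System.currentTimeMillis();
    double current = cost.getAsDouble();
    for (long step = 0; step < maxSteps; step++) {
      Move move = state.randomMove(rng);
      state.apply(move);
      double candidate = cost.getAsDouble();
      if (candidate < current) {
        current = candidate;          // keep the improvement
      } else {
        state.apply(move.undo());     // roll back, as the balancer's undoAction() does
      }
      if (System.currentTimeMillis() - start > maxMillis) {
        break;                        // time budget exhausted
      }
    }
    return current;
  }
}

The design point mirrored here is that a rejected move is rolled back with an explicit inverse action instead of snapshotting the whole cluster state, which keeps each step cheap.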

[25/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
index 7e921cb..5f4ca7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
@@ -57,343 +57,345 @@
 049  // A flag represents whether could stop skipping KeyValues for MVCC
 050  // if have encountered the next row. Only used for reversed scan
 051  private boolean stopSkippingKVsIfNextRow = false;
-052  // last iterated KVs by seek (to restore the iterator state after reseek)
-053  private Cell last = null;
-054
-055  // flag to indicate if this scanner is closed
-056  protected boolean closed = false;
-057
-058
-059  /**
-060   * Scanners are ordered from 0 (oldest) to newest in increasing order.
-061   */
-062  protected SegmentScanner(Segment segment, long readPoint) {
-063    this.segment = segment;
-064    this.readPoint = readPoint;
-065    //increase the reference count so the underlying structure will not be de-allocated
-066    this.segment.incScannerCount();
-067    iter = segment.iterator();
-068    // the initialization of the current is required for working with heap of SegmentScanners
-069    updateCurrent();
-070    if (current == null) {
-071      // nothing to fetch from this scanner
-072      close();
-073    }
-074  }
-075
-076  /**
-077   * Look at the next Cell in this scanner, but do not iterate the scanner
-078   * @return the currently observed Cell
-079   */
-080  @Override
-081  public Cell peek() {          // sanity check, the current should be always valid
-082    if (closed) {
-083      return null;
-084    }
-085    if (current != null && current.getSequenceId() > readPoint) {
-086      throw new RuntimeException("current is invalid: read point is " + readPoint + ", " +
-087          "while current sequence id is " + current.getSequenceId());
-088    }
-089    return current;
-090  }
-091
-092  /**
-093   * Return the next Cell in this scanner, iterating the scanner
-094   * @return the next Cell or null if end of scanner
-095   */
-096  @Override
-097  public Cell next() throws IOException {
-098    if (closed) {
-099      return null;
-100    }
-101    Cell oldCurrent = current;
-102    updateCurrent();                  // update the currently observed Cell
-103    return oldCurrent;
-104  }
-105
-106  /**
-107   * Seek the scanner at or after the specified Cell.
-108   * @param cell seek value
-109   * @return true if scanner has values left, false if end of scanner
-110   */
-111  @Override
-112  public boolean seek(Cell cell) throws IOException {
-113    if (closed) {
-114      return false;
-115    }
-116    if(cell == null) {
-117      close();
-118      return false;
-119    }
-120    // restart the iterator from new key
-121    iter = getIterator(cell);
-122    // last is going to be reinitialized in the next getNext() call
-123    last = null;
-124    updateCurrent();
-125    return (current != null);
-126  }
-127
-128  protected Iterator<Cell> getIterator(Cell cell) {
-129    return segment.tailSet(cell).iterator();
-130  }
-131
-132  /**
-133   * Reseek the scanner at or after the specified KeyValue.
-134   * This method is guaranteed to seek at or after the required key only if the
-135   * key comes after the current position of the scanner. Should not be used
-136   * to seek to a key which may come before the current position.
-137   *
-138   * @param cell seek value (should be non-null)
-139   * @return true if scanner has values left, false if end of scanner
-140   */
-141  @Override
-142  public boolean reseek(Cell cell) throws IOException {
-143    if (closed) {
-144      return false;
-145    }
-146    /*
-147    See HBASE-4195 & HBASE-3855 & HBASE-6591 for the background on this implementation.
-148    This code is executed concurrently with flush and puts, without locks.
-149    The ideal implementation for performance would use the sub skip list implicitly
-150    pointed by the iterator. Unfortunately the Java API does not offer a method to
-151    get it. So we remember the last keys we iterated to and restore
-152    the reseeked set to at least that point.
-153    */
-154    iter = getIterator(getHighest(cell, last));
-155    updateCurrent();
-156    return (current != null);
-157  }
-158
-159  /**
-160   * Seek the scanner at or before the row of specified Cell, it firstly
-161   * tries to seek the scanner at or after the specified Cell, return if
-162   * peek KeyValue of scanner has the same row with specified Cell,
-163   * otherwise seek the scanner at the first Cell of the row which is the
-164   * previous row of 
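
The reseek comment above spells out the constraint this class works around: a live skip-list iterator cannot be repositioned, so the scanner restarts a tail iterator from the larger of the requested key and the last key it handed out. A small sketch of that seek/reseek pattern over a ConcurrentSkipListSet, with plain String keys and the MVCC filtering and reference counting omitted:

import java.util.Iterator;
import java.util.concurrent.ConcurrentSkipListSet;

/** Simplified seek/reseek over a sorted, concurrently mutated set of keys. */
final class SkipListScanner {
  private final ConcurrentSkipListSet<String> keys;
  private Iterator<String> iter;
  private String current;
  private String last; // last key handed out; reseek never restarts earlier than this

  SkipListScanner(ConcurrentSkipListSet<String> keys) {
    this.keys = keys;
    this.iter = keys.iterator();
    advance();
  }

  String peek() { return current; }

  String next() {
    String old = current;
    last = old;       // remember where we were, for reseek
    advance();
    return old;
  }

  boolean seek(String key) {
    iter = keys.tailSet(key, true).iterator(); // restart from the requested key
    last = null;
    advance();
    return current != null;
  }

  boolean reseek(String key) {
    // The live iterator cannot be repositioned, so restart a tail iterator from
    // the larger of the requested key and the last key we iterated to.
    String from = (last != null && last.compareTo(key) > 0) ? last : key;
    iter = keys.tailSet(from, true).iterator();
    advance();
    return current != null;
  }

  private void advance() { current = iter.hasNext() ? iter.next() : null; }
}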

[25/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.html
index 5dfe21a..54c3f92 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.html
@@ -398,7 +398,7 @@ extends Procedure
-addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doExecute,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProcIdHashCode, getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout,
 haveSameParent,
 holdLock, 
incChildrenLatch,
 isBypass,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 removeStackIndex, setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure,
 setLastUpdate,
 setNonceKey,
 setOwner, setOwner,
 setParentProcId,
 setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState,
 setSubmittedTime, setTimeout,
 setTimeoutFailure,
 skipPersistence,
 toString,
 toStringClass,
 toStringDetails,
 toStringSimpleSB,
 updateMetricsOnFinish, updateMetricsOnSubmit,
 updateTimestamp,
 wasExecuted
+addStackIndex,
 afterReplay,
 beforeReplay,
 bypass,
 compareTo,
 completionCleanup,
 doExecute,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId, getProcIdHashCode,
 getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout,
 haveSameParent, holdLock,
 incChildrenLatch,
 isBypass,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting, removeStackIndex,
 setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure,
 setLastUpdate,
 setNonceKey, setOwner,
 setOwner,
 setParentProcId,
 setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState, setSubmittedTime,
 setTimeout,
 setTimeoutFailure,
 skipPersistence,
 toString,
 toStringClass,
 toStringDetails,
 toStringSimpleSB,
 updateMetricsOnFinish,
 updateMetricsOnSubmit,
 updateTimestamp,
 wasExecuted
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.html 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.html
index 365810d..420ff1d 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.html
@@ -426,7 +426,7 @@ extends Procedure
-addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doExecute,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProcIdHashCode, getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout,
 haveSameParent,
 holdLock, 
incChildrenLatch,
 isBypass,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 removeStackIndex, setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure,
 setLastUpdate,
 setNonceKey,
 setOwner, setOwner,
 setParentProcId,
 setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState,
 setSubmittedTime, setTimeout,
 setTimeoutFailure,
 skipPersistence,
 toString,
 toStringClass,
 toStringDetails,
 toStringSimpleSB,
 updateMetricsOnFinish, updateMetricsOnSubmit,
 updateTimestamp,
 wasExecuted
+addStackIndex,
 afterReplay,
 beforeReplay,
 bypass,
 compareTo,
 completionCleanup,
 doExecute,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId, getProcIdHashCode,
 getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime,

[25/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSchemaChangeTestBase.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSchemaChangeTestBase.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSchemaChangeTestBase.html
new file mode 100644
index 000..054cbd1
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSchemaChangeTestBase.html
@@ -0,0 +1,339 @@
+RestoreSnapshotFromClientSchemaChangeTestBase (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+org.apache.hadoop.hbase.client
+Class RestoreSnapshotFromClientSchemaChangeTestBase
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.client.RestoreSnapshotFromClientTestBase
+
+
+org.apache.hadoop.hbase.client.RestoreSnapshotFromClientSchemaChangeTestBase
+
+
+
+
+
+
+
+
+
+Direct Known Subclasses:
+TestMobRestoreSnapshotFromClientSchemaChange,
 TestRestoreSnapshotFromClientSchemaChange
+
+
+
+public class RestoreSnapshotFromClientSchemaChangeTestBase
+extends RestoreSnapshotFromClientTestBase
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+
+
+
+Fields inherited from class org.apache.hadoop.hbase.client.RestoreSnapshotFromClientTestBase
+admin,
 emptySnapshot,
 FAMILY,
 name,
 snapshot0Rows,
 snapshot1Rows,
 snapshotName0,
 snapshotName1,
 snapshotName2,
 tableName,
 TEST_FAMILY2,
 TEST_UTIL
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+RestoreSnapshotFromClientSchemaChangeTestBase()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+private Set<String>
+getFamiliesFromFS(org.apache.hadoop.hbase.TableName tableName)
+
+
+protected 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor
+getTestRestoreSchemaChangeHCD()
+
+
+void
+testRestoreSchemaChange()
+
+
+
+
+
+
+Methods inherited from class org.apache.hadoop.hbase.client.RestoreSnapshotFromClientTestBase
+countRows,
 createTable,
 getNumReplicas,
 getValidMethodName,
 setup,
 setupCluster,
 setupConf,
 splitRegion,
 tearDown,
 tearDownAfterClass,
 verifyRowCount
+
+
+
+
+
+Methods inherited from class java.lang.Object
+clone, equals, finalize, getClass, hashCode, notify, notifyAll, 
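
The method summary above includes getFamiliesFromFS(TableName), which hints at how the schema-change test verifies a restore: read the column-family names straight out of the table's on-disk directory layout and compare them with the table descriptor. A hedged sketch of that idea with plain java.nio (the directory layout here is assumed for illustration; the real test walks the Hadoop FileSystem API):

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashSet;
import java.util.Set;

/** Collect column-family names by listing each region's subdirectories. */
final class FamiliesFromFs {
  static Set<String> familiesUnder(Path tableDir) throws IOException {
    Set<String> families = new HashSet<>();
    try (DirectoryStream<Path> regions = Files.newDirectoryStream(tableDir, Files::isDirectory)) {
      for (Path region : regions) {
        try (DirectoryStream<Path> fams = Files.newDirectoryStream(region, Files::isDirectory)) {
          for (Path fam : fams) {
            String name = fam.getFileName().toString();
            if (!name.startsWith(".")) { // skip dot-prefixed metadata entries
              families.add(name);
            }
          }
        }
      }
    }
    return families;
  }
}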

[25/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
index 061ce80..bdfc3f8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
@@ -39,2126 +39,2163 @@
 031import java.util.Set;
 032import 
java.util.concurrent.ConcurrentHashMap;
 033import 
java.util.concurrent.CopyOnWriteArrayList;
-034import java.util.concurrent.TimeUnit;
-035import 
java.util.concurrent.atomic.AtomicBoolean;
-036import 
java.util.concurrent.atomic.AtomicInteger;
-037import 
java.util.concurrent.atomic.AtomicLong;
-038import java.util.stream.Collectors;
-039import java.util.stream.Stream;
-040import 
org.apache.hadoop.conf.Configuration;
-041import 
org.apache.hadoop.hbase.HConstants;
-042import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
-043import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-044import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
-045import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-046import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-047import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-048import 
org.apache.hadoop.hbase.security.User;
-049import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-050import 
org.apache.hadoop.hbase.util.IdLock;
-051import 
org.apache.hadoop.hbase.util.NonceKey;
-052import 
org.apache.hadoop.hbase.util.Threads;
-053import 
org.apache.yetus.audience.InterfaceAudience;
-054import org.slf4j.Logger;
-055import org.slf4j.LoggerFactory;
-056
-057import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-058import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+034import java.util.concurrent.Executor;
+035import java.util.concurrent.Executors;
+036import java.util.concurrent.TimeUnit;
+037import 
java.util.concurrent.atomic.AtomicBoolean;
+038import 
java.util.concurrent.atomic.AtomicInteger;
+039import 
java.util.concurrent.atomic.AtomicLong;
+040import java.util.stream.Collectors;
+041import java.util.stream.Stream;
+042import 
org.apache.hadoop.conf.Configuration;
+043import 
org.apache.hadoop.hbase.HConstants;
+044import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
+045import 
org.apache.hadoop.hbase.log.HBaseMarkers;
+046import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
+047import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+048import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
+049import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureStoreListener;
+050import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
+051import 
org.apache.hadoop.hbase.security.User;
+052import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+053import 
org.apache.hadoop.hbase.util.IdLock;
+054import 
org.apache.hadoop.hbase.util.NonceKey;
+055import 
org.apache.hadoop.hbase.util.Threads;
+056import 
org.apache.yetus.audience.InterfaceAudience;
+057import org.slf4j.Logger;
+058import org.slf4j.LoggerFactory;
 059
-060import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-061
-062/**
-063 * Thread Pool that executes the submitted procedures.
-064 * The executor has a ProcedureStore associated.
-065 * Each operation is logged and on restart the pending procedures are resumed.
-066 *
-067 * Unless the Procedure code throws an error (e.g. invalid user input)
-068 * the procedure will complete (at some point in time), On restart the pending
-069 * procedures are resumed and the once failed will be rolledback.
+060import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+061import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+062import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+063
+064import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
+065
+066/**
+067 * Thread Pool that executes the submitted procedures.
+068 * The executor has a ProcedureStore associated.
+069 * Each operation is logged and on restart the pending procedures are resumed.
 070 *
-071 * The user can add procedures to the executor via submitProcedure(proc)
-072 * check for the finished state via isFinished(procId)
-073 * and get the result via getResult(procId)
-074 */
-075@InterfaceAudience.Private
-076public class ProcedureExecutor<TEnvironment> {
-077  private static final Logger LOG = 
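
The class comment above states the executor's contract: submit a procedure, poll isFinished, then fetch the result, with every step logged to the associated ProcedureStore so pending work survives a restart. A toy sketch of just that submit/isFinished/getResult surface, with illustrative stand-ins and the persistence and rollback machinery omitted:

import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicLong;

/** Toy model of the submit/isFinished/getResult contract; no store, no rollback. */
final class TinyProcedureExecutor {
  private final ExecutorService pool = Executors.newFixedThreadPool(4);
  private final Map<Long, Future<byte[]>> results = new ConcurrentHashMap<>();
  private final AtomicLong nextId = new AtomicLong(1);

  /** Analogous to submitProcedure(proc): returns a procId to poll on. */
  long submitProcedure(Callable<byte[]> proc) {
    long procId = nextId.getAndIncrement();
    results.put(procId, pool.submit(proc)); // the real executor also logs to its store
    return procId;
  }

  /** Analogous to isFinished(procId). */
  boolean isFinished(long procId) {
    Future<byte[]> f = results.get(procId);
    return f != null && f.isDone();
  }

  /** Analogous to getResult(procId); blocks if the procedure is still running. */
  byte[] getResult(long procId) throws InterruptedException, ExecutionException {
    return results.get(procId).get();
  }
}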

[25/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
index 43c66a8..061ce80 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
@@ -23,2136 +23,2142 @@
 015 * See the License for the specific 
language governing permissions and
 016 * limitations under the License.
 017 */
-018
-019package 
org.apache.hadoop.hbase.procedure2;
-020
-021import java.io.IOException;
-022import java.util.ArrayDeque;
-023import java.util.ArrayList;
-024import java.util.Arrays;
-025import java.util.Collection;
-026import java.util.Deque;
-027import java.util.HashSet;
-028import java.util.Iterator;
-029import java.util.List;
-030import java.util.Map;
-031import java.util.Objects;
-032import java.util.Set;
-033import 
java.util.concurrent.ConcurrentHashMap;
-034import 
java.util.concurrent.CopyOnWriteArrayList;
-035import java.util.concurrent.TimeUnit;
-036import 
java.util.concurrent.atomic.AtomicBoolean;
-037import 
java.util.concurrent.atomic.AtomicInteger;
-038import 
java.util.concurrent.atomic.AtomicLong;
-039import java.util.stream.Collectors;
-040import java.util.stream.Stream;
-041
-042import 
org.apache.hadoop.conf.Configuration;
-043import 
org.apache.hadoop.hbase.HConstants;
-044import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
-045import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-046import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
-047import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-048import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-049import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-050import 
org.apache.hadoop.hbase.security.User;
-051import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-052import 
org.apache.hadoop.hbase.util.IdLock;
-053import 
org.apache.hadoop.hbase.util.NonceKey;
-054import 
org.apache.hadoop.hbase.util.Threads;
-055import 
org.apache.yetus.audience.InterfaceAudience;
-056import org.slf4j.Logger;
-057import org.slf4j.LoggerFactory;
-058
-059import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-060import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+018package 
org.apache.hadoop.hbase.procedure2;
+019
+020import java.io.IOException;
+021import java.util.ArrayDeque;
+022import java.util.ArrayList;
+023import java.util.Arrays;
+024import java.util.Collection;
+025import java.util.Deque;
+026import java.util.HashSet;
+027import java.util.Iterator;
+028import java.util.List;
+029import java.util.Map;
+030import java.util.Objects;
+031import java.util.Set;
+032import 
java.util.concurrent.ConcurrentHashMap;
+033import 
java.util.concurrent.CopyOnWriteArrayList;
+034import java.util.concurrent.TimeUnit;
+035import 
java.util.concurrent.atomic.AtomicBoolean;
+036import 
java.util.concurrent.atomic.AtomicInteger;
+037import 
java.util.concurrent.atomic.AtomicLong;
+038import java.util.stream.Collectors;
+039import java.util.stream.Stream;
+040import 
org.apache.hadoop.conf.Configuration;
+041import 
org.apache.hadoop.hbase.HConstants;
+042import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
+043import 
org.apache.hadoop.hbase.log.HBaseMarkers;
+044import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
+045import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+046import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
+047import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
+048import 
org.apache.hadoop.hbase.security.User;
+049import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+050import 
org.apache.hadoop.hbase.util.IdLock;
+051import 
org.apache.hadoop.hbase.util.NonceKey;
+052import 
org.apache.hadoop.hbase.util.Threads;
+053import 
org.apache.yetus.audience.InterfaceAudience;
+054import org.slf4j.Logger;
+055import org.slf4j.LoggerFactory;
+056
+057import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+058import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+059
+060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 061
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-063
-064/**
-065 * Thread Pool that executes the submitted procedures.
-066 * The executor has a ProcedureStore associated.
-067 * Each operation is logged and on restart 

[25/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/org/apache/hadoop/hbase/util/RetryCounter.ExponentialBackoffPolicy.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/util/RetryCounter.ExponentialBackoffPolicy.html
 
b/devapidocs/org/apache/hadoop/hbase/util/RetryCounter.ExponentialBackoffPolicy.html
index 8f37ef2..d299b68 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/util/RetryCounter.ExponentialBackoffPolicy.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/util/RetryCounter.ExponentialBackoffPolicy.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class RetryCounter.ExponentialBackoffPolicy
+public static class RetryCounter.ExponentialBackoffPolicy
 extends RetryCounter.BackoffPolicy
 
 
@@ -192,7 +192,7 @@ extends 
 
 ExponentialBackoffPolicy
-public ExponentialBackoffPolicy()
+public ExponentialBackoffPolicy()



@@ -209,7 +209,7 @@ extends 

 getBackoffTime
-public long getBackoffTime(RetryCounter.RetryConfig config,
+public long getBackoffTime(RetryCounter.RetryConfig config,
                            int attempts)
 
 Overrides:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/org/apache/hadoop/hbase/util/RetryCounter.ExponentialBackoffPolicyWithLimit.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/util/RetryCounter.ExponentialBackoffPolicyWithLimit.html
 
b/devapidocs/org/apache/hadoop/hbase/util/RetryCounter.ExponentialBackoffPolicyWithLimit.html
index 64ea929..75d6d08 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/util/RetryCounter.ExponentialBackoffPolicyWithLimit.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/util/RetryCounter.ExponentialBackoffPolicyWithLimit.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class RetryCounter.ExponentialBackoffPolicyWithLimit
+public static class RetryCounter.ExponentialBackoffPolicyWithLimit
 extends RetryCounter.ExponentialBackoffPolicy
 
 
@@ -193,7 +193,7 @@ extends 
 
 ExponentialBackoffPolicyWithLimit
-public ExponentialBackoffPolicyWithLimit()
+public ExponentialBackoffPolicyWithLimit()



@@ -210,7 +210,7 @@ extends 

 getBackoffTime
-public long getBackoffTime(RetryCounter.RetryConfig config,
+public long getBackoffTime(RetryCounter.RetryConfig config,
                            int attempts)
 
 Overrides:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/org/apache/hadoop/hbase/util/RetryCounter.RetryConfig.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/util/RetryCounter.RetryConfig.html 
b/devapidocs/org/apache/hadoop/hbase/util/RetryCounter.RetryConfig.html
index 7922e03..d803f14 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/RetryCounter.RetryConfig.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/RetryCounter.RetryConfig.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class RetryCounter.RetryConfig
+public static class RetryCounter.RetryConfig
 extends Object
 Configuration for a retry counter
 
@@ -143,18 +143,22 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 DEFAULT_BACKOFF_POLICY
 
 
+private float
+jitter
+
+
 private int
 maxAttempts
 
-
+
 private long
 maxSleepTime
 
-
+
 private long
 sleepInterval
 
-
+
 private TimeUnit
 timeUnit
 
@@ -202,38 +206,46 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 getBackoffPolicy()
 
 
+float
+getJitter()
+
+
 int
 getMaxAttempts()
 
-
+
 long
 getMaxSleepTime()
 
-
+
 long
 getSleepInterval()
 
-
+
 TimeUnit
 getTimeUnit()
 
-
+
 RetryCounter.RetryConfig
 setBackoffPolicy(RetryCounter.BackoffPolicy backoffPolicy)
 
-
+
+RetryCounter.RetryConfig
+setJitter(float jitter)
+
+
 RetryCounter.RetryConfig
 setMaxAttempts(int maxAttempts)
 
-
+
 RetryCounter.RetryConfig
 setMaxSleepTime(long maxSleepTime)
 
-
+
 RetryCounter.RetryConfig
 setSleepInterval(long sleepInterval)
 
-
+
 RetryCounter.RetryConfig
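
The substance of this diff is the new jitter knob on RetryConfig. Exponential backoff with jitter sleeps roughly base * 2^attempts, randomly perturbed by the jitter fraction so that many clients retrying together do not stay synchronized, and capped by the maximum sleep time. A hedged sketch of such a policy (illustrative names, not the RetryCounter internals):

import java.util.concurrent.ThreadLocalRandom;

/** Exponential backoff with a jitter fraction and a hard cap. */
final class Backoff {
  private final long sleepIntervalMs; // base sleep for attempt 0
  private final long maxSleepMs;      // cap on any single sleep
  private final float jitter;         // e.g. 0.1f perturbs by up to +/-10%

  Backoff(long sleepIntervalMs, long maxSleepMs, float jitter) {
    this.sleepIntervalMs = sleepIntervalMs;
    this.maxSleepMs = maxSleepMs;
    this.jitter = jitter;
  }

  long backoffTimeMs(int attempts) {
    double backoff = sleepIntervalMs * Math.pow(2, attempts);      // base * 2^attempts
    double factor = 1.0 + jitter * (2 * ThreadLocalRandom.current().nextDouble() - 1);
    return (long) Math.min(backoff * factor, (double) maxSleepMs); // apply the cap
  }
}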
 

[25/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/executor/ExecutorService.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/executor/ExecutorService.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/executor/ExecutorService.html
index 49f081b..33c9cc0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/executor/ExecutorService.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/executor/ExecutorService.html
@@ -35,309 +35,328 @@
 027import 
java.util.concurrent.BlockingQueue;
 028import 
java.util.concurrent.ConcurrentHashMap;
 029import 
java.util.concurrent.ConcurrentMap;
-030import 
java.util.concurrent.LinkedBlockingQueue;
-031import 
java.util.concurrent.ThreadPoolExecutor;
-032import java.util.concurrent.TimeUnit;
-033import 
java.util.concurrent.atomic.AtomicLong;
-034
-035import 
org.apache.yetus.audience.InterfaceAudience;
-036import org.slf4j.Logger;
-037import org.slf4j.LoggerFactory;
-038import 
org.apache.hadoop.hbase.monitoring.ThreadMonitoring;
+030import java.util.concurrent.Executors;
+031import 
java.util.concurrent.LinkedBlockingQueue;
+032import 
java.util.concurrent.ThreadPoolExecutor;
+033import java.util.concurrent.TimeUnit;
+034import 
java.util.concurrent.atomic.AtomicLong;
+035import 
org.apache.hadoop.hbase.monitoring.ThreadMonitoring;
+036import 
org.apache.yetus.audience.InterfaceAudience;
+037import org.slf4j.Logger;
+038import org.slf4j.LoggerFactory;
 039
 040import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 041import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 042import 
org.apache.hbase.thirdparty.com.google.common.collect.Maps;
-043import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-044
-045/**
-046 * This is a generic executor service. This component abstracts a
-047 * threadpool, a queue to which {@link EventType}s can be submitted,
-048 * and a <code>Runnable</code> that handles the object that is added to the queue.
-049 *
-050 * <p>In order to create a new service, create an instance of this class and
-051 * then do: <code>instance.startExecutorService("myService");</code>.  When done
-052 * call {@link #shutdown()}.
-053 *
-054 * <p>In order to use the service created above, call
-055 * {@link #submit(EventHandler)}.
-056 */
-057@InterfaceAudience.Private
-058public class ExecutorService {
-059  private static final Logger LOG = LoggerFactory.getLogger(ExecutorService.class);
-060
-061  // hold the all the executors created in a map addressable by their names
-062  private final ConcurrentHashMap<String, Executor> executorMap = new ConcurrentHashMap<>();
+043import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ListenableFuture;
+044import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ListeningScheduledExecutorService;
+045import org.apache.hbase.thirdparty.com.google.common.util.concurrent.MoreExecutors;
+046import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+047
+048/**
+049 * This is a generic executor service. This component abstracts a
+050 * threadpool, a queue to which {@link EventType}s can be submitted,
+051 * and a <code>Runnable</code> that handles the object that is added to the queue.
+052 *
+053 * <p>In order to create a new service, create an instance of this class and
+054 * then do: <code>instance.startExecutorService("myService");</code>.  When done
+055 * call {@link #shutdown()}.
+056 *
+057 * <p>In order to use the service created above, call
+058 * {@link #submit(EventHandler)}.
+059 */
+060@InterfaceAudience.Private
+061public class ExecutorService {
+062  private static final Logger LOG = LoggerFactory.getLogger(ExecutorService.class);
 063
-064  // Name of the server hosting this executor service.
-065  private final String servername;
+064  // hold the all the executors created in a map addressable by their names
+065  private final ConcurrentHashMap<String, Executor> executorMap = new ConcurrentHashMap<>();
 066
-067  /**
-068   * Default constructor.
-069   * @param servername Name of the hosting server.
-070   */
-071  public ExecutorService(final String servername) {
-072    super();
-073    this.servername = servername;
-074  }
-075
-076  /**
-077   * Start an executor service with a given name. If there was a service already
-078   * started with the same name, this throws a RuntimeException.
-079   * @param name Name of the service to start.
-080   */
-081  @VisibleForTesting
-082  public void startExecutorService(String name, int maxThreads) {
-083    if (this.executorMap.get(name) != null) {
-084      throw new RuntimeException("An executor service with the name " + name +
-085        " is already running!");
-086    }
-087    Executor hbes = new Executor(name, maxThreads);
-088    if 
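
The ExecutorService above is, at heart, a registry of named thread pools: each pool is created once via startExecutorService and then fed events via submit. A compact sketch of that registry pattern on plain java.util.concurrent (illustrative; the real class wraps each pool in its own tracked Executor type):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/** Registry of named, fixed-size thread pools, in the spirit of the class above. */
final class NamedExecutors {
  private final ConcurrentMap<String, ExecutorService> pools = new ConcurrentHashMap<>();

  /** Mirrors startExecutorService: starting a duplicate name is an error. */
  void start(String name, int maxThreads) {
    if (pools.containsKey(name)) { // check-then-act, as in the original
      throw new IllegalStateException("An executor named " + name + " is already running");
    }
    pools.put(name, Executors.newFixedThreadPool(maxThreads));
  }

  /** Mirrors submit(EventHandler): route the event to its named pool. */
  void submit(String name, Runnable event) {
    ExecutorService pool = pools.get(name);
    if (pool == null) {
      throw new IllegalStateException("No executor named " + name);
    }
    pool.execute(event);
  }

  /** Mirrors shutdown(): stop accepting work on every pool. */
  void shutdown() {
    pools.values().forEach(ExecutorService::shutdown);
  }
}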

[25/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
index a5789e0..93a57cb 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
@@ -238,4120 +238,4119 @@
 230 * @see Admin
 231 */
 232@InterfaceAudience.Private
-233@InterfaceStability.Evolving
-234public class HBaseAdmin implements Admin {
-235  private static final Logger LOG = LoggerFactory.getLogger(HBaseAdmin.class);
-236
-237  private ClusterConnection connection;
-238
-239  private final Configuration conf;
-240  private final long pause;
-241  private final int numRetries;
-242  private final int syncWaitTimeout;
-243  private boolean aborted;
-244  private int operationTimeout;
-245  private int rpcTimeout;
-246
-247  private RpcRetryingCallerFactory rpcCallerFactory;
-248  private RpcControllerFactory rpcControllerFactory;
-249
-250  private NonceGenerator ng;
-251
-252  @Override
-253  public int getOperationTimeout() {
-254    return operationTimeout;
-255  }
-256
-257  HBaseAdmin(ClusterConnection connection) throws IOException {
-258    this.conf = connection.getConfiguration();
-259    this.connection = connection;
-260
-261    // TODO: receive ConnectionConfiguration here rather than re-parsing these configs every time.
-262    this.pause = this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
-263        HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
-264    this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-265        HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-266    this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
-267        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-268    this.rpcTimeout = this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-269        HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
-270    this.syncWaitTimeout = this.conf.getInt(
-271      "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
-272
-273    this.rpcCallerFactory = connection.getRpcRetryingCallerFactory();
-274    this.rpcControllerFactory = connection.getRpcControllerFactory();
-275
-276    this.ng = this.connection.getNonceGenerator();
-277  }
-278
-279  @Override
-280  public void abort(String why, Throwable e) {
-281    // Currently does nothing but throw the passed message and exception
-282    this.aborted = true;
-283    throw new RuntimeException(why, e);
-284  }
-285
-286  @Override
-287  public boolean isAborted() {
-288    return this.aborted;
-289  }
-290
-291  @Override
-292  public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
-293      throws IOException {
-294    return get(abortProcedureAsync(procId, mayInterruptIfRunning), this.syncWaitTimeout,
-295      TimeUnit.MILLISECONDS);
-296  }
-297
-298  @Override
-299  public Future<Boolean> abortProcedureAsync(final long procId, final boolean mayInterruptIfRunning)
-300      throws IOException {
-301    Boolean abortProcResponse =
-302        executeCallable(new MasterCallable<AbortProcedureResponse>(getConnection(),
-303            getRpcControllerFactory()) {
-304          @Override
-305          protected AbortProcedureResponse rpcCall() throws Exception {
-306            AbortProcedureRequest abortProcRequest =
-307                AbortProcedureRequest.newBuilder().setProcId(procId).build();
-308            return master.abortProcedure(getRpcController(), abortProcRequest);
-309          }
-310        }).getIsProcedureAborted();
-311    return new AbortProcedureFuture(this, procId, abortProcResponse);
-312  }
-313
-314  @Override
-315  public List<TableDescriptor> listTableDescriptors() throws IOException {
-316    return listTableDescriptors((Pattern)null, false);
-317  }
-318
-319  @Override
-320  public List<TableDescriptor> listTableDescriptors(Pattern pattern) throws IOException {
-321    return listTableDescriptors(pattern, false);
-322  }
-323
-324  @Override
-325  public List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables)
-326      throws IOException {
-327    return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
-328        getRpcControllerFactory()) {
-329      @Override
-330      protected List<TableDescriptor> rpcCall() throws Exception {
-331        GetTableDescriptorsRequest req =
-332            RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
-333        return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
-334            req));
-335      }
-336    });
-337 
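
Worth noting in the hunk above: the blocking abortProcedure is just abortProcedureAsync plus a bounded get, so every synchronous admin call shares the single timeout knob hbase.client.sync.wait.timeout.msec. A generic sketch of that sync-over-async wrapper (hypothetical helper, not the HBaseAdmin code):

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

/** Blocks on an async call with one shared timeout, mapping failures to IOException. */
final class SyncOverAsync {
  static <T> T get(CompletableFuture<T> future, long timeout, TimeUnit unit) throws IOException {
    try {
      return future.get(timeout, unit);          // bounded wait
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new IOException("Interrupted while waiting", e);
    } catch (ExecutionException e) {
      throw new IOException("Operation failed", e.getCause());
    } catch (TimeoutException e) {
      throw new IOException("Timed out after " + timeout + " " + unit, e);
    }
  }
}

With a helper of this shape, the synchronous call collapses to get(abortProcedureAsync(procId, mayInterruptIfRunning), syncWaitTimeout, TimeUnit.MILLISECONDS), which is exactly the structure visible in the diff.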

[25/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/filter/KeyOnlyFilter.KeyOnlyCell.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/KeyOnlyFilter.KeyOnlyCell.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/KeyOnlyFilter.KeyOnlyCell.html
index 1e6a2bb..9881003 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/KeyOnlyFilter.KeyOnlyCell.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/KeyOnlyFilter.KeyOnlyCell.html
@@ -145,332 +145,328 @@
 137
 138  @Override
 139  public boolean equals(Object obj) {
-140if (obj == null || (!(obj instanceof 
KeyOnlyFilter))) {
-141  return false;
-142}
-143KeyOnlyFilter f = (KeyOnlyFilter) 
obj;
-144return 
this.areSerializedFieldsEqual(f);
-145  }
-146
-147  @Override
-148  public int hashCode() {
-149return Objects.hash(this.lenAsVal);
-150  }
+140return obj instanceof Filter 
 areSerializedFieldsEqual((Filter) obj);
+141  }
+142
+143  @Override
+144  public int hashCode() {
+145return Objects.hash(this.lenAsVal);
+146  }
+147
+148  static class KeyOnlyCell implements 
Cell {
+149private Cell cell;
+150private boolean lenAsVal;
 151
-152  static class KeyOnlyCell implements 
Cell {
-153private Cell cell;
-154private boolean lenAsVal;
-155
-156public KeyOnlyCell(Cell c, boolean 
lenAsVal) {
-157  this.cell = c;
-158  this.lenAsVal = lenAsVal;
-159}
-160
-161@Override
-162public byte[] getRowArray() {
-163  return cell.getRowArray();
-164}
-165
-166@Override
-167public int getRowOffset() {
-168  return cell.getRowOffset();
-169}
-170
-171@Override
-172public short getRowLength() {
-173  return cell.getRowLength();
-174}
-175
-176@Override
-177public byte[] getFamilyArray() {
-178  return cell.getFamilyArray();
-179}
-180
-181@Override
-182public int getFamilyOffset() {
-183  return cell.getFamilyOffset();
-184}
-185
-186@Override
-187public byte getFamilyLength() {
-188  return cell.getFamilyLength();
-189}
-190
-191@Override
-192public byte[] getQualifierArray() {
-193  return cell.getQualifierArray();
-194}
-195
-196@Override
-197public int getQualifierOffset() {
-198  return cell.getQualifierOffset();
-199}
-200
-201@Override
-202public int getQualifierLength() {
-203  return cell.getQualifierLength();
-204}
-205
-206@Override
-207public long getTimestamp() {
-208  return cell.getTimestamp();
-209}
-210
-211@Override
-212public byte getTypeByte() {
-213  return cell.getTypeByte();
-214}
-215
-216@Override
-217public Type getType() {
-218  return cell.getType();
-219}
-220
-221
-222@Override
-223public long getSequenceId() {
-224  return 0;
-225}
-226
-227@Override
-228public byte[] getValueArray() {
-229  if (lenAsVal) {
-230return 
Bytes.toBytes(cell.getValueLength());
-231  } else {
-232return 
HConstants.EMPTY_BYTE_ARRAY;
-233  }
-234}
-235
-236@Override
-237public int getValueOffset() {
-238  return 0;
-239}
-240
-241@Override
-242public int getValueLength() {
-243  if (lenAsVal) {
-244return Bytes.SIZEOF_INT;
-245  } else {
-246return 0;
-247  }
-248}
-249
-250@Override
-251public byte[] getTagsArray() {
-252  return 
HConstants.EMPTY_BYTE_ARRAY;
-253}
-254
-255@Override
-256public int getTagsOffset() {
-257  return 0;
-258}
-259
-260@Override
-261public int getTagsLength() {
-262  return 0;
-263}
-264  }
-265
-266  static class 
KeyOnlyByteBufferExtendedCell extends ByteBufferExtendedCell {
-267public static final int 
FIXED_OVERHEAD = ClassSize.OBJECT + ClassSize.REFERENCE
-268+ Bytes.SIZEOF_BOOLEAN;
-269private ByteBufferExtendedCell 
cell;
-270private boolean lenAsVal;
-271
-272public 
KeyOnlyByteBufferExtendedCell(ByteBufferExtendedCell c, boolean lenAsVal) {
-273  this.cell = c;
-274  this.lenAsVal = lenAsVal;
-275}
-276
-277@Override
-278public byte[] getRowArray() {
-279  return cell.getRowArray();
-280}
-281
-282@Override
-283public int getRowOffset() {
-284  return cell.getRowOffset();
-285}
-286
-287@Override
-288public short getRowLength() {
-289  return cell.getRowLength();
-290}
-291
-292@Override
-293public byte[] getFamilyArray() {
-294  return cell.getFamilyArray();
-295}
-296
-297@Override
-298public int getFamilyOffset() {
-299  return cell.getFamilyOffset();
-300}
-301
-302@Override
-303public byte getFamilyLength() {
-304  return cell.getFamilyLength();
-305}
-306
-307@Override
-308public byte[] 
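
KeyOnlyCell above is a decorator: every key component (row, family, qualifier, timestamp, type) delegates to the wrapped Cell, while the value is fabricated, either empty or, when lenAsVal is set, the four-byte length of the original value. The same idea in miniature, with a hypothetical two-method KV interface standing in for Cell:

import java.nio.ByteBuffer;

/** Decorator that keeps the key but drops (or length-encodes) the value. */
final class KeyOnly {
  interface KV {
    byte[] key();
    byte[] value();
  }

  /** Wraps a KV so its value is empty, or the 4-byte value length when lenAsVal. */
  static KV keyOnly(KV kv, boolean lenAsVal) {
    return new KV() {
      @Override public byte[] key() { return kv.key(); }   // key is untouched
      @Override public byte[] value() {
        return lenAsVal
            ? ByteBuffer.allocate(4).putInt(kv.value().length).array()
            : new byte[0];                                  // value stripped
      }
    };
  }
}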

[25/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.html
index f99e988..6da1265 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.html
@@ -146,26 +146,30 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 metricRegistry
 
 
+private MetricsTable
+metricsTable
+
+
 private MetricsRegionServerQuotaSource
 quotaSource
 
-
+
 private MetricsRegionServerWrapper
 regionServerWrapper
 
-
+
 static boolean
 RS_ENABLE_TABLE_METRICS_DEFAULT
 
-
+
 static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 RS_ENABLE_TABLE_METRICS_KEY
 
-
+
 private MetricsRegionServerSource
 serverSource
 
-
+
 private RegionServerTableMetrics
 tableMetrics
 
@@ -184,13 +188,15 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 Constructor and Description
 
 
-MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper,
-                    org.apache.hadoop.conf.Configuration conf)
+MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper,
+                    org.apache.hadoop.conf.Configuration conf,
+                    MetricsTable metricsTable)


-MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper,
+MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper,
                     MetricsRegionServerSource serverSource,
-                    RegionServerTableMetrics tableMetrics)
+                    RegionServerTableMetrics tableMetrics,
+                    MetricsTable metricsTable)
 
 
 
@@ -256,7 +262,8 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 void
-updateCompaction(booleanisMajor,
+updateCompaction(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringtable,
+booleanisMajor,
 longt,
 intinputFileCount,
 intoutputFileCount,
@@ -275,7 +282,8 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 void
-updateFlush(longt,
+updateFlush(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringtable,
+   longt,
longmemstoreSize,
longfileSize)
 
@@ -392,13 +400,22 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 private RegionServerTableMetrics tableMetrics
 
 
+
+
+
+
+
+metricsTable
+private final MetricsTable metricsTable
+
+
 
 
 
 
 
 quotaSource
-privateMetricsRegionServerQuotaSource quotaSource
+privateMetricsRegionServerQuotaSource quotaSource
 
 
 
@@ -407,7 +424,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 metricRegistry
-privateMetricRegistry metricRegistry
+privateMetricRegistry metricRegistry
 
 
 
@@ -416,7 +433,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 bulkLoadTimer
-privateTimer bulkLoadTimer
+privateTimer bulkLoadTimer
 
 
 
@@ -427,25 +444,27 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 Constructor Detail
-
+
 
 
 
 
 MetricsRegionServer
-public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper,
-                           org.apache.hadoop.conf.Configuration conf)
+public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper,
+                           org.apache.hadoop.conf.Configuration conf,
+                           MetricsTable metricsTable)


-
+




 MetricsRegionServer
-MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper,
+MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper,
                     MetricsRegionServerSource serverSource,
-                    RegionServerTableMetrics tableMetrics)
+                    RegionServerTableMetrics tableMetrics,
+                    MetricsTable metricsTable)
 
 
 
@@ -462,7 +481,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 createTableMetrics
-static RegionServerTableMetrics createTableMetrics(org.apache.hadoop.conf.Configuration conf)
+static RegionServerTableMetrics createTableMetrics(org.apache.hadoop.conf.Configuration conf)
 Creates an instance of RegionServerTableMetrics 
only if the feature is enabled.
 
 
@@ -472,7 +491,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getMetricsSource
-public MetricsRegionServerSource getMetricsSource()
+public MetricsRegionServerSource getMetricsSource()
 
 
 
@@ -481,7 +500,7 @@ extends 
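
The change above threads a table name through updateFlush and updateCompaction so flush and compaction figures can be attributed per table as well as per server, via the new MetricsTable collaborator. A hedged sketch of that two-level fan-out with illustrative counter types:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAdder;

/** Fans one observation out to a server-wide and a per-table counter. */
final class FlushMetrics {
  private final LongAdder serverFlushTimeMs = new LongAdder();
  private final Map<String, LongAdder> perTableFlushTimeMs = new ConcurrentHashMap<>();

  /** Mirrors updateFlush(table, t, ...) in spirit. */
  void updateFlush(String table, long timeMs) {
    serverFlushTimeMs.add(timeMs);           // server-level aggregate
    perTableFlushTimeMs
        .computeIfAbsent(table, t -> new LongAdder())
        .add(timeMs);                        // per-table aggregate added by this change
  }

  long serverTotal() { return serverFlushTimeMs.sum(); }

  long tableTotal(String table) {
    LongAdder a = perTableFlushTimeMs.get(table);
    return a == null ? 0 : a.sum();
  }
}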

[25/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.

2018-08-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
index c9128a4..c5b8dbd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
@@ -63,750 +63,734 @@
 055public class RegionStates {
 056  private static final Logger LOG = LoggerFactory.getLogger(RegionStates.class);
 057
-058  // TODO: need to be more specific, i.e, OPENING vs. OPEN, CLOSING vs. CLOSED.
-059  static final State[] STATES_EXPECTED_ON_OPEN = new State[] {
-060    State.OPEN, // State may already be OPEN if we died after receiving the OPEN from regionserver
-061                // but before complete finish of AssignProcedure. HBASE-20100.
-062    State.OFFLINE, State.CLOSED, State.ABNORMALLY_CLOSED, // disable/offline
-063    State.SPLITTING, // ServerCrashProcedure
-064    State.OPENING, State.FAILED_OPEN, // already in-progress (retrying)
-065    State.MERGED, State.SPLITTING_NEW
-066  };
-067
-068  static final State[] STATES_EXPECTED_ON_CLOSE = new State[] {
-069    State.SPLITTING, State.MERGING, State.OPENING, // ServerCrashProcedure
-070    State.OPEN,   // enabled/open
-071    State.CLOSING // already in-progress (retrying)
-072  };
-073
-074  // This comparator sorts the RegionStates by time stamp then Region name.
-075  // Comparing by timestamp alone can lead us to discard different RegionStates that happen
-076  // to share a timestamp.
-077  private static class RegionStateStampComparator implements Comparator<RegionState> {
-078    @Override
-079    public int compare(final RegionState l, final RegionState r) {
-080      int stampCmp = Long.compare(l.getStamp(), r.getStamp());
-081      return stampCmp != 0 ? stampCmp : RegionInfo.COMPARATOR.compare(l.getRegion(), r.getRegion());
-082    }
-083  }
-084
-085  public final static RegionStateStampComparator REGION_STATE_STAMP_COMPARATOR =
-086      new RegionStateStampComparator();
-087
-088  // TODO: Replace the ConcurrentSkipListMaps
-089  /**
-090   * RegionName -- i.e. RegionInfo.getRegionName() -- as bytes to {@link RegionStateNode}
-091   */
-092  private final ConcurrentSkipListMap<byte[], RegionStateNode> regionsMap =
-093      new ConcurrentSkipListMap<byte[], RegionStateNode>(Bytes.BYTES_COMPARATOR);
+058  // This comparator sorts the RegionStates by time stamp then Region name.
+059  // Comparing by timestamp alone can lead us to discard different RegionStates that happen
+060  // to share a timestamp.
+061  private static class RegionStateStampComparator implements Comparator<RegionState> {
+062    @Override
+063    public int compare(final RegionState l, final RegionState r) {
+064      int stampCmp = Long.compare(l.getStamp(), r.getStamp());
+065      return stampCmp != 0 ? stampCmp : RegionInfo.COMPARATOR.compare(l.getRegion(), r.getRegion());
+066    }
+067  }
+068
+069  public final static RegionStateStampComparator REGION_STATE_STAMP_COMPARATOR =
+070      new RegionStateStampComparator();
+071
+072  // TODO: Replace the ConcurrentSkipListMaps
+073  /**
+074   * RegionName -- i.e. RegionInfo.getRegionName() -- as bytes to {@link RegionStateNode}
+075   */
+076  private final ConcurrentSkipListMap<byte[], RegionStateNode> regionsMap =
+077      new ConcurrentSkipListMap<byte[], RegionStateNode>(Bytes.BYTES_COMPARATOR);
+078
+079  private final ConcurrentSkipListMap<RegionInfo, RegionStateNode> regionInTransition =
+080      new ConcurrentSkipListMap<RegionInfo, RegionStateNode>(RegionInfo.COMPARATOR);
+081
+082  /**
+083   * Regions marked as offline on a read of hbase:meta. Unused or at least, once
+084   * offlined, regions have no means of coming on line again. TODO.
+085   */
+086  private final ConcurrentSkipListMap<RegionInfo, RegionStateNode> regionOffline =
+087      new ConcurrentSkipListMap<RegionInfo, RegionStateNode>();
+088
+089  private final ConcurrentSkipListMap<byte[], RegionFailedOpen> regionFailedOpen =
+090      new ConcurrentSkipListMap<byte[], RegionFailedOpen>(Bytes.BYTES_COMPARATOR);
+091
+092  private final ConcurrentHashMap<ServerName, ServerStateNode> serverMap =
+093      new ConcurrentHashMap<ServerName, ServerStateNode>();
 094
-095  private final ConcurrentSkipListMap<RegionInfo, RegionStateNode> regionInTransition =
-096      new ConcurrentSkipListMap<RegionInfo, RegionStateNode>(RegionInfo.COMPARATOR);
-097
-098  /**
-099   * Regions marked as offline on a read of hbase:meta. Unused or at least, once
-100   * offlined, regions have no means of 
[25/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/apidocs/src-html/org/apache/hadoop/hbase/filter/SkipFilter.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/filter/SkipFilter.html b/apidocs/src-html/org/apache/hadoop/hbase/filter/SkipFilter.html
index 9f62fe6..9a40948 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/SkipFilter.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/SkipFilter.html
@@ -28,148 +28,163 @@
 020package org.apache.hadoop.hbase.filter;
 021
 022import java.io.IOException;
-023
-024import org.apache.hadoop.hbase.Cell;
-025import org.apache.yetus.audience.InterfaceAudience;
-026import org.apache.hadoop.hbase.exceptions.DeserializationException;
-027import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-028import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
-029import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-030
-031/**
-032 * A wrapper filter that filters an entire row if any of the Cell checks do
-033 * not pass.
-034 * <p>
-035 * For example, if all columns in a row represent weights of different things,
-036 * with the values being the actual weights, and we want to filter out the
-037 * entire row if any of its weights are zero.  In this case, we want to prevent
-038 * rows from being emitted if a single key is filtered.  Combine this filter
-039 * with a {@link ValueFilter}:
-040 * </p>
-041 * <p>
-042 * <code>
-043 * scan.setFilter(new SkipFilter(new ValueFilter(CompareOp.NOT_EQUAL,
-044 *     new BinaryComparator(Bytes.toBytes(0))));
-045 * </code>
-046 * Any row which contained a column whose value was 0 will be filtered out
-047 * (since ValueFilter will not pass that Cell).
-048 * Without this filter, the other non-zero valued columns in the row would still
-049 * be emitted.
-050 * </p>
-051 */
-052@InterfaceAudience.Public
-053public class SkipFilter extends FilterBase {
-054  private boolean filterRow = false;
-055  private Filter filter;
-056
-057  public SkipFilter(Filter filter) {
-058    this.filter = filter;
-059  }
-060
-061  public Filter getFilter() {
-062    return filter;
-063  }
-064
-065  @Override
-066  public void reset() throws IOException {
-067    filter.reset();
-068    filterRow = false;
-069  }
-070
-071  private void changeFR(boolean value) {
-072    filterRow = filterRow || value;
-073  }
-074
-075  @Override
-076  public boolean filterRowKey(Cell cell) throws IOException {
-077    // Impl in FilterBase might do unnecessary copy for Off heap backed Cells.
-078    return false;
-079  }
-080
-081  @Deprecated
-082  @Override
-083  public ReturnCode filterKeyValue(final Cell c) throws IOException {
-084    return filterCell(c);
-085  }
-086
-087  @Override
-088  public ReturnCode filterCell(final Cell c) throws IOException {
-089    ReturnCode rc = filter.filterCell(c);
-090    changeFR(rc != ReturnCode.INCLUDE);
-091    return rc;
-092  }
-093
-094  @Override
-095  public Cell transformCell(Cell v) throws IOException {
-096    return filter.transformCell(v);
-097  }
-098
-099  @Override
-100  public boolean filterRow() {
-101    return filterRow;
-102  }
-103
-104  @Override
-105  public boolean hasFilterRow() {
-106    return true;
-107  }
-108
-109  /**
-110   * @return The filter serialized using pb
-111   */
-112  @Override
-113  public byte[] toByteArray() throws IOException {
-114    FilterProtos.SkipFilter.Builder builder =
-115      FilterProtos.SkipFilter.newBuilder();
-116    builder.setFilter(ProtobufUtil.toFilter(this.filter));
-117    return builder.build().toByteArray();
-118  }
-119
-120  /**
-121   * @param pbBytes A pb serialized {@link SkipFilter} instance
-122   * @return An instance of {@link SkipFilter} made from <code>bytes</code>
-123   * @throws DeserializationException
-124   * @see #toByteArray
-125   */
-126  public static SkipFilter parseFrom(final byte [] pbBytes)
-127  throws DeserializationException {
-128    FilterProtos.SkipFilter proto;
-129    try {
-130      proto = FilterProtos.SkipFilter.parseFrom(pbBytes);
-131    } catch (InvalidProtocolBufferException e) {
-132      throw new DeserializationException(e);
-133    }
-134    try {
-135      return new SkipFilter(ProtobufUtil.toFilter(proto.getFilter()));
-136    } catch (IOException ioe) {
-137      throw new DeserializationException(ioe);
-138    }
-139  }
-140
-141  /**
-142   * @param o the other filter to compare with
-143   * @return true if and only if the fields of the filter that are serialized
-144   * are equal to the corresponding fields in other.  Used for testing.
-145   */
-146  @Override
-147  boolean areSerializedFieldsEqual(Filter o) {
-148    if (o == this) return true;
-149    if (!(o instanceof SkipFilter)) return false;
-150
-151    SkipFilter other = (SkipFilter)o;
-152    return 
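
The javadoc in the hunk above is the whole contract of SkipFilter: it delegates
each cell to a wrapped filter and, as soon as one cell fails, suppresses the
entire row. A client-side sketch of the documented ValueFilter combination,
assuming the HBase 2.x client API (CompareOperator replaces the deprecated
CompareOp used in the javadoc):

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.SkipFilter;
import org.apache.hadoop.hbase.filter.ValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipFilterExample {
  // Emit a row only if none of its cells has the value 0: ValueFilter checks
  // each cell, SkipFilter escalates a single failing cell to the whole row.
  public static Scan weightsScan() {
    Scan scan = new Scan();
    scan.setFilter(new SkipFilter(
        new ValueFilter(CompareOperator.NOT_EQUAL,
            new BinaryComparator(Bytes.toBytes(0)))));
    return scan;
  }
}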

[25/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.

2018-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
index db8431b..a8cb7c4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
@@ -885,7766 +885,7797 @@
 877   * @return What the next sequence (edit) id should be.
 878   * @throws IOException e
 879   */
-880  private long initialize(final CancelableProgressable reporter) throws IOException {
-881
-882    //Refuse to open the region if there is no column family in the table
-883    if (htableDescriptor.getColumnFamilyCount() == 0) {
-884      throw new DoNotRetryIOException("Table " + htableDescriptor.getTableName().getNameAsString()+
-885          " should have at least one column family.");
-886    }
-887
-888    MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this);
-889    long nextSeqId = -1;
-890    try {
-891      nextSeqId = initializeRegionInternals(reporter, status);
-892      return nextSeqId;
-893    } finally {
-894      // nextSeqid will be -1 if the initialization fails.
-895      // At least it will be 0 otherwise.
-896      if (nextSeqId == -1) {
-897        status.abort("Exception during region " + getRegionInfo().getRegionNameAsString() +
-898          " initialization.");
-899      }
-900    }
-901  }
-902
-903  private long initializeRegionInternals(final CancelableProgressable reporter,
-904      final MonitoredTask status) throws IOException {
-905    if (coprocessorHost != null) {
-906      status.setStatus("Running coprocessor pre-open hook");
-907      coprocessorHost.preOpen();
-908    }
-909
-910    // Write HRI to a file in case we need to recover hbase:meta
-911    // Only the primary replica should write .regioninfo
-912    if (this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
-913      status.setStatus("Writing region info on filesystem");
-914      fs.checkRegionInfoOnFilesystem();
-915    }
-916
-917    // Initialize all the HStores
-918    status.setStatus("Initializing all the Stores");
-919    long maxSeqId = initializeStores(reporter, status);
-920    this.mvcc.advanceTo(maxSeqId);
-921    if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-922      Collection<HStore> stores = this.stores.values();
-923      try {
-924        // update the stores that we are replaying
-925        LOG.debug("replaying wal for " + this.getRegionInfo().getEncodedName());
-926        stores.forEach(HStore::startReplayingFromWAL);
-927        // Recover any edits if available.
-928        maxSeqId = Math.max(maxSeqId,
-929          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
-930        // Make sure mvcc is up to max.
-931        this.mvcc.advanceTo(maxSeqId);
-932      } finally {
-933        LOG.debug("stopping wal replay for " + this.getRegionInfo().getEncodedName());
-934        // update the stores that we are done replaying
-935        stores.forEach(HStore::stopReplayingFromWAL);
-936      }
-937    }
-938    this.lastReplayedOpenRegionSeqId = maxSeqId;
-939
-940    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-941    this.writestate.flushRequested = false;
-942    this.writestate.compacting.set(0);
-943
-944    if (this.writestate.writesEnabled) {
-945      LOG.debug("Cleaning up temporary data for " + this.getRegionInfo().getEncodedName());
-946      // Remove temporary data left over from old regions
-947      status.setStatus("Cleaning up temporary data from old regions");
-948      fs.cleanupTempDir();
-949    }
-950
-951    if (this.writestate.writesEnabled) {
-952      status.setStatus("Cleaning up detritus from prior splits");
-953      // Get rid of any splits or merges that were lost in-progress.  Clean out
-954      // these directories here on open.  We may be opening a region that was
-955      // being split but we crashed in the middle of it all.
-956      LOG.debug("Cleaning up detritus for " + this.getRegionInfo().getEncodedName());
-957      fs.cleanupAnySplitDetritus();
-958      fs.cleanupMergesDir();
-959    }
+880  @VisibleForTesting
+881  long initialize(final CancelableProgressable reporter) throws IOException {
+882
+883    //Refuse to open the region if there is no column family in the table
+884    if (htableDescriptor.getColumnFamilyCount() == 0) {
+885      throw new DoNotRetryIOException("Table " + htableDescriptor.getTableName().getNameAsString()+
+886          " should have at least one column family.");
+887    }
+888
+889
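
One detail worth calling out in the hunk above: initialize() wraps
initializeRegionInternals in try/finally and uses nextSeqId == -1 as the
sentinel telling the finally block that initialization never completed, so the
monitored status is aborted without swallowing the original exception. A
stripped-down sketch of that guard, with hypothetical stand-ins for the HBase
types:

public class GuardedInit {
  interface Status {
    void setStatus(String msg);
    void abort(String msg);
  }

  // -1 means "internals never returned"; the finally block aborts the status
  // and the original exception still propagates to the caller.
  static long initialize(Status status) throws Exception {
    long nextSeqId = -1;
    try {
      nextSeqId = initializeInternals(status);
      return nextSeqId;
    } finally {
      if (nextSeqId == -1) {
        status.abort("Exception during region initialization.");
      }
    }
  }

  static long initializeInternals(Status status) throws Exception {
    status.setStatus("Initializing all the Stores");
    return 0; // stand-in for the recovered max sequence id
  }
}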

[25/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html b/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
index 4d19565..d213a46 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":9,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":9,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":9,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -49,7 +49,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-PrevClass
+PrevClass
 NextClass
 
 
@@ -134,7 +134,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class SplitTableRegionProcedure
+public class SplitTableRegionProcedure
 extends AbstractStateMachineRegionProcedure<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState>
 The procedure to split a region in a table.
  Takes lock on the parent region.
@@ -296,102 +296,97 @@ extends 
 private void
+checkClosedRegions(MasterProcedureEnv env)
+
+
+private void
 checkSplittable(MasterProcedureEnv env, RegionInfo regionToSplit, byte[] splitRow)
 Check whether the region is splittable
 
 
-
-private AssignProcedure[]
-createAssignProcedures(MasterProcedureEnv env, int regionReplication)
-
 
+private TransitRegionStateProcedure[]
+createAssignProcedures(MasterProcedureEnv env)
+
+
 void
 createDaughterRegions(MasterProcedureEnv env)
 Create daughter regions
 
 
-
-private UnassignProcedure[]
-createUnassignProcedures(MasterProcedureEnv env, int regionReplication)
-
 
+private TransitRegionStateProcedure[]
+createUnassignProcedures(MasterProcedureEnv env)
+
+
 protected void
 deserializeStateData(ProcedureStateSerializer serializer)
 Called on store load to allow the user to decode the previously serialized state.
 
 
-
+
 protected StateMachineProcedure.Flow
 executeFromState(MasterProcedureEnv env, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState state)
 called to perform a single step of the specified 'state' of the procedure
 
 
-
+
 private static long
 getDaughterRegionIdTimestamp(RegionInfo hri)
 Calculate daughter regionid to use.
 
 
-
+
 protected org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState
 getInitialState()
 Return the initial state object that will be used for the first call to executeFromState().
 
 
-
+
 private RegionInfo
 getParentRegion()
 
-
+
 private ServerName
 getParentRegionServerName(MasterProcedureEnv env)
 
-
+
 protected ProcedureMetrics
 getProcedureMetrics(MasterProcedureEnv env)
 Override this method to provide procedure specific counters for submitted count, failed count and time histogram.
 
 
-
+
 private int
 getRegionReplication(MasterProcedureEnv env)
 
-
+
 private byte[]
 getSplitRow()
 
-
+
 protected org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState
 getState(int stateId)
 Convert an ordinal (or state id) to an Enum (or more descriptive) state object.
 
 
-
+
 protected int
 getStateId(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState state)
 Convert the Enum (or more descriptive) state object to an ordinal (or state id).
 
 
-
+
 TableProcedureInterface.TableOperationType
 getTableOperationType()
 Given an operation type we can take decisions about what to do with pending operations.
 
 
-
-(package private) static boolean
-hasRecoveredEdits(MasterProcedureEnv env, RegionInfo ri)
-Check whether there are recovered.edits in the parent closed region.
-
-
 
 protected boolean
 isRollbackSupported(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState state)
@@ -448,6 +443,10 @@ extends 
+private void
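
The method list above is the standard StateMachineProcedure contract:
executeFromState performs one step and signals whether more states remain,
while getState/getStateId map between enum states and the ordinals persisted
by the procedure store. A plain-Java mirror of that dispatch pattern (the
two-state enum is invented for illustration; Flow.HAS_MORE_STATE and
Flow.NO_MORE_STATE are the real procedure2 signals):

public class MiniStateMachine {
  enum Flow { HAS_MORE_STATE, NO_MORE_STATE }
  enum SplitState { PREPARE, OPEN_DAUGHTERS } // invented two-step example

  private SplitState state = getInitialState();

  SplitState getInitialState() { return SplitState.PREPARE; }
  int getStateId(SplitState s) { return s.ordinal(); } // persisted ordinal
  SplitState getState(int id) { return SplitState.values()[id]; }

  // One step per call, exactly like executeFromState in the summary above.
  Flow executeFromState(SplitState current) {
    switch (current) {
      case PREPARE:
        this.state = SplitState.OPEN_DAUGHTERS;
        return Flow.HAS_MORE_STATE;
      case OPEN_DAUGHTERS:
        return Flow.NO_MORE_STATE;
      default:
        throw new UnsupportedOperationException("unhandled state=" + current);
    }
  }

  void run() {
    // The real framework persists getStateId(state) between steps.
    while (executeFromState(state) == Flow.HAS_MORE_STATE) {
    }
  }
}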

[25/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html
index 39170f0..7859ebc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html
@@ -230,564 +230,567 @@
 222          }
 223        }
 224      } catch (InterruptedException e) {
-225        e.printStackTrace();
-226      }
-227    }
-228
-229    @Override
-230    public void setup(Context context) throws IOException {
-231      cfRenameMap = createCfRenameMap(context.getConfiguration());
-232      filter = instantiateFilter(context.getConfiguration());
-233      int reduceNum = context.getNumReduceTasks();
-234      Configuration conf = context.getConfiguration();
-235      TableName tableName = TableName.valueOf(context.getConfiguration().get(TABLE_NAME));
-236      try (Connection conn = ConnectionFactory.createConnection(conf);
-237          RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
-238        byte[][] startKeys = regionLocator.getStartKeys();
-239        if (startKeys.length != reduceNum) {
-240          throw new IOException("Region split after job initialization");
-241        }
-242        CellWritableComparable[] startKeyWraps =
-243            new CellWritableComparable[startKeys.length - 1];
-244        for (int i = 1; i < startKeys.length; ++i) {
-245          startKeyWraps[i - 1] =
-246              new CellWritableComparable(KeyValueUtil.createFirstOnRow(startKeys[i]));
-247        }
-248        CellWritableComparablePartitioner.START_KEYS = startKeyWraps;
-249      }
-250    }
-251  }
-252
-253  /**
-254   * A mapper that just writes out KeyValues.
-255   */
-256  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS",
-257      justification="Writables are going away and this has been this way forever")
-258  public static class CellImporter extends TableMapper<ImmutableBytesWritable, Cell> {
-259    private Map<byte[], byte[]> cfRenameMap;
-260    private Filter filter;
-261    private static final Logger LOG = LoggerFactory.getLogger(CellImporter.class);
-262
-263    /**
-264     * @param row  The current table row key.
-265     * @param value  The columns.
-266     * @param context  The current context.
-267     * @throws IOException When something is broken with the data.
-268     */
-269    @Override
-270    public void map(ImmutableBytesWritable row, Result value,
-271      Context context)
-272    throws IOException {
-273      try {
-274        if (LOG.isTraceEnabled()) {
-275          LOG.trace("Considering the row."
-276              + Bytes.toString(row.get(), row.getOffset(), row.getLength()));
-277        }
-278        if (filter == null
-279            || !filter.filterRowKey(PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(),
-280                (short) row.getLength()))) {
-281          for (Cell kv : value.rawCells()) {
-282            kv = filterKv(filter, kv);
-283            // skip if we filtered it out
-284            if (kv == null) continue;
-285            context.write(row, new MapReduceExtendedCell(convertKv(kv, cfRenameMap)));
-286          }
-287        }
-288      } catch (InterruptedException e) {
-289        e.printStackTrace();
-290      }
-291    }
-292
-293    @Override
-294    public void setup(Context context) {
-295      cfRenameMap = createCfRenameMap(context.getConfiguration());
-296      filter = instantiateFilter(context.getConfiguration());
-297    }
-298  }
-299
-300  /**
-301   * Write table content out to files in hdfs.
-302   */
-303  public static class Importer extends TableMapper<ImmutableBytesWritable, Mutation> {
-304    private Map<byte[], byte[]> cfRenameMap;
-305    private List<UUID> clusterIds;
-306    private Filter filter;
-307    private Durability durability;
-308
-309    /**
-310     * @param row  The current table row key.
-311     * @param value  The columns.
-312     * @param context  The current context.
-313     * @throws IOException When something is broken with the data.
-314     */
-315    @Override
-316    public void map(ImmutableBytesWritable row, Result value,
-317      Context context)
-318    throws IOException {
-319      try {
-320        writeResult(row, value, context);
-321      } catch (InterruptedException e) {
-322        e.printStackTrace();
-323      }
-324    }
-325
-326    private void writeResult(ImmutableBytesWritable key, Result result, Context context)
-327    throws IOException, InterruptedException {
-328      Put put = null;
-329      Delete delete = null;
-330      if (LOG.isTraceEnabled()) {
-331        LOG.trace("Considering the row."
-332            + 
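
The setup method in the hunk above ties reduce-side partitioning to the
table's current layout: it fetches the region start keys, requires exactly one
reducer per region, and keeps the first-on-row key of every region boundary
except the first. A sketch of the lookup such a partitioner performs, using an
unsigned byte[] comparison (JDK 9+) as a stand-in for the
CellWritableComparable machinery:

import java.util.Arrays;

public class StartKeyPartitioner {
  // Split points are the start keys of regions 1..n-1; region 0's empty
  // start key is skipped, exactly as in the setup method above.
  private final byte[][] splitPoints;

  StartKeyPartitioner(byte[][] startKeys) {
    this.splitPoints = Arrays.copyOfRange(startKeys, 1, startKeys.length);
  }

  // Reducer index = number of split points at or below the row key, so each
  // reducer receives exactly the rows of one region. HBase orders row keys
  // as unsigned bytes, hence compareUnsigned.
  int getPartition(byte[] rowKey) {
    int partition = 0;
    for (byte[] split : splitPoints) {
      if (Arrays.compareUnsigned(rowKey, split) >= 0) {
        partition++;
      }
    }
    return partition;
  }

  public static void main(String[] args) {
    byte[][] startKeys = { {}, { 'm' } }; // two regions: [,m) and [m,)
    StartKeyPartitioner p = new StartKeyPartitioner(startKeys);
    System.out.println(p.getPartition(new byte[] { 'a' })); // 0
    System.out.println(p.getPartition(new byte[] { 'z' })); // 1
  }
}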

[25/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
index b7b4236..3d1edb3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
@@ -259,1863 +259,1867 @@
 251   * + Metadata!  + <= See note on BLOCK_METADATA_SPACE above.
 252   * ++
 253   * </code>
-254   * @see #serialize(ByteBuffer)
+254   * @see #serialize(ByteBuffer, boolean)
 255   */
-256  static final CacheableDeserializer<Cacheable> BLOCK_DESERIALIZER =
-257      new CacheableDeserializer<Cacheable>() {
-258    @Override
-259    public HFileBlock deserialize(ByteBuff buf, boolean reuse, MemoryType memType)
-260        throws IOException {
-261      // The buf has the file block followed by block metadata.
-262      // Set limit to just before the BLOCK_METADATA_SPACE then rewind.
-263      buf.limit(buf.limit() - BLOCK_METADATA_SPACE).rewind();
-264      // Get a new buffer to pass the HFileBlock for it to 'own'.
-265      ByteBuff newByteBuff;
-266      if (reuse) {
-267        newByteBuff = buf.slice();
-268      } else {
-269        int len = buf.limit();
-270        newByteBuff = new SingleByteBuff(ByteBuffer.allocate(len));
-271        newByteBuff.put(0, buf, buf.position(), len);
-272      }
-273      // Read out the BLOCK_METADATA_SPACE content and shove into our HFileBlock.
-274      buf.position(buf.limit());
-275      buf.limit(buf.limit() + HFileBlock.BLOCK_METADATA_SPACE);
-276      boolean usesChecksum = buf.get() == (byte) 1;
-277      long offset = buf.getLong();
-278      int nextBlockOnDiskSize = buf.getInt();
-279      HFileBlock hFileBlock =
-280          new HFileBlock(newByteBuff, usesChecksum, memType, offset, nextBlockOnDiskSize, null);
-281      return hFileBlock;
-282    }
-283
-284    @Override
-285    public int getDeserialiserIdentifier() {
-286      return DESERIALIZER_IDENTIFIER;
-287    }
-288
-289    @Override
-290    public HFileBlock deserialize(ByteBuff b) throws IOException {
-291      // Used only in tests
-292      return deserialize(b, false, MemoryType.EXCLUSIVE);
-293    }
-294  };
-295
-296  private static final int DESERIALIZER_IDENTIFIER;
-297  static {
-298    DESERIALIZER_IDENTIFIER =
-299        CacheableDeserializerIdManager.registerDeserializer(BLOCK_DESERIALIZER);
-300  }
-301
-302  /**
-303   * Copy constructor. Creates a shallow copy of {@code that}'s buffer.
-304   */
-305  private HFileBlock(HFileBlock that) {
-306    this(that, false);
-307  }
-308
-309  /**
-310   * Copy constructor. Creates a shallow/deep copy of {@code that}'s buffer as per the boolean
-311   * param.
-312   */
-313  private HFileBlock(HFileBlock that, boolean bufCopy) {
-314    init(that.blockType, that.onDiskSizeWithoutHeader,
-315        that.uncompressedSizeWithoutHeader, that.prevBlockOffset,
-316        that.offset, that.onDiskDataSizeWithHeader, that.nextBlockOnDiskSize, that.fileContext);
-317    if (bufCopy) {
-318      this.buf = new SingleByteBuff(ByteBuffer.wrap(that.buf.toBytes(0, that.buf.limit())));
-319    } else {
-320      this.buf = that.buf.duplicate();
-321    }
-322  }
-323
-324  /**
-325   * Creates a new {@link HFile} block from the given fields. This constructor
-326   * is used only while writing blocks and caching,
-327   * and is sitting in a byte buffer and we want to stuff the block into cache.
-328   *
-329   * <p>TODO: The caller presumes no checksumming
-330   * required of this block instance since going into cache; checksum already verified on
-331   * underlying block data pulled in from filesystem. Is that correct? What if cache is SSD?
+256  public static final CacheableDeserializer<Cacheable> BLOCK_DESERIALIZER = new BlockDeserializer();
+257
+258  public static final class BlockDeserializer implements CacheableDeserializer<Cacheable> {
+259    private BlockDeserializer() {
+260    }
+261
+262    @Override
+263    public HFileBlock deserialize(ByteBuff buf, boolean reuse, MemoryType memType)
+264        throws IOException {
+265      // The buf has the file block followed by block metadata.
+266      // Set limit to just before the BLOCK_METADATA_SPACE then rewind.
+267      buf.limit(buf.limit() - BLOCK_METADATA_SPACE).rewind();
+268      // Get a new buffer to pass the HFileBlock for it to 'own'.
+269      ByteBuff newByteBuff;
+270      if (reuse) {
+271        newByteBuff = buf.slice();
+272      } else {
+273        int len = buf.limit();
+274        newByteBuff = new SingleByteBuff(ByteBuffer.allocate(len));
+275        newByteBuff.put(0, buf, buf.position(), len);
+276      }
+277      // Read 
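
The deserializer in the hunk above relies on a fixed trailer: the cached block
bytes are followed by BLOCK_METADATA_SPACE bytes holding a usesChecksum flag
(1 byte), the block's file offset (8 bytes) and the next block's on-disk size
(4 bytes). A self-contained sketch of that trailer in plain java.nio; the
field order is taken from the reads above, and BLOCK_METADATA_SPACE = 1 + 8 + 4
is an inference from them:

import java.nio.ByteBuffer;

public class BlockMetadataTrailer {
  // 1 byte usesChecksum + 8 bytes offset + 4 bytes nextBlockOnDiskSize.
  static final int BLOCK_METADATA_SPACE = 1 + 8 + 4;

  static ByteBuffer serialize(byte[] blockBytes, boolean usesChecksum,
      long offset, int nextBlockOnDiskSize) {
    ByteBuffer buf = ByteBuffer.allocate(blockBytes.length + BLOCK_METADATA_SPACE);
    buf.put(blockBytes);
    buf.put(usesChecksum ? (byte) 1 : (byte) 0);
    buf.putLong(offset);
    buf.putInt(nextBlockOnDiskSize);
    buf.flip();
    return buf;
  }

  static void deserialize(ByteBuffer buf) {
    // Same dance as BLOCK_DESERIALIZER: the metadata sits after the data.
    int dataLen = buf.limit() - BLOCK_METADATA_SPACE;
    buf.position(dataLen);
    boolean usesChecksum = buf.get() == (byte) 1;
    long offset = buf.getLong();
    int nextBlockOnDiskSize = buf.getInt();
    System.out.printf("checksum=%b offset=%d next=%d data=%d bytes%n",
        usesChecksum, offset, nextBlockOnDiskSize, dataLen);
  }

  public static void main(String[] args) {
    deserialize(serialize(new byte[] { 1, 2, 3 }, true, 4096L, 512));
  }
}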

[25/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
index f86b651..d3f7afa 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
@@ -216,10 +216,10 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType
-org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface.PeerOperationType
-org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType
 org.apache.hadoop.hbase.master.procedure.MetaProcedureInterface.MetaOperationType
+org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType
+org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface.PeerOperationType
+org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.html b/devapidocs/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.html
index 35ac4cd..2fa55b6 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.html
@@ -184,10 +184,6 @@ implements latch
 
 
-private boolean
-locked
-
-
 protected String
 peerId
 
@@ -259,41 +255,43 @@ implements 
 protected boolean
-hasLock(MasterProcedureEnv env)
-This is used in conjunction with Procedure.holdLock(Object).
-
-
-
-protected boolean
 holdLock(MasterProcedureEnv env)
 Used to keep the procedure lock even when the procedure is yielding or suspended.
 
 
-
+
 protected void
 refreshPeer(MasterProcedureEnv env, PeerProcedureInterface.PeerOperationType type)
 
-
+
 protected void
 releaseLock(MasterProcedureEnv env)
 The user should override this method, and release lock if necessary.
 
 
-
+
 protected void
 rollbackState(MasterProcedureEnv env, TState state)
 called to perform the rollback of the specified state
 
 
-
+
 protected void
 serializeStateData(ProcedureStateSerializer serializer)
 The user-level code of the procedure may have some state to persist (e.g.
 
 
+
+protected boolean
+waitInitialized(MasterProcedureEnv env)
+The Procedure.doAcquireLock(Object, ProcedureStore) will be split into two steps, first, it will call us to determine whether we need to wait for initialization, second, it will call Procedure.acquireLock(Object) to actually handle the lock for this procedure.
+
+
 
 
 
@@ -307,7 +305,7 @@ implements Procedure
-addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, doAcquireLock, doExecute, doReleaseLock, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasOwner, hasParent, hasTimeout, haveSameParent, incChildrenLatch, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, toString, toStringClass, toStringClassDetails, toStringDetails, toStringSimpleSB, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted
+addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, doExecute, doRollback, elapsedTime, getChildrenLatch, getException,
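
The waitInitialized entry above describes a two-step lock protocol:
doAcquireLock first asks the procedure whether master initialization is still
pending, and only then delegates to acquireLock. A plain-Java sketch of that
split with simplified stand-ins for the procedure2 types (the LockState names
match the real framework; everything else is illustrative):

public class TwoStepAcquire {
  enum LockState { LOCK_ACQUIRED, LOCK_YIELD_WAIT, LOCK_EVENT_WAIT }

  interface Env { boolean masterInitialized(); }

  abstract static class Procedure {
    // Step 1: subclasses say whether they must wait for initialization.
    protected abstract boolean waitInitialized(Env env);
    // Step 2: the actual lock handling.
    protected abstract LockState acquireLock(Env env);

    // Framework-side template method mirroring doAcquireLock.
    final LockState doAcquireLock(Env env) {
      if (waitInitialized(env)) {
        return LockState.LOCK_EVENT_WAIT; // suspend until the master is ready
      }
      return acquireLock(env);
    }
  }

  static class PeerProcedure extends Procedure {
    @Override protected boolean waitInitialized(Env env) {
      return !env.masterInitialized();
    }
    @Override protected LockState acquireLock(Env env) {
      return LockState.LOCK_ACQUIRED; // real code takes the peer lock here
    }
  }
}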
 

[25/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
index 233dba3..91b9055 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
@@ -540,1205 +540,1204 @@
 532      sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> {
 533        Deque<BalancerRegionLoad> rLoads = oldLoads.get(Bytes.toString(regionName));
 534        if (rLoads == null) {
-535          // There was nothing there
-536          rLoads = new ArrayDeque<>();
-537        } else if (rLoads.size() >= numRegionLoadsToRemember) {
-538          rLoads.remove();
-539        }
-540        rLoads.add(new BalancerRegionLoad(rm));
-541        loads.put(Bytes.toString(regionName), rLoads);
-542      });
-543    });
-544
-545    for(CostFromRegionLoadFunction cost : regionLoadFunctions) {
-546      cost.setLoads(loads);
-547    }
-548  }
-549
-550  protected void initCosts(Cluster cluster) {
-551    for (CostFunction c:costFunctions) {
-552      c.init(cluster);
-553    }
-554  }
-555
-556  protected void updateCostsWithAction(Cluster cluster, Action action) {
-557    for (CostFunction c : costFunctions) {
-558      c.postAction(action);
-559    }
-560  }
-561
-562  /**
-563   * Get the names of the cost functions
-564   */
-565  public String[] getCostFunctionNames() {
-566    if (costFunctions == null) return null;
-567    String[] ret = new String[costFunctions.length];
-568    for (int i = 0; i < costFunctions.length; i++) {
-569      CostFunction c = costFunctions[i];
-570      ret[i] = c.getClass().getSimpleName();
-571    }
-572
-573    return ret;
-574  }
-575
-576  /**
-577   * This is the main cost function.  It will compute a cost associated with a proposed cluster
-578   * state.  All different costs will be combined with their multipliers to produce a double cost.
-579   *
-580   * @param cluster The state of the cluster
-581   * @param previousCost the previous cost. This is used as an early out.
-582   * @return a double of a cost associated with the proposed cluster state.  This cost is an
-583   *         aggregate of all individual cost functions.
-584   */
-585  protected double computeCost(Cluster cluster, double previousCost) {
-586    double total = 0;
-587
-588    for (int i = 0; i < costFunctions.length; i++) {
-589      CostFunction c = costFunctions[i];
-590      this.tempFunctionCosts[i] = 0.0;
-591
-592      if (c.getMultiplier() <= 0) {
-593        continue;
-594      }
-595
-596      Float multiplier = c.getMultiplier();
-597      Double cost = c.cost();
-598
-599      this.tempFunctionCosts[i] = multiplier*cost;
-600      total += this.tempFunctionCosts[i];
-601
-602      if (total > previousCost) {
-603        break;
-604      }
-605    }
-606
-607    return total;
-608  }
-609
-610  /** Generates a candidate action to be applied to the cluster for cost function search */
-611  abstract static class CandidateGenerator {
-612    abstract Cluster.Action generate(Cluster cluster);
-613
-614    /**
-615     * From a list of regions pick a random one. Null can be returned which
-616     * {@link StochasticLoadBalancer#balanceCluster(Map)} recognize as signal to try a region move
-617     * rather than swap.
-618     *
-619     * @param cluster        The state of the cluster
-620     * @param server         index of the server
-621     * @param chanceOfNoSwap Chance that this will decide to try a move rather
-622     *                       than a swap.
-623     * @return a random {@link RegionInfo} or null if an asymmetrical move is
-624     *         suggested.
-625     */
-626    protected int pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
-627      // Check to see if this is just a move.
-628      if (cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat() < chanceOfNoSwap) {
-629        // signal a move only.
-630        return -1;
-631      }
-632      int rand = RANDOM.nextInt(cluster.regionsPerServer[server].length);
-633      return cluster.regionsPerServer[server][rand];
-634
-635    }
-636    protected int pickRandomServer(Cluster cluster) {
-637      if (cluster.numServers < 1) {
-638        return -1;
-639      }
-640
-641      return RANDOM.nextInt(cluster.numServers);
-642    }
-643
-644    protected int pickRandomRack(Cluster cluster) {
-645      if (cluster.numRacks < 1) {
-646        return -1;
-647      }
-648
-649      return 
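
computeCost in the hunk above is a weighted sum with an early exit: once the
running total exceeds previousCost, the candidate cluster state can no longer
beat the current one, so the remaining cost functions are skipped. A
self-contained sketch of that aggregation pattern (the CostFunction interface
is a simplified stand-in for the balancer's inner class):

public class WeightedCost {
  // Simplified stand-in for StochasticLoadBalancer.CostFunction.
  interface CostFunction {
    float getMultiplier();
    double cost(); // normalized to [0, 1] in the real balancer
  }

  static double computeCost(CostFunction[] costFunctions, double previousCost) {
    double total = 0;
    for (CostFunction c : costFunctions) {
      if (c.getMultiplier() <= 0) {
        continue; // disabled cost functions contribute nothing
      }
      total += c.getMultiplier() * c.cost();
      if (total > previousCost) {
        break; // early out: this candidate is already worse
      }
    }
    return total;
  }

  public static void main(String[] args) {
    CostFunction regionCount = new CostFunction() {
      public float getMultiplier() { return 500; }
      public double cost() { return 0.1; }
    };
    System.out.println(computeCost(new CostFunction[] { regionCount }, 100)); // 50.0
  }
}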

[25/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/NamespaceDescriptor.Builder.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/NamespaceDescriptor.Builder.html b/apidocs/org/apache/hadoop/hbase/NamespaceDescriptor.Builder.html
index c15c615..869246c 100644
--- a/apidocs/org/apache/hadoop/hbase/NamespaceDescriptor.Builder.html
+++ b/apidocs/org/apache/hadoop/hbase/NamespaceDescriptor.Builder.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
@@ -19,45 +19,45 @@
 }
 //-->
 var methods = {"i0":10,"i1":10,"i2":10,"i3":10};
-var tabs = {65535:["t0","所有方法"],2:["t2","实例方法"],8:["t4","具体方法"]};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
 var activeTableTab = "activeTableTab";
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
-跳过导航链接
+Skip navigation links
 
 
 
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
 
 
 
 
-上一个类
-下一个类
+PrevClass
+NextClass
 
 
-框架
-无框架
+Frames
+NoFrames
 
 
-所有类
+AllClasses
 
 
 
 
org.apache.hadoop.hbase
-

类 NamespaceDescriptor.Builder

+

Class NamespaceDescriptor.Builder