[24/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
index 68302bf..a5a8905 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
@@ -2197,1768 +2197,1775 @@
 2189      warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
 2190    }
 2191
-2192    for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
-2193      if (hcd.getTimeToLive() <= 0) {
-2194        String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
-2195        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
-2196      }
-2197
-2198      // check blockSize
-2199      if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
-2200        String message = "Block size for column family " + hcd.getNameAsString()
-2201            + "  must be between 1K and 16MB.";
+2192    // check that we have minimum 1 region replicas
+2193    int regionReplicas = htd.getRegionReplication();
+2194    if (regionReplicas < 1) {
+2195      String message = "Table region replication should be at least one.";
+2196      warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
+2197    }
+2198
+2199    for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
+2200      if (hcd.getTimeToLive() <= 0) {
+2201        String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
 2202        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
 2203      }
 2204
-2205      // check versions
-2206      if (hcd.getMinVersions() < 0) {
-2207        String message = "Min versions for column family " + hcd.getNameAsString()
-2208          + "  must be positive.";
+2205      // check blockSize
+2206      if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
+2207        String message = "Block size for column family " + hcd.getNameAsString()
+2208            + "  must be between 1K and 16MB.";
 2209        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
 2210      }
-2211      // max versions already being checked
-2212
-2213      // HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor
-2214      //  does not throw IllegalArgumentException
-2215      // check minVersions <= maxVersions
-2216      if (hcd.getMinVersions() > hcd.getMaxVersions()) {
-2217        String message = "Min versions for column family " + hcd.getNameAsString()
-2218            + " must be less than the Max versions.";
-2219        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
-2220      }
-2221
-2222      // check replication scope
-2223      checkReplicationScope(hcd);
-2224      // check bloom filter type
-2225      checkBloomFilterType(hcd);
-2226
-2227      // check data replication factor, it can be 0(default value) when user has not explicitly
-2228      // set the value, in this case we use default replication factor set in the file system.
-2229      if (hcd.getDFSReplication() < 0) {
-2230        String message = "HFile Replication for column family " + hcd.getNameAsString()
-2231            + "  must be greater than zero.";
-2232        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
-2233      }
-2234
-2235      // TODO: should we check coprocessors and encryption ?
-2236    }
-2237  }
-2238
-2239  private void checkReplicationScope(ColumnFamilyDescriptor hcd) throws IOException {
-2240    // check replication scope
-2241    WALProtos.ScopeType scop = WALProtos.ScopeType.valueOf(hcd.getScope());
-2242    if (scop == null) {
-2243      String message = "Replication scope for column family "
-2244          + hcd.getNameAsString() + " is " + hcd.getScope() + " which is invalid.";
+2211
+2212      // check versions
+2213      if (hcd.getMinVersions() < 0) {
+2214        String message = "Min versions for column family " + hcd.getNameAsString()
+2215          + "  must be positive.";
+2216        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
+2217      }
+2218      // max versions already being checked
+2219
+2220      // HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor
+2221      //  does not throw IllegalArgumentException
+2222      // check minVersions <= maxVersions
+2223      if (hcd.getMinVersions() > hcd.getMaxVersions()) {
+2224        String message = "Min versions for column family " + hcd.getNameAsString()
+2225            + " must be less than the Max versions.";
+2226        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
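The hunk above inserts a table-level rule (at least one region replica) ahead of the existing per-column-family checks. A minimal sketch of the same validation rules, restated outside HMaster so they can be read in isolation; the Family holder below is a hypothetical stand-in for ColumnFamilyDescriptor, not the HBase API:

    // Sketch only: mirrors the checks in the hunk above under simplified types.
    final class DescriptorRules {
      static final class Family {
        final String name;
        final int ttl, blockSize, minVersions, maxVersions;
        Family(String name, int ttl, int blockSize, int minVersions, int maxVersions) {
          this.name = name; this.ttl = ttl; this.blockSize = blockSize;
          this.minVersions = minVersions; this.maxVersions = maxVersions;
        }
      }

      static void validate(int regionReplication, Iterable<Family> families) {
        // Table-level check added by this commit: at least one region replica.
        if (regionReplication < 1) {
          throw new IllegalArgumentException("Table region replication should be at least one.");
        }
        for (Family f : families) {
          if (f.ttl <= 0) {
            throw new IllegalArgumentException("TTL for " + f.name + " must be positive.");
          }
          if (f.blockSize < 1024 || f.blockSize > 16 * 1024 * 1024) {
            throw new IllegalArgumentException("Block size for " + f.name + " must be between 1K and 16MB.");
          }
          if (f.minVersions < 0 || f.minVersions > f.maxVersions) {
            throw new IllegalArgumentException("Min versions for " + f.name + " must be in [0, maxVersions].");
          }
        }
      }
    }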

[24/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
index 9ef9095..f8bdc09 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":9,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":9,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":9,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":9,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":9,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":
 
10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":9,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":9,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10,"i173":10,"i174":10,"i175":10,"i176":9};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":9,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":9,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":9,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":9,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":9,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":
 
10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":9,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":9,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10,"i173":10,"i174":10,"i175":10,"i176":10,"i177":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.LimitedPrivate(value="Tools")
-public class HMaster
+public class HMaster
 extends HRegionServer
 implements MasterServices
 HMaster is the "master server" for HBase. An HBase cluster 
has one active
@@ -460,18 +460,22 @@ implements splitPlanCount
 
 
+private SplitWALManager
+splitWALManager
+
+
 private SyncReplicationReplayWALManager
 syncReplicationReplayWALManager
 
-
+
 private TableStateManager
 tableStateManager
 
-
+
 private static org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet<Class<? extends Procedure>>
 UNSUPPORTED_PROCEDURES
 
-
+
 private MasterWalManager
 walManager
 
@@ -1061,12 +1065,16 @@ implements getSplitPlanCount()
 
 
+SplitWALManager
+getSplitWALManager()
+
+
 

[24/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.TableCache.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.TableCache.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.TableCache.html
index 9aa9b59..ac7e0ea 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.TableCache.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.TableCache.html
@@ -28,520 +28,565 @@
 020import static 
org.apache.hadoop.hbase.HConstants.NINES;
 021import static 
org.apache.hadoop.hbase.HConstants.ZEROES;
 022import static 
org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-023import static 
org.apache.hadoop.hbase.client.ConnectionUtils.createClosestRowAfter;
-024import static 
org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
-025import static 
org.apache.hadoop.hbase.client.RegionInfo.createRegionName;
-026import static 
org.apache.hadoop.hbase.util.Bytes.BYTES_COMPARATOR;
-027import static 
org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
-028
-029import java.io.IOException;
-030import java.util.Arrays;
-031import java.util.HashSet;
-032import java.util.Iterator;
-033import java.util.LinkedHashMap;
-034import java.util.Map;
-035import java.util.Optional;
-036import java.util.Set;
-037import 
java.util.concurrent.CompletableFuture;
-038import 
java.util.concurrent.ConcurrentHashMap;
-039import 
java.util.concurrent.ConcurrentMap;
-040import 
java.util.concurrent.ConcurrentNavigableMap;
-041import 
java.util.concurrent.ConcurrentSkipListMap;
-042import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-043import 
org.apache.hadoop.hbase.HConstants;
-044import 
org.apache.hadoop.hbase.HRegionLocation;
-045import 
org.apache.hadoop.hbase.MetaTableAccessor;
-046import 
org.apache.hadoop.hbase.RegionLocations;
-047import 
org.apache.hadoop.hbase.TableName;
-048import 
org.apache.hadoop.hbase.TableNotFoundException;
-049import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-050import 
org.apache.hadoop.hbase.util.Bytes;
-051import 
org.apache.yetus.audience.InterfaceAudience;
-052import org.slf4j.Logger;
-053import org.slf4j.LoggerFactory;
-054
-055import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-056
-057/**
-058 * The asynchronous locator for regions 
other than meta.
-059 */
-060@InterfaceAudience.Private
-061class AsyncNonMetaRegionLocator {
-062
-063  private static final Logger LOG = 
LoggerFactory.getLogger(AsyncNonMetaRegionLocator.class);
+023import static 
org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.canUpdateOnError;
+024import static 
org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.createRegionLocations;
+025import static 
org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.isGood;
+026import static 
org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.mergeRegionLocations;
+027import static 
org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.removeRegionLocation;
+028import static 
org.apache.hadoop.hbase.client.ConnectionUtils.createClosestRowAfter;
+029import static 
org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
+030import static 
org.apache.hadoop.hbase.client.RegionInfo.createRegionName;
+031import static 
org.apache.hadoop.hbase.util.Bytes.BYTES_COMPARATOR;
+032import static 
org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
+033
+034import java.io.IOException;
+035import java.util.Arrays;
+036import java.util.HashSet;
+037import java.util.Iterator;
+038import java.util.LinkedHashMap;
+039import java.util.Map;
+040import java.util.Optional;
+041import java.util.Set;
+042import 
java.util.concurrent.CompletableFuture;
+043import 
java.util.concurrent.ConcurrentHashMap;
+044import 
java.util.concurrent.ConcurrentMap;
+045import 
java.util.concurrent.ConcurrentNavigableMap;
+046import 
java.util.concurrent.ConcurrentSkipListMap;
+047import 
org.apache.commons.lang3.ObjectUtils;
+048import 
org.apache.hadoop.hbase.DoNotRetryIOException;
+049import 
org.apache.hadoop.hbase.HBaseIOException;
+050import 
org.apache.hadoop.hbase.HConstants;
+051import 
org.apache.hadoop.hbase.HRegionLocation;
+052import 
org.apache.hadoop.hbase.MetaTableAccessor;
+053import 
org.apache.hadoop.hbase.RegionLocations;
+054import 
org.apache.hadoop.hbase.TableName;
+055import 
org.apache.hadoop.hbase.TableNotFoundException;
+056import 
org.apache.hadoop.hbase.client.Scan.ReadType;
+057import 
org.apache.hadoop.hbase.util.Bytes;
+058import 
org.apache.yetus.audience.InterfaceAudience;
+059import org.slf4j.Logger;
+060import org.slf4j.LoggerFactory;
+061
+062import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+063import 
org.apache.hbase.thirdparty.com.google.common.base.Objects;

[24/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
index c4e8c8b..aa58108 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
@@ -82,602 +82,613 @@
 074  public static final String USER_COPROCESSORS_ENABLED_CONF_KEY =
 075    "hbase.coprocessor.user.enabled";
 076  public static final boolean DEFAULT_USER_COPROCESSORS_ENABLED = true;
-077
-078  private static final Logger LOG = LoggerFactory.getLogger(CoprocessorHost.class);
-079  protected Abortable abortable;
-080  /** Ordered set of loaded coprocessors with lock */
-081  protected final SortedList<E> coprocEnvironments =
-082      new SortedList<>(new EnvironmentPriorityComparator());
-083  protected Configuration conf;
-084  // unique file prefix to use for local copies of jars when classloading
-085  protected String pathPrefix;
-086  protected AtomicInteger loadSequence = new AtomicInteger();
-087
-088  public CoprocessorHost(Abortable abortable) {
-089    this.abortable = abortable;
-090    this.pathPrefix = UUID.randomUUID().toString();
-091  }
-092
-093  /**
-094   * Not to be confused with the per-object _coprocessors_ (above),
-095   * coprocessorNames is static and stores the set of all coprocessors ever
-096   * loaded by any thread in this JVM. It is strictly additive: coprocessors are
-097   * added to coprocessorNames, by checkAndLoadInstance() but are never removed, since
-098   * the intention is to preserve a history of all loaded coprocessors for
-099   * diagnosis in case of server crash (HBASE-4014).
-100   */
-101  private static Set<String> coprocessorNames =
-102      Collections.synchronizedSet(new HashSet<String>());
-103
-104  public static Set<String> getLoadedCoprocessors() {
-105    synchronized (coprocessorNames) {
-106      return new HashSet<>(coprocessorNames);
-107    }
-108  }
-109
-110  /**
-111   * Used to create a parameter to the HServerLoad constructor so that
-112   * HServerLoad can provide information about the coprocessors loaded by this
-113   * regionserver.
-114   * (HBASE-4070: Improve region server metrics to report loaded coprocessors
-115   * to master).
-116   */
-117  public Set<String> getCoprocessors() {
-118    Set<String> returnValue = new TreeSet<>();
-119    for (E e : coprocEnvironments) {
-120      returnValue.add(e.getInstance().getClass().getSimpleName());
-121    }
-122    return returnValue;
-123  }
-124
-125  /**
-126   * Load system coprocessors once only. Read the class names from configuration.
-127   * Called by constructor.
-128   */
-129  protected void loadSystemCoprocessors(Configuration conf, String confKey) {
-130    boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY,
-131      DEFAULT_COPROCESSORS_ENABLED);
-132    if (!coprocessorsEnabled) {
-133      return;
-134    }
-135
-136    Class<?> implClass;
-137
-138    // load default coprocessors from configure file
-139    String[] defaultCPClasses = conf.getStrings(confKey);
-140    if (defaultCPClasses == null || defaultCPClasses.length == 0)
-141      return;
-142
-143    int priority = Coprocessor.PRIORITY_SYSTEM;
-144    for (String className : defaultCPClasses) {
-145      className = className.trim();
-146      if (findCoprocessor(className) != null) {
-147        // If already loaded will just continue
-148        LOG.warn("Attempted duplicate loading of " + className + "; skipped");
-149        continue;
-150      }
-151      ClassLoader cl = this.getClass().getClassLoader();
-152      Thread.currentThread().setContextClassLoader(cl);
-153      try {
-154        implClass = cl.loadClass(className);
-155        // Add coprocessors as we go to guard against case where a coprocessor is specified twice
-156        // in the configuration
-157        E env = checkAndLoadInstance(implClass, priority, conf);
-158        if (env != null) {
-159          this.coprocEnvironments.add(env);
-160          LOG.info("System coprocessor {} loaded, priority={}.", className, priority);
-161          ++priority;
-162        }
-163      } catch (Throwable t) {
-164        // We always abort if system coprocessors cannot be loaded
-165        abortServer(className, t);
-166      }
-167    }
-168  }
-169
-170  /**
-171   * Load a coprocessor implementation into the host
-172   * @param path path to implementation jar
-173   * @param className the main class name
-174   * @param priority chaining priority
-175   * @param conf configuration for coprocessor
-176   * @throws java.io.IOException Exception
-177   */
-178  public E load(Path path,
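loadSystemCoprocessors() above reads class names from configuration, skips duplicates, loads each class by reflection, and aborts the server if a system coprocessor fails to load. A simplified, self-contained sketch of that loop; the abort path and the checkAndLoadInstance() environment wiring are elided:

    import java.util.LinkedHashSet;
    import java.util.Set;

    final class CoprocessorLoading {
      private static final Set<String> loaded = new LinkedHashSet<>();

      static void loadAll(String[] classNames) throws ReflectiveOperationException {
        ClassLoader cl = CoprocessorLoading.class.getClassLoader();
        for (String name : classNames) {
          name = name.trim();
          // Mirrors the "Attempted duplicate loading ... skipped" branch above.
          if (!loaded.add(name)) {
            continue;
          }
          Class<?> implClass = cl.loadClass(name);
          Object instance = implClass.getDeclaredConstructor().newInstance();
          System.out.println("Loaded system coprocessor " + instance.getClass().getName());
        }
      }
    }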

[24/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/example/class-use/MultiThreadedClientExample.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/example/class-use/MultiThreadedClientExample.html
 
b/devapidocs/org/apache/hadoop/hbase/client/example/class-use/MultiThreadedClientExample.html
index df252e4..74af90c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/example/class-use/MultiThreadedClientExample.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/example/class-use/MultiThreadedClientExample.html
@@ -120,6 +120,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/example/class-use/RefreshHFilesClient.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/example/class-use/RefreshHFilesClient.html
 
b/devapidocs/org/apache/hadoop/hbase/client/example/class-use/RefreshHFilesClient.html
index 3cd45c1..725aa02 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/example/class-use/RefreshHFilesClient.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/example/class-use/RefreshHFilesClient.html
@@ -120,6 +120,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/example/package-summary.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/example/package-summary.html 
b/devapidocs/org/apache/hadoop/hbase/client/example/package-summary.html
index 608c53e..96deaa5 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/example/package-summary.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/example/package-summary.html
@@ -199,6 +199,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/example/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/example/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/client/example/package-tree.html
index 89e1ba0..0434168 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/example/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/example/package-tree.html
@@ -159,6 +159,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/example/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/example/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/client/example/package-use.html
index d874f00..ae21e57 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/example/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/example/package-use.html
@@ -153,6 +153,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/locking/EntityLock.LockHeartbeatWorker.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/locking/EntityLock.LockHeartbeatWorker.html
 
b/devapidocs/org/apache/hadoop/hbase/client/locking/EntityLock.LockHeartbeatWorker.html
index d03818e..c35b8a4 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/locking/EntityLock.LockHeartbeatWorker.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/locking/EntityLock.LockHeartbeatWorker.html
@@ -370,6 +370,6 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 


[24/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
 
b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
index 765960b..d05647a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":9,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10};
+var methods = 
{"i0":10,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":9,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -294,46 +294,51 @@ implements 
+private Cell
+createNewCellWithTags(Mutation mutation,
+                      Cell newCell)
+
 void
 getAuths(com.google.protobuf.RpcController controller,
          org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.GetAuthsRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.GetAuthsResponse> done)

 Optional<MasterObserver>
 getMasterObserver()

 Optional<RegionObserver>
 getRegionObserver()
 Observer/Service Getters

 Iterable<com.google.protobuf.Service>
 getServices()
 Coprocessor endpoints providing protobuf services should override this method.

 private void
 initVisibilityLabelService(RegionCoprocessorEnvironment env)

 static boolean
 isCellAuthorizationSupported(org.apache.hadoop.conf.Configuration conf)

 private boolean
 isSystemOrSuperUser()

 void
 listLabels(com.google.protobuf.RpcController controller,
            org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.ListLabelsRequest request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.ListLabelsResponse> done)

 private void
 logResult(boolean isAllowed,
           String request,
@@ -342,38 +347,45 @@ implements
           List<byte[]> labelAuths,
           String regex)

+List<Pair<Cell,Cell>>
+postAppendBeforeWAL(ObserverContext<RegionCoprocessorEnvironment> ctx,
+                    Mutation mutation,
+                    List<Pair<Cell,Cell>> cellPairs)
+Called after a list of new cells has been created during an append operation, but before
+ they are committed to the WAL or memstore.
+
+List<Pair<Cell,Cell>>
+postIncrementBeforeWAL(ObserverContext<RegionCoprocessorEnvironment> ctx,
+                       Mutation mutation,
+                       List<Pair<Cell,Cell>> cellPairs)
+Called after a list of new cells has been created during an increment operation, but before
+ they are committed to the WAL or memstore.
+
 DeleteTracker
 postInstantiateDeleteTracker(ObserverContext<RegionCoprocessorEnvironment> ctx,
                              DeleteTracker delTracker)
 Called after the ScanQueryMatcher
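The two hooks added above, postAppendBeforeWAL and postIncrementBeforeWAL, hand a coprocessor the (old cell, new cell) pairs produced by an append or increment so it can substitute rewritten cells before anything is committed to the WAL or memstore. A self-contained sketch of that rewriting pattern; Cell and Pair below are simplified stand-ins for the HBase types in the signatures above:

    import java.util.ArrayList;
    import java.util.List;

    final class BeforeWalHook {
      interface Cell { byte[] value(); }
      static final class Pair<A, B> {
        final A first; final B second;
        Pair(A first, B second) { this.first = first; this.second = second; }
      }

      // For each (old cell, new cell) pair, replace the new cell with a rewritten
      // one; in VisibilityController the rewrite is createNewCellWithTags().
      static List<Pair<Cell, Cell>> beforeWal(List<Pair<Cell, Cell>> cellPairs) {
        List<Pair<Cell, Cell>> out = new ArrayList<>(cellPairs.size());
        for (Pair<Cell, Cell> p : cellPairs) {
          out.add(new Pair<>(p.first, rewrite(p.second)));
        }
        return out;
      }

      private static Cell rewrite(Cell newCell) {
        return newCell; // a real hook would attach visibility tags here
      }
    }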

[24/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
index 82d728d..ac869e3 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
@@ -168,7 +168,7 @@ implements avgStoreFileAge
 
 
-private BlockCache
+private Optional<BlockCache>
 blockCache


@@ -176,7 +176,7 @@ implements blockedRequestsCount


-private CacheStats
+private Optional<CacheStats>
 cacheStats


@@ -242,11 +242,11 @@ implements flushedCellsSize


-private CacheStats
+private Optional<CacheStats>
 l1Stats


-private CacheStats
+private Optional<CacheStats>
 l2Stats


@@ -278,7 +278,7 @@ implements minStoreFileAge


-private MobFileCache
+private Optional<MobFileCache>
 mobFileCache
 
 
@@ -1050,10 +1050,7 @@ implements 
 private void
-initBlockCache()
-It's possible that due to threading the block cache could not be initialized
- yet (testing multiple region servers in one jvm).
-
+initBlockCache()
 
 
 private void
@@ -1116,7 +1113,7 @@ implements 
 
 blockCache
-private BlockCache blockCache
+private Optional<BlockCache> blockCache
 
 
 
@@ -1125,7 +1122,34 @@ implements 
 
 mobFileCache
-private MobFileCache mobFileCache
+private Optional<MobFileCache> mobFileCache
+
+
+
+
+
+
+
+cacheStats
+private Optional<CacheStats> cacheStats
+
+
+
+
+
+
+
+l1Stats
+private Optional<CacheStats> l1Stats
+
+
+
+
+
+
+
+l2Stats
+private Optional<CacheStats> l2Stats
 
 
 
@@ -1134,7 +1158,7 @@ implements 
 
 numStores
-private volatile long numStores
+private volatile long numStores

@@ -1143,7 +1167,7 @@ implements

 numWALFiles
-private volatile long numWALFiles
+private volatile long numWALFiles

@@ -1152,7 +1176,7 @@ implements

 walFileSize
-private volatile long walFileSize
+private volatile long walFileSize

@@ -1161,7 +1185,7 @@ implements

 numStoreFiles
-private volatile long numStoreFiles
+private volatile long numStoreFiles

@@ -1170,7 +1194,7 @@ implements

 memstoreSize
-private volatile long memstoreSize
+private volatile long memstoreSize

@@ -1179,7 +1203,7 @@ implements

 storeFileSize
-private volatile long storeFileSize
+private volatile long storeFileSize

@@ -1188,7 +1212,7 @@ implements

 storeFileSizeGrowthRate
-private volatile double storeFileSizeGrowthRate
+private volatile double storeFileSizeGrowthRate

@@ -1197,7 +1221,7 @@ implements

 maxStoreFileAge
-private volatile long maxStoreFileAge
+private volatile long maxStoreFileAge

@@ -1206,7 +1230,7 @@ implements

 minStoreFileAge
-private volatile long minStoreFileAge
+private volatile long minStoreFileAge

@@ -1215,7 +1239,7 @@ implements

 avgStoreFileAge
-private volatile long avgStoreFileAge
+private volatile long avgStoreFileAge

@@ -1224,7 +1248,7 @@ implements

 numReferenceFiles
-private volatile long numReferenceFiles
+private volatile long numReferenceFiles

@@ -1233,7 +1257,7 @@ implements

 requestsPerSecond
-private volatile double requestsPerSecond
+private volatile double requestsPerSecond

@@ -1242,7 +1266,7 @@ implements

 readRequestsCount
-private volatile long readRequestsCount
+private volatile long readRequestsCount

@@ -1251,7 +1275,7 @@ implements

 readRequestsRatePerSecond
-private volatile double readRequestsRatePerSecond
+private volatile double readRequestsRatePerSecond
 
 
 
@@ -1260,7 
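The diff above switches the wrapper's cache fields from bare references to Optional, since the block cache, MOB file cache, and their stats may not exist when the wrapper is constructed. A small sketch of the resulting accessor pattern; BlockCache here is a stand-in interface, not the HBase one:

    import java.util.Optional;

    final class OptionalMetrics {
      interface BlockCache { long getCurrentSize(); }

      private final Optional<BlockCache> blockCache;

      OptionalMetrics(BlockCache cacheOrNull) {
        this.blockCache = Optional.ofNullable(cacheOrNull);
      }

      // An absent cache becomes a zero metric instead of an NPE.
      long getBlockCacheSize() {
        return blockCache.map(BlockCache::getCurrentSize).orElse(0L);
      }
    }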

[24/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.SystemExitWhenAbortTimeout.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.SystemExitWhenAbortTimeout.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.SystemExitWhenAbortTimeout.html
index ea05301..26a93dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.SystemExitWhenAbortTimeout.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.SystemExitWhenAbortTimeout.html
@@ -269,3590 +269,3574 @@
 261   */
 262  protected ClusterConnection 
clusterConnection;
 263
-264  /*
-265   * Long-living meta table locator, 
which is created when the server is started and stopped
-266   * when server shuts down. References 
to this locator shall be used to perform according
-267   * operations in EventHandlers. Primary 
reason for this decision is to make it mockable
-268   * for tests.
-269   */
-270  protected MetaTableLocator 
metaTableLocator;
-271
-272  /**
-273   * Go here to get table descriptors.
-274   */
-275  protected TableDescriptors 
tableDescriptors;
-276
-277  // Replication services. If no 
replication, this handler will be null.
-278  protected ReplicationSourceService 
replicationSourceHandler;
-279  protected ReplicationSinkService 
replicationSinkHandler;
-280
-281  // Compactions
-282  public CompactSplit 
compactSplitThread;
-283
-284  /**
-285   * Map of regions currently being 
served by this region server. Key is the
-286   * encoded region name.  All access 
should be synchronized.
-287   */
-288  protected final Map<String, HRegion> onlineRegions = new ConcurrentHashMap<>();
-289
-290  /**
-291   * Map of encoded region names to the 
DataNode locations they should be hosted on
-292   * We store the value as 
InetSocketAddress since this is used only in HDFS
-293   * API (create() that takes favored 
nodes as hints for placing file blocks).
-294   * We could have used ServerName here 
as the value class, but we'd need to
-295   * convert it to InetSocketAddress at 
some point before the HDFS API call, and
-296   * it seems a bit weird to store 
ServerName since ServerName refers to RegionServers
-297   * and here we really mean DataNode 
locations.
-298   */
-299  protected final Map<String, InetSocketAddress[]> regionFavoredNodesMap =
-300      new ConcurrentHashMap<>();
-301
-302  // Leases
-303  protected Leases leases;
+264  /**
+265   * Go here to get table descriptors.
+266   */
+267  protected TableDescriptors 
tableDescriptors;
+268
+269  // Replication services. If no 
replication, this handler will be null.
+270  protected ReplicationSourceService 
replicationSourceHandler;
+271  protected ReplicationSinkService 
replicationSinkHandler;
+272
+273  // Compactions
+274  public CompactSplit 
compactSplitThread;
+275
+276  /**
+277   * Map of regions currently being 
served by this region server. Key is the
+278   * encoded region name.  All access 
should be synchronized.
+279   */
+280  protected final Map<String, HRegion> onlineRegions = new ConcurrentHashMap<>();
+281
+282  /**
+283   * Map of encoded region names to the 
DataNode locations they should be hosted on
+284   * We store the value as 
InetSocketAddress since this is used only in HDFS
+285   * API (create() that takes favored 
nodes as hints for placing file blocks).
+286   * We could have used ServerName here 
as the value class, but we'd need to
+287   * convert it to InetSocketAddress at 
some point before the HDFS API call, and
+288   * it seems a bit weird to store 
ServerName since ServerName refers to RegionServers
+289   * and here we really mean DataNode 
locations.
+290   */
+291  protected final Map<String, InetSocketAddress[]> regionFavoredNodesMap =
+292      new ConcurrentHashMap<>();
+293
+294  // Leases
+295  protected Leases leases;
+296
+297  // Instance of the hbase executor 
executorService.
+298  protected ExecutorService 
executorService;
+299
+300  // If false, the file system has become 
unavailable
+301  protected volatile boolean fsOk;
+302  protected HFileSystem fs;
+303  protected HFileSystem walFs;
 304
-305  // Instance of the hbase executor 
executorService.
-306  protected ExecutorService 
executorService;
-307
-308  // If false, the file system has become 
unavailable
-309  protected volatile boolean fsOk;
-310  protected HFileSystem fs;
-311  protected HFileSystem walFs;
-312
-313  // Set when a report to the master 
comes back with a message asking us to
-314  // shutdown. Also set by call to stop 
when debugging or running unit tests
-315  // of HRegionServer in isolation.
-316  private volatile boolean stopped = 
false;
-317
-318  // Go down hard. Used if file system 
becomes unavailable and also in
-319  // debugging and unit tests.
-320  private volatile 
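The stopped and fsOk fields above drive shutdown: they are volatile so the main run loop sees flags flipped by other threads (a master report handler, the filesystem checker). A minimal sketch of that pattern, not the HRegionServer run loop itself:

    final class FlagDrivenLoop implements Runnable {
      private volatile boolean stopped = false; // set on shutdown request
      private volatile boolean fsOk = true;     // cleared if the FS becomes unavailable

      @Override
      public void run() {
        while (!stopped && fsOk) {
          // ... one unit of server work ...
        }
        // fall through to cleanup once either flag trips
      }

      void stop() { stopped = true; }
      void fileSystemLost() { fsOk = false; }
    }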

[24/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.html 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.html
index 8fbd7e5..c827411 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -129,7 +129,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class InitMetaProcedure
+public class InitMetaProcedure
 extends AbstractStateMachineTableProcedure<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.InitMetaState>
 This procedure is used to initialize the meta table for a new hbase deploy. It will just schedule a
 TransitRegionStateProcedure
 to assign meta.
@@ -181,9 +181,17 @@ extends Field and Description
 
 
+private int
+attempts
+
+
 private CountDownLatch
 latch
 
+
+private static org.slf4j.Logger
+LOG
+
 
 
 
@@ -199,6 +207,13 @@ extends Procedure
 NO_PROC_ID,
 NO_TIMEOUT
 
+
+
+
+
+Fields inherited from interface org.apache.hadoop.hbase.master.procedure.TableProcedureInterface
+DUMMY_NAMESPACE_TABLE_NAME
+
 
 
 
@@ -304,6 +319,12 @@ extends 
 protected boolean
+setTimeoutFailure(MasterProcedureEnv env)
+Called by the ProcedureExecutor when the timeout set by 
setTimeout() is expired.
+
+
+
+protected boolean
 waitInitialized(MasterProcedureEnv env)
 The Procedure.doAcquireLock(Object,
 ProcedureStore) will be split into two steps, first, it will
  call us to determine whether we need to wait for initialization, second, it 
will call
@@ -330,7 +351,7 @@ extends Procedure
-addStackIndex,
 afterReplay,
 beforeReplay,
 bypass,
 compareTo,
 doExecute,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProc
 IdHashCode, getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout,
 haveSameParent,
 holdLock, incChildrenLatch,
 isBypass,
 isFailed,
 isFinished,
 isInitializing,
 isLockedWhenLoading,
 isRunnable,
 isSuccess,
 isWaiting, removeStackIndex,
 setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure,
 setLastUpdate,
 set
 NonceKey, setOwner,
 setOwner,
 setParentProcId,
 setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState, setSubmittedTime,
 setTimeout,
 setTimeoutFailure,
 shouldWaitClientAck,
 skipPersistence,
 toString,
 toStringClass,
 toStringDetails, toStringSimpleSB,
 updateMetricsOnFinish,
 updateMetricsOnSubmit,
 updateTimestamp,
 wasExecuted
+addStackIndex,
 afterReplay,
 beforeReplay,
 bypass,
 compareTo,
 doExecute,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProc
 IdHashCode, getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout,
 haveSameParent,
 holdLock, incChildrenLatch,
 isBypass,
 isFailed,
 isFinished,
 isInitializing,
 isLockedWhenLoading,
 isRunnable,
 isSuccess,
 isWaiting, removeStackIndex,
 setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure,
 setLastUpdate,
 set
 NonceKey, setOwner,
 setOwner,
 setParentProcId,
 setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState, setSubmittedTime,
 setTimeout,
 shouldWaitClientAck,
 skipPersistence,
 toString,
 toStringClass,
 toStringDetails,
 toStringSimpleSB,
 updateMetricsOnFinish,
 updateMetricsOnSubmit,
 updateTimestamp,
 wasExecuted
 
 
 
@@ -353,13 +374,31 @@ extends 
+
+
+
+
+LOG
+private static final org.slf4j.Logger LOG
+
+
 
 
 
-
+
 
 latch
-private CountDownLatch latch

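The new attempts field and the setTimeoutFailure override above suggest a retry-with-backoff loop: when the timeout fires, the procedure grows its backoff and reschedules itself instead of failing. A sketch of just the backoff bookkeeping; the names are illustrative and this is not the Procedure API:

    import java.util.concurrent.TimeUnit;

    final class BackoffRetry {
      private int attempts;

      // Next sleep in ms: exponential backoff, capped at 60 seconds.
      long nextBackoffMs() {
        attempts++;
        long backoff = 1000L * (1L << Math.min(attempts, 6));
        return Math.min(backoff, TimeUnit.SECONDS.toMillis(60));
      }
    }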
[24/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AccessControlUtil.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AccessControlUtil.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AccessControlUtil.html
index 69565e9..3e1345c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AccessControlUtil.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AccessControlUtil.html
@@ -55,7 +55,7 @@
 047  private AccessControlUtil() {}
 048
 049  /**
-050   * Create a request to grant user 
permissions.
+050   * Create a request to grant user table 
permissions.
 051   *
 052   * @param username the short user name 
who to grant permissions
 053   * @param tableName optional table name 
the permissions apply
@@ -96,7 +96,7 @@
 088  }
 089
 090  /**
-091   * Create a request to grant user 
permissions.
+091   * Create a request to grant user 
namespace permissions.
 092   *
 093   * @param username the short user name 
who to grant permissions
 094   * @param namespace optional table name 
the permissions apply
@@ -127,7 +127,7 @@
 119  }
 120
 121  /**
-122   * Create a request to revoke user 
permissions.
+122   * Create a request to revoke user 
global permissions.
 123   *
 124   * @param username the short user name 
whose permissions to be revoked
 125   * @param actions the permissions to be 
revoked
@@ -153,7 +153,7 @@
 145  }
 146
 147  /**
-148   * Create a request to revoke user 
permissions.
+148   * Create a request to revoke user 
namespace permissions.
 149   *
 150   * @param username the short user name 
whose permissions to be revoked
 151   * @param namespace optional table name 
the permissions apply
@@ -184,7 +184,7 @@
 176  }
 177
 178  /**
-179   * Create a request to grant user 
permissions.
+179   * Create a request to grant user 
global permissions.
 180   *
 181   * @param username the short user name 
who to grant permissions
 182   * @param actions the permissions to be 
granted
@@ -248,669 +248,661 @@
 240    return result;
 241  }
 242
-243
-244  /**
-245   * Converts a Permission proto to a client Permission object.
-246   *
-247   * @param proto the protobuf Permission
-248   * @return the converted Permission
-249   */
-250  public static Permission toPermission(AccessControlProtos.Permission proto) {
-251    if (proto.getType() != AccessControlProtos.Permission.Type.Global) {
-252      return toTablePermission(proto);
-253    } else {
-254      List<Permission.Action> actions = toPermissionActions(
-255          proto.getGlobalPermission().getActionList());
-256      return new Permission(actions.toArray(new Permission.Action[actions.size()]));
-257    }
-258  }
-259
-260  /**
-261   * Converts a TablePermission proto to a client TablePermission object.
-262   * @param proto the protobuf TablePermission
-263   * @return the converted TablePermission
-264   */
-265  public static TablePermission toTablePermission(AccessControlProtos.TablePermission proto) {
-266    List<Permission.Action> actions = toPermissionActions(proto.getActionList());
-267    TableName table = null;
-268    byte[] qualifier = null;
-269    byte[] family = null;
-270    if (!proto.hasTableName()) {
-271      throw new IllegalStateException("TableName cannot be empty");
-272    }
-273    table = ProtobufUtil.toTableName(proto.getTableName());
-274    if (proto.hasFamily()) {
-275      family = proto.getFamily().toByteArray();
-276    }
-277    if (proto.hasQualifier()) {
-278      qualifier = proto.getQualifier().toByteArray();
-279    }
-280    return new TablePermission(table, family, qualifier,
-281        actions.toArray(new Permission.Action[actions.size()]));
-282  }
-283
-284  /**
-285   * Converts a Permission proto to a client TablePermission object.
-286   * @param proto the protobuf Permission
-287   * @return the converted TablePermission
-288   */
-289  public static TablePermission toTablePermission(AccessControlProtos.Permission proto) {
-290    if (proto.getType() == AccessControlProtos.Permission.Type.Global) {
-291      AccessControlProtos.GlobalPermission perm = proto.getGlobalPermission();
-292      List<Permission.Action> actions = toPermissionActions(perm.getActionList());
-293
-294      return new TablePermission(null, null, null,
-295          actions.toArray(new Permission.Action[actions.size()]));
-296    }
-297    if (proto.getType() == AccessControlProtos.Permission.Type.Namespace) {
-298      AccessControlProtos.NamespacePermission perm = proto.getNamespacePermission();
-299      List<Permission.Action> actions = toPermissionActions(perm.getActionList());
-300
-301      if (!proto.hasNamespacePermission()) {
-302        throw new IllegalStateException("Namespace must not be empty in NamespacePermission");
-303      }
-304
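The converters above dispatch on the proto's permission type (Global, Namespace, Table) and build the narrowest client permission, throwing IllegalStateException when a required field is absent. A simplified, self-contained sketch of the same dispatch; the enum-based types below are stand-ins for the AccessControlProtos classes:

    import java.util.EnumSet;
    import java.util.Set;

    final class PermissionConversion {
      enum Scope { GLOBAL, NAMESPACE, TABLE }
      enum Action { READ, WRITE, EXEC, CREATE, ADMIN }

      static final class ProtoPermission {
        final Scope scope; final String target; final Set<Action> actions;
        ProtoPermission(Scope scope, String target, Set<Action> actions) {
          this.scope = scope; this.target = target; this.actions = actions;
        }
      }

      static String toClientPermission(ProtoPermission proto) {
        switch (proto.scope) {
          case GLOBAL:
            return "global " + proto.actions;
          case NAMESPACE:
            if (proto.target == null) {
              throw new IllegalStateException("Namespace must not be empty");
            }
            return "namespace " + proto.target + " " + proto.actions;
          default:
            if (proto.target == null) {
              throw new IllegalStateException("TableName cannot be empty");
            }
            return "table " + proto.target + " " + proto.actions;
        }
      }

      public static void main(String[] args) {
        System.out.println(toClientPermission(
            new ProtoPermission(Scope.GLOBAL, null, EnumSet.of(Action.ADMIN))));
      }
    }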

[24/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.

2018-10-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
index 0af8acd..c5f21ac 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
@@ -645,1615 +645,1597 @@
 637        proc.afterReplay(getEnvironment());
 638      }
 639    });
-640
-641    // 4. Push the procedures to the timeout executor
-642    waitingTimeoutList.forEach(proc -> {
-643      proc.afterReplay(getEnvironment());
-644      timeoutExecutor.add(proc);
-645    });
-646    // 5. restore locks
-647    restoreLocks();
-648    // 6. Push the procedure to the scheduler
-649    failedList.forEach(scheduler::addBack);
-650    runnableList.forEach(p -> {
-651      p.afterReplay(getEnvironment());
-652      if (!p.hasParent()) {
-653        sendProcedureLoadedNotification(p.getProcId());
-654      }
-655      // If the procedure holds the lock, put the procedure in front
-656      // If its parent holds the lock, put the procedure in front
-657      // TODO. Is that possible that its ancestor holds the lock?
-658      // For now, the deepest procedure hierarchy is:
-659      // ModifyTableProcedure -> ReopenTableProcedure ->
-660      // MoveTableProcedure -> Unassign/AssignProcedure
-661      // But ModifyTableProcedure and ReopenTableProcedure won't hold the lock
-662      // So, checking the parent lock is enough (a tricky case is resolved by HBASE-21384).
-663      // If someone changes or adds new procedures so that a 'grandpa' procedure
-664      // holds the lock but the parent procedure does not, there will
-665      // be a problem here. We have to check one procedure's ancestors.
-666      // And we need to change LockAndQueue.hasParentLock(Procedure<?> proc) method
-667      // to check all ancestors too.
-668      if (p.isLockedWhenLoading() || (p.hasParent() && procedures
-669          .get(p.getParentProcId()).isLockedWhenLoading())) {
-670        scheduler.addFront(p, false);
-671      } else {
-672        // if it was not, it can wait.
-673        scheduler.addBack(p, false);
-674      }
-675    });
-676    // After all procedures put into the queue, signal the worker threads.
-677    // Otherwise, there is a race condition. See HBASE-21364.
-678    scheduler.signalAll();
-679  }
+640    // 4. restore locks
+641    restoreLocks();
+642
+643    // 5. Push the procedures to the timeout executor
+644    waitingTimeoutList.forEach(proc -> {
+645      proc.afterReplay(getEnvironment());
+646      timeoutExecutor.add(proc);
+647    });
+648
+649    // 6. Push the procedure to the scheduler
+650    failedList.forEach(scheduler::addBack);
+651    runnableList.forEach(p -> {
+652      p.afterReplay(getEnvironment());
+653      if (!p.hasParent()) {
+654        sendProcedureLoadedNotification(p.getProcId());
+655      }
+656      scheduler.addBack(p);
+657    });
+658    // After all procedures put into the queue, signal the worker threads.
+659    // Otherwise, there is a race condition. See HBASE-21364.
+660    scheduler.signalAll();
+661  }
+662
+663  /**
+664   * Initialize the procedure executor, but do not start workers. We will start them later.
+665   * <p/>
+666   * It calls ProcedureStore.recoverLease() and ProcedureStore.load() to recover the lease, and
+667   * ensure a single executor, and start the procedure replay to resume and recover the previous
+668   * pending and in-progress procedures.
+669   * @param numThreads number of threads available for procedure execution.
+670   * @param abortOnCorruption true if you want to abort your service in case a corrupted procedure
+671   *          is found on replay. otherwise false.
+672   */
+673  public void init(int numThreads, boolean abortOnCorruption) throws IOException {
+674    // We have numThreads executor + one timer thread used for timing out
+675    // procedures and triggering periodic procedures.
+676    this.corePoolSize = numThreads;
+677    this.maxPoolSize = 10 * numThreads;
+678    LOG.info("Starting {} core workers (bigger of cpus/4 or 16) with max (burst) worker count={}",
+679        corePoolSize, maxPoolSize);
 680
-681  /**
-682   * Initialize the procedure executor, but do not start workers. We will start them later.
-683   * <p/>
-684   * It calls ProcedureStore.recoverLease() and ProcedureStore.load() to recover the lease, and
-685   * ensure a single executor, and start the procedure replay to resume and recover the previous
-686   * pending and in-progress procedures.
-687   * @param numThreads number of threads available for procedure execution.
-688   * @param abortOnCorruption
[24/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
index c7d99b2..9d1542c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
@@ -382,1357 +382,1365 @@
 374    for (int i = 0; i < this.curFunctionCosts.length; i++) {
 375      curFunctionCosts[i] = tempFunctionCosts[i];
 376    }
-377    LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost + ", functionCost="
-378        + functionCost());
+377    double initCost = currentCost;
+378    double newCost = currentCost;
 379
-380    double initCost = currentCost;
-381    double newCost = currentCost;
-382
-383    long computedMaxSteps;
-384    if (runMaxSteps) {
-385      computedMaxSteps = Math.max(this.maxSteps,
-386          ((long)cluster.numRegions * (long)this.stepsPerRegion * (long)cluster.numServers));
-387    } else {
-388      computedMaxSteps = Math.min(this.maxSteps,
-389          ((long)cluster.numRegions * (long)this.stepsPerRegion * (long)cluster.numServers));
-390    }
-391    // Perform a stochastic walk to see if we can get a good fit.
-392    long step;
-393
-394    for (step = 0; step < computedMaxSteps; step++) {
-395      Cluster.Action action = nextAction(cluster);
-396
-397      if (action.type == Type.NULL) {
-398        continue;
-399      }
-400
-401      cluster.doAction(action);
-402      updateCostsWithAction(cluster, action);
-403
-404      newCost = computeCost(cluster, currentCost);
-405
-406      // Should this be kept?
-407      if (newCost < currentCost) {
-408        currentCost = newCost;
-409
-410        // save for JMX
-411        curOverallCost = currentCost;
-412        for (int i = 0; i < this.curFunctionCosts.length; i++) {
-413          curFunctionCosts[i] = tempFunctionCosts[i];
-414        }
-415      } else {
-416        // Put things back the way they were before.
-417        // TODO: undo by remembering old values
-418        Action undoAction = action.undoAction();
-419        cluster.doAction(undoAction);
-420        updateCostsWithAction(cluster, undoAction);
-421      }
-422
-423      if (EnvironmentEdgeManager.currentTime() - startTime >
-424          maxRunningTime) {
-425        break;
-426      }
-427    }
-428    long endTime = EnvironmentEdgeManager.currentTime();
-429
-430    metricsBalancer.balanceCluster(endTime - startTime);
-431
-432    // update costs metrics
-433    updateStochasticCosts(tableName, curOverallCost, curFunctionCosts);
-434    if (initCost > currentCost) {
-435      plans = createRegionPlans(cluster);
-436      LOG.info("Finished computing new load balance plan. Computation took {}" +
-437        " to try {} different iterations.  Found a solution that moves " +
-438        "{} regions; Going from a computed cost of {}" +
-439        " to a new cost of {}", java.time.Duration.ofMillis(endTime - startTime),
-440        step, plans.size(), initCost, currentCost);
-441      return plans;
-442    }
-443    LOG.info("Could not find a better load balance plan.  Tried {} different configurations in " +
-444      "{}, and did not find anything with a computed cost less than {}", step,
-445      java.time.Duration.ofMillis(endTime - startTime), initCost);
-446    return null;
-447  }
-448
-449  /**
-450   * update costs to JMX
-451   */
-452  private void updateStochasticCosts(TableName tableName, Double overall, Double[] subCosts) {
-453    if (tableName == null) return;
-454
-455    // check if the metricsBalancer is MetricsStochasticBalancer before casting
-456    if (metricsBalancer instanceof MetricsStochasticBalancer) {
-457      MetricsStochasticBalancer balancer = (MetricsStochasticBalancer) metricsBalancer;
-458      // overall cost
-459      balancer.updateStochasticCost(tableName.getNameAsString(),
-460        "Overall", "Overall cost", overall);
-461
-462      // each cost function
-463      for (int i = 0; i < costFunctions.length; i++) {
-464        CostFunction costFunction = costFunctions[i];
-465        String costFunctionName = costFunction.getClass().getSimpleName();
-466        Double costPercent = (overall == 0) ? 0 : (subCosts[i] / overall);
-467        // TODO: cost function may need a specific description
-468        balancer.updateStochasticCost(tableName.getNameAsString(), costFunctionName,
-469          "The percent of " + costFunctionName, costPercent);
-470      }
-471    }
-472  
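The removed block above is a plain greedy walk: propose a random action, keep it when the computed cost drops, otherwise undo it, and stop on a step or time budget. A self-contained sketch of the same accept-or-undo loop (generic Java; none of the balancer's own types are used):

    import java.util.Random;
    import java.util.function.ToDoubleFunction;

    final class HillClimb {
      // Mirrors the balancer's loop: random tweak, keep only improvements.
      static double walk(double[] state, ToDoubleFunction<double[]> cost, long maxSteps) {
        Random rnd = new Random();
        double current = cost.applyAsDouble(state);
        for (long step = 0; step < maxSteps; step++) {
          int i = rnd.nextInt(state.length);
          double saved = state[i];
          state[i] += rnd.nextGaussian();        // propose a move
          double next = cost.applyAsDouble(state);
          if (next < current) {
            current = next;                      // keep it (the balancer also saves this for JMX)
          } else {
            state[i] = saved;                    // undo, as the balancer does
          }
        }
        return current;
      }
    }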

[24/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git a/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html 
b/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
index fde43fb..8147d54 100644
--- a/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -120,7 +120,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class HBaseTestingUtility
+public class HBaseTestingUtility
 extends HBaseZKTestingUtility
 Facility for testing HBase. Replacement for
  old HBaseTestCase and HBaseClusterTestCase functionality.
@@ -136,7 +136,8 @@ extends To preserve test data directories, pass the system property 
"hbase.testing.preserve.testdir"
- setting it to true.
+ setting it to true.
+ Trigger pre commit.
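Two practical points from the javadoc above: the utility stands up a throwaway in-process cluster, and test directories survive for post-mortem when the named system property is set. A hedged sketch of typical use (table and family names are illustrative):

    // Run the JVM with -Dhbase.testing.preserve.testdir=true to keep test dirs.
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();                                   // in-process HBase
    try {
      Table t = util.createTable(TableName.valueOf("t1"), Bytes.toBytes("f"));
      // ... exercise the code under test against 't' ...
    } finally {
      util.shutdownMiniCluster();
    }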
 
 
 
@@ -1830,7 +1831,7 @@ extends 
 
 REGIONS_PER_SERVER_KEY
-public static final String REGIONS_PER_SERVER_KEY
+public static final String REGIONS_PER_SERVER_KEY
 
 See Also:
 Constant
 Field Values
@@ -1843,7 +1844,7 @@ extends 
 
 DEFAULT_REGIONS_PER_SERVER
-public static final int DEFAULT_REGIONS_PER_SERVER
+public static final int DEFAULT_REGIONS_PER_SERVER
 The default number of regions per regionserver when 
creating a pre-split
  table.
 
@@ -1858,7 +1859,7 @@ extends 
 
 PRESPLIT_TEST_TABLE_KEY
-public static final String PRESPLIT_TEST_TABLE_KEY
+public static final String PRESPLIT_TEST_TABLE_KEY
 
 See Also:
 Constant
 Field Values
@@ -1871,7 +1872,7 @@ extends 
 
 PRESPLIT_TEST_TABLE
-public static final boolean PRESPLIT_TEST_TABLE
+public static final boolean PRESPLIT_TEST_TABLE
 
 See Also:
 Constant
 Field Values
@@ -1884,7 +1885,7 @@ extends 
 
 MEMSTORETS_TAGS_PARAMETRIZED
-public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED
+public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED
 This is for unit tests parameterized with a single 
boolean.
 
 
@@ -1894,7 +1895,7 @@ extends 
 
 BLOOM_AND_COMPRESSION_COMBINATIONS
-public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS
+public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS
 
 
 
@@ -1903,7 +1904,7 @@ extends 
 
 fam1
-public static final byte[] fam1
+public static final byte[] fam1
 
 
 
@@ -1912,7 +1913,7 @@ extends 
 
 fam2
-public static final byte[] fam2
+public static final byte[] fam2
 
 
 
@@ -1921,7 +1922,7 @@ extends 
 
 fam3
-public static final byte[] fam3
+public static final byte[] fam3
 
 
 
@@ -1930,7 +1931,7 @@ extends 
 
 COLUMNS
-public static final byte[][] COLUMNS
+public static final byte[][] COLUMNS
 
 
 
@@ -1939,7 +1940,7 @@ extends 
 
 FIRST_CHAR
-public static final char FIRST_CHAR
+public static final char FIRST_CHAR
 
 See Also:
 Constant
 Field Values
@@ -1952,7 +1953,7 @@ extends 
 
 LAST_CHAR
-public static final char LAST_CHAR
+public static final char LAST_CHAR
 
 See Also:
 Constant
 Field Values
@@ -1965,7 +1966,7 @@ extends 
 
 START_KEY_BYTES
-public static final byte[] START_KEY_BYTES
+public static final byte[] START_KEY_BYTES
 
 
 
@@ -1974,7 +1975,7 @@ extends 
 
 START_KEY
-public static final String START_KEY
+public static final String START_KEY

[24/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.html
index 9023fea..ff13fe2 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.html
@@ -381,7 +381,7 @@ extends Procedure
-addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doExecute,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProcIdHashCode, getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout,
 haveSameParent,
 incChildrenLatch,
 isBypass,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 removeStackIndex,
 setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure,
 setLastUpdate,
 setNonceKey,
 setOwner,
 setOwner, setParentProcId,
 setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState,
 setSubmittedTime,
 setTimeout, setTimeoutFailure,
 shouldWaitClientAck,
 skipPersistence,
 toString,
 toStringClass,
 toStringDetails,
 toStringSimpleSB,
 updateMetricsOnFinish, updateMetricsOnSubmit,
 updateTimestamp,
 wasExecuted
+addStackIndex,
 afterReplay,
 beforeReplay,
 bypass,
 compareTo,
 completionCleanup,
 doExecute,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId, getProcIdHashCode,
 getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout,
 haveSameParent, incChildrenLatch,
 isBypass,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 removeStackIndex, setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure,
 setLastUpdate,
 setNonceKey,
 setOwner, setOwner,
 setParentProcId,
 setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState,
 setSubmittedTime, setTimeout,
 setTimeoutFailure,
 shouldWaitClientAck,
 skipPersistence,
 toString,
 toStringClass,
 toStringDetails,
 toStringSimpleSB, updateMetricsOnFinish,
 updateMetricsOnSubmit,
 updateTimestamp,
 wasExecuted
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.html 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.html
index c7c174f..33ac1a7 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.html
@@ -396,7 +396,7 @@ extends Procedure
-addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doExecute,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProcIdHashCode, getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout,
 haveSameParent,
 holdLock, 
incChildrenLatch,
 isBypass,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 removeStackIndex, setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure,
 setLastUpdate,
 setNonceKey,
 setOwner, setOwner,
 setParentProcId,
 setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState,
 setSubmittedTime, setTimeout,
 setTimeoutFailure,
 shouldWaitClientAck,
 skipPersistence,
 toString,
 toStringClass,
 toStringDetails,
 toStringSimpleSB, updateMetricsOnFinish,
 updateMetricsOnSubmit,
 updateTimestamp,
 wasExecuted
+addStackIndex,
 afterReplay,
 beforeReplay,
 bypass,
 compareTo,
 completionCleanup,
 doExecute,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId, getProcIdHashCode,
 getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime,
 getTimeout,

[24/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClient.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClient.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClient.html
deleted file mode 100644
index 03841b5..000
--- 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClient.html
+++ /dev/null
@@ -1,429 +0,0 @@
-TestMobRestoreSnapshotFromClient (Apache HBase 3.0.0-SNAPSHOT Test API)
-
-org.apache.hadoop.hbase.client
-Class 
TestMobRestoreSnapshotFromClient
-
-
-
-java.lang.Object
-
-
-org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClient
-
-
-org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClient
-
-
-
-
-
-
-
-
-
-
-public class TestMobRestoreSnapshotFromClient
-extends TestRestoreSnapshotFromClient
-Test restore snapshots from the client
-
-
-
-
-
-
-
-
-
-
-
-Field Summary
-
-Fields
-
-Modifier and Type
-Field and Description
-
-
-static HBaseClassTestRule
-CLASS_RULE
-
-
-
-
-
-
-Fields inherited from class org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClient
-admin,
 emptySnapshot,
 FAMILY,
 name,
 snapshot0Rows,
 snapshot1Rows,
 snapshotName0,
 snapshotName1,
 snapshotName2, 
tableName,
 TEST_FAMILY2,
 TEST_UTIL
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors
-
-Constructor and Description
-
-
-TestMobRestoreSnapshotFromClient()
-
-
-
-
-
-
-
-
-
-Method Summary
-
-All MethodsStatic MethodsInstance MethodsConcrete Methods
-
-Modifier and Type
-Method and Description
-
-
-protected int
-countRows(org.apache.hadoop.hbase.client.Table table, byte[]... families)
-
-
-protected void
-createTable()
-
-
-protected 
org.apache.hadoop.hbase.HColumnDescriptor
-getTestRestoreSchemaChangeHCD()
-
-
-static void
-setupCluster()
-
-
-protected static void
-setupConf(org.apache.hadoop.conf.Configuration conf)
-
-
-protected void
-verifyRowCount(HBaseTestingUtility util,
-  org.apache.hadoop.hbase.TableName tableName,
-  long expectedRows)
-
-
-
-
-
-
-Methods inherited from class org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClient
-getNumReplicas,
 setup,
 splitRegion,
 tearDown,
 tearDownAfterClass,
 testCloneAndRestoreSnapshot,
 testCloneSnapshotOfCloned,
 testCorruptedSnapshot,
 testGetCompactionStateAfterRestoringSnapshot,
 testRestoreSchemaChange,
 testRestoreSnapshot,
 testRestoreSnapshotAfterSplittingRegions,
 testRestoreSnapshotAfterTruncate
-
-
-
-
-
-Methods inherited from class java.lang.Object
-clone, equals, finalize, getClass, hashCode, notify, 

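The deleted page above documented the MOB variant of the client snapshot-restore test. For orientation, a hedged sketch of the restore flow its test methods (testRestoreSnapshot, testRestoreSnapshotAfterTruncate, ...) exercise through the Admin API (snapshot name illustrative; the test's own helpers differ):

    admin.snapshot("snap0", tableName);     // take a snapshot
    // ... mutate or truncate the table ...
    admin.disableTable(tableName);          // table must be offline to restore
    admin.restoreSnapshot("snap0");
    admin.enableTable(tableName);
    // the test then verifies row counts match the snapshot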
[24/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
index 061ce80..bdfc3f8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
@@ -39,2126 +39,2163 @@
 031import java.util.Set;
 032import 
java.util.concurrent.ConcurrentHashMap;
 033import 
java.util.concurrent.CopyOnWriteArrayList;
-034import java.util.concurrent.TimeUnit;
-035import 
java.util.concurrent.atomic.AtomicBoolean;
-036import 
java.util.concurrent.atomic.AtomicInteger;
-037import 
java.util.concurrent.atomic.AtomicLong;
-038import java.util.stream.Collectors;
-039import java.util.stream.Stream;
-040import 
org.apache.hadoop.conf.Configuration;
-041import 
org.apache.hadoop.hbase.HConstants;
-042import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
-043import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-044import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
-045import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-046import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-047import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-048import 
org.apache.hadoop.hbase.security.User;
-049import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-050import 
org.apache.hadoop.hbase.util.IdLock;
-051import 
org.apache.hadoop.hbase.util.NonceKey;
-052import 
org.apache.hadoop.hbase.util.Threads;
-053import 
org.apache.yetus.audience.InterfaceAudience;
-054import org.slf4j.Logger;
-055import org.slf4j.LoggerFactory;
-056
-057import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-058import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+034import java.util.concurrent.Executor;
+035import java.util.concurrent.Executors;
+036import java.util.concurrent.TimeUnit;
+037import 
java.util.concurrent.atomic.AtomicBoolean;
+038import 
java.util.concurrent.atomic.AtomicInteger;
+039import 
java.util.concurrent.atomic.AtomicLong;
+040import java.util.stream.Collectors;
+041import java.util.stream.Stream;
+042import 
org.apache.hadoop.conf.Configuration;
+043import 
org.apache.hadoop.hbase.HConstants;
+044import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
+045import 
org.apache.hadoop.hbase.log.HBaseMarkers;
+046import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
+047import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+048import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
+049import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureStoreListener;
+050import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
+051import 
org.apache.hadoop.hbase.security.User;
+052import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+053import 
org.apache.hadoop.hbase.util.IdLock;
+054import 
org.apache.hadoop.hbase.util.NonceKey;
+055import 
org.apache.hadoop.hbase.util.Threads;
+056import 
org.apache.yetus.audience.InterfaceAudience;
+057import org.slf4j.Logger;
+058import org.slf4j.LoggerFactory;
 059
-060import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-061
-062/**
-063 * Thread Pool that executes the submitted procedures.
-064 * The executor has a ProcedureStore associated.
-065 * Each operation is logged and on restart the pending procedures are resumed.
-066 *
-067 * Unless the Procedure code throws an error (e.g. invalid user input)
-068 * the procedure will complete (at some point in time). On restart the pending
-069 * procedures are resumed and the ones that failed will be rolled back.
+060import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+061import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+062import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+063
+064import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
+065
+066/**
+067 * Thread Pool that executes the submitted procedures.
+068 * The executor has a ProcedureStore associated.
+069 * Each operation is logged and on restart the pending procedures are resumed.
 070 *
-071 * The user can add procedures to the executor via submitProcedure(proc),
-072 * check for the finished state via isFinished(procId)
-073 * and get the result via getResult(procId)
-074 */
-075@InterfaceAudience.Private
-076public class ProcedureExecutor<TEnvironment> {
-077  private static final Logger LOG = 
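The class comment spells out the executor's contract: submit, poll for completion, fetch the result. A minimal sketch against that API (method names are the ones the comment gives; exact signatures vary across versions, so treat this as illustrative):

    long procId = procExec.submitProcedure(proc);
    while (!procExec.isFinished(procId)) {
      Thread.sleep(100);                 // pending procedures survive a restart
    }
    Procedure<?> result = procExec.getResult(procId);
    if (result.isFailed()) {
      throw result.getException();       // the executor already rolled it back
    }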

[24/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
index 43c66a8..061ce80 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
@@ -23,2136 +23,2142 @@
 015 * See the License for the specific 
language governing permissions and
 016 * limitations under the License.
 017 */
-018
-019package 
org.apache.hadoop.hbase.procedure2;
-020
-021import java.io.IOException;
-022import java.util.ArrayDeque;
-023import java.util.ArrayList;
-024import java.util.Arrays;
-025import java.util.Collection;
-026import java.util.Deque;
-027import java.util.HashSet;
-028import java.util.Iterator;
-029import java.util.List;
-030import java.util.Map;
-031import java.util.Objects;
-032import java.util.Set;
-033import 
java.util.concurrent.ConcurrentHashMap;
-034import 
java.util.concurrent.CopyOnWriteArrayList;
-035import java.util.concurrent.TimeUnit;
-036import 
java.util.concurrent.atomic.AtomicBoolean;
-037import 
java.util.concurrent.atomic.AtomicInteger;
-038import 
java.util.concurrent.atomic.AtomicLong;
-039import java.util.stream.Collectors;
-040import java.util.stream.Stream;
-041
-042import 
org.apache.hadoop.conf.Configuration;
-043import 
org.apache.hadoop.hbase.HConstants;
-044import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
-045import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-046import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
-047import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-048import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-049import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-050import 
org.apache.hadoop.hbase.security.User;
-051import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-052import 
org.apache.hadoop.hbase.util.IdLock;
-053import 
org.apache.hadoop.hbase.util.NonceKey;
-054import 
org.apache.hadoop.hbase.util.Threads;
-055import 
org.apache.yetus.audience.InterfaceAudience;
-056import org.slf4j.Logger;
-057import org.slf4j.LoggerFactory;
-058
-059import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-060import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+018package 
org.apache.hadoop.hbase.procedure2;
+019
+020import java.io.IOException;
+021import java.util.ArrayDeque;
+022import java.util.ArrayList;
+023import java.util.Arrays;
+024import java.util.Collection;
+025import java.util.Deque;
+026import java.util.HashSet;
+027import java.util.Iterator;
+028import java.util.List;
+029import java.util.Map;
+030import java.util.Objects;
+031import java.util.Set;
+032import 
java.util.concurrent.ConcurrentHashMap;
+033import 
java.util.concurrent.CopyOnWriteArrayList;
+034import java.util.concurrent.TimeUnit;
+035import 
java.util.concurrent.atomic.AtomicBoolean;
+036import 
java.util.concurrent.atomic.AtomicInteger;
+037import 
java.util.concurrent.atomic.AtomicLong;
+038import java.util.stream.Collectors;
+039import java.util.stream.Stream;
+040import 
org.apache.hadoop.conf.Configuration;
+041import 
org.apache.hadoop.hbase.HConstants;
+042import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
+043import 
org.apache.hadoop.hbase.log.HBaseMarkers;
+044import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
+045import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+046import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
+047import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
+048import 
org.apache.hadoop.hbase.security.User;
+049import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+050import 
org.apache.hadoop.hbase.util.IdLock;
+051import 
org.apache.hadoop.hbase.util.NonceKey;
+052import 
org.apache.hadoop.hbase.util.Threads;
+053import 
org.apache.yetus.audience.InterfaceAudience;
+054import org.slf4j.Logger;
+055import org.slf4j.LoggerFactory;
+056
+057import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+058import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+059
+060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 061
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-063
-064/**
-065 * Thread Pool that executes the 
submitted procedures.
-066 * The executor has a ProcedureStore 
associated.
-067 * Each operation is logged and on 
restart the pending procedures are resumed.
-068 *
-069 * 

[24/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html 
b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html
index dad606e..25fc13f 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class WALSplitter.EntryBuffers
+public static class WALSplitter.EntryBuffers
 extends java.lang.Object
 Class which accumulates edits and separates them into a 
buffer per region
  while simultaneously accounting RAM usage. Blocks if the RAM usage crosses
@@ -250,7 +250,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 controller
-WALSplitter.PipelineController controller
+WALSplitter.PipelineController controller
 
 
 
@@ -259,7 +259,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 buffers
-Map<byte[], WALSplitter.RegionEntryBuffer> buffers
+Map<byte[], WALSplitter.RegionEntryBuffer> buffers
 
 
 
@@ -268,7 +268,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 currentlyWriting
-Set<byte[]> currentlyWriting
+Set<byte[]> currentlyWriting
 
 
 
@@ -277,7 +277,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 totalBuffered
-long totalBuffered
+long totalBuffered
 
 
 
@@ -286,7 +286,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 maxHeapUsage
-long maxHeapUsage
+long maxHeapUsage
 
 
 
@@ -295,7 +295,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 splitWriterCreationBounded
-boolean splitWriterCreationBounded
+boolean splitWriterCreationBounded
 
 
 
@@ -312,7 +312,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 EntryBuffers
-public EntryBuffers(WALSplitter.PipelineController controller,
+public EntryBuffers(WALSplitter.PipelineController controller,
 long maxHeapUsage)
 
 
@@ -322,7 +322,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 EntryBuffers
-public EntryBuffers(WALSplitter.PipelineController controller,
+public EntryBuffers(WALSplitter.PipelineController controller,
 long maxHeapUsage,
 boolean splitWriterCreationBounded)
 
@@ -341,7 +341,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 appendEntry
-public void appendEntry(WAL.Entry entry)
+public void appendEntry(WAL.Entry entry)
  throws InterruptedException, IOException
 Append a log entry into the corresponding region buffer.
@@ -359,7 +359,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getChunkToWrite
-WALSplitter.RegionEntryBuffer getChunkToWrite()
+WALSplitter.RegionEntryBuffer getChunkToWrite()
 
 Returns:
 RegionEntryBuffer a buffer of edits to be written.
@@ -372,7 +372,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 doneWriting
-void doneWriting(WALSplitter.RegionEntryBuffer buffer)
+void doneWriting(WALSplitter.RegionEntryBuffer buffer)
 
 
 
@@ -381,7 +381,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 isRegionCurrentlyWriting
-boolean isRegionCurrentlyWriting(byte[] region)
+boolean isRegionCurrentlyWriting(byte[] region)
 
 
 
@@ -390,7 +390,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 waitUntilDrained
-public void waitUntilDrained()
+public void waitUntilDrained()
 
 
 

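The summary above describes a bounded producer/consumer buffer: appendEntry blocks once buffered RAM crosses a cap, getChunkToWrite hands a region's batch to a writer thread, and doneWriting releases the accounting. A generic sketch of that back-pressure pattern (not WALSplitter's actual code):

    import java.util.ArrayDeque;
    import java.util.Queue;

    // Producers block above a RAM cap; consumers free capacity and wake them.
    final class BoundedBuffer {
      private final Queue<byte[]> chunks = new ArrayDeque<>();
      private long buffered;                 // bytes currently accounted for
      private final long maxHeapUsage;

      BoundedBuffer(long maxHeapUsage) { this.maxHeapUsage = maxHeapUsage; }

      synchronized void append(byte[] chunk) throws InterruptedException {
        while (buffered + chunk.length > maxHeapUsage) {
          wait();                            // back-pressure, like appendEntry()
        }
        chunks.add(chunk);
        buffered += chunk.length;
      }

      synchronized byte[] takeChunk() {      // like getChunkToWrite()
        return chunks.poll();
      }

      synchronized void doneWriting(byte[] chunk) {
        buffered -= chunk.length;            // release the RAM accounting
        notifyAll();                         // unblock waiting producers
      }
    }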
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
 

[24/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index 0cf012a..976894f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -63,3884 +63,3883 @@
 055import javax.servlet.http.HttpServlet;
 056import 
javax.servlet.http.HttpServletRequest;
 057import 
javax.servlet.http.HttpServletResponse;
-058
-059import 
org.apache.commons.lang3.StringUtils;
-060import 
org.apache.hadoop.conf.Configuration;
-061import org.apache.hadoop.fs.Path;
-062import 
org.apache.hadoop.hbase.ChoreService;
-063import 
org.apache.hadoop.hbase.ClusterId;
-064import 
org.apache.hadoop.hbase.ClusterMetrics;
-065import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-066import 
org.apache.hadoop.hbase.ClusterMetricsBuilder;
-067import 
org.apache.hadoop.hbase.CompoundConfiguration;
-068import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-069import 
org.apache.hadoop.hbase.HBaseIOException;
-070import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-071import 
org.apache.hadoop.hbase.HConstants;
-072import 
org.apache.hadoop.hbase.InvalidFamilyOperationException;
-073import 
org.apache.hadoop.hbase.MasterNotRunningException;
-074import 
org.apache.hadoop.hbase.MetaTableAccessor;
-075import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-076import 
org.apache.hadoop.hbase.PleaseHoldException;
-077import 
org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
-078import 
org.apache.hadoop.hbase.ServerName;
-079import 
org.apache.hadoop.hbase.TableDescriptors;
-080import 
org.apache.hadoop.hbase.TableName;
-081import 
org.apache.hadoop.hbase.TableNotDisabledException;
-082import 
org.apache.hadoop.hbase.TableNotFoundException;
-083import 
org.apache.hadoop.hbase.UnknownRegionException;
-084import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-085import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-086import 
org.apache.hadoop.hbase.client.MasterSwitchType;
-087import 
org.apache.hadoop.hbase.client.RegionInfo;
-088import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-089import 
org.apache.hadoop.hbase.client.Result;
-090import 
org.apache.hadoop.hbase.client.TableDescriptor;
-091import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-092import 
org.apache.hadoop.hbase.client.TableState;
-093import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-094import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-095import 
org.apache.hadoop.hbase.exceptions.MergeRegionException;
-096import 
org.apache.hadoop.hbase.executor.ExecutorType;
-097import 
org.apache.hadoop.hbase.favored.FavoredNodesManager;
-098import 
org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-099import 
org.apache.hadoop.hbase.http.InfoServer;
-100import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-101import 
org.apache.hadoop.hbase.ipc.RpcServer;
-102import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-103import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-104import 
org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
-105import 
org.apache.hadoop.hbase.master.assignment.AssignProcedure;
-106import 
org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-107import 
org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
-108import 
org.apache.hadoop.hbase.master.assignment.MoveRegionProcedure;
-109import 
org.apache.hadoop.hbase.master.assignment.RegionStateNode;
-110import 
org.apache.hadoop.hbase.master.assignment.RegionStates;
-111import 
org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
-112import 
org.apache.hadoop.hbase.master.assignment.UnassignProcedure;
-113import 
org.apache.hadoop.hbase.master.balancer.BalancerChore;
-114import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
-115import 
org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
-116import 
org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
-117import 
org.apache.hadoop.hbase.master.cleaner.CleanerChore;
-118import 
org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-119import 
org.apache.hadoop.hbase.master.cleaner.LogCleaner;
-120import 
org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
-121import 
org.apache.hadoop.hbase.master.locking.LockManager;
-122import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-123import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
-124import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-125import 

[24/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
index a5789e0..93a57cb 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
@@ -238,4120 +238,4119 @@
 230 * @see Admin
 231 */
 232@InterfaceAudience.Private
-233@InterfaceStability.Evolving
-234public class HBaseAdmin implements Admin {
-235  private static final Logger LOG = LoggerFactory.getLogger(HBaseAdmin.class);
-236
-237  private ClusterConnection connection;
-238
-239  private final Configuration conf;
-240  private final long pause;
-241  private final int numRetries;
-242  private final int syncWaitTimeout;
-243  private boolean aborted;
-244  private int operationTimeout;
-245  private int rpcTimeout;
-246
-247  private RpcRetryingCallerFactory 
rpcCallerFactory;
-248  private RpcControllerFactory 
rpcControllerFactory;
-249
-250  private NonceGenerator ng;
-251
-252  @Override
-253  public int getOperationTimeout() {
-254return operationTimeout;
-255  }
-256
-257  HBaseAdmin(ClusterConnection 
connection) throws IOException {
-258this.conf = 
connection.getConfiguration();
-259this.connection = connection;
-260
-261// TODO: receive 
ConnectionConfiguration here rather than re-parsing these configs every time.
-262this.pause = 
this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
-263
HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
-264this.numRetries = 
this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-265
HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-266this.operationTimeout = 
this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
-267
HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-268this.rpcTimeout = 
this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-269
HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
-270    this.syncWaitTimeout = this.conf.getInt(
-271      "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
-272
-273this.rpcCallerFactory = 
connection.getRpcRetryingCallerFactory();
-274this.rpcControllerFactory = 
connection.getRpcControllerFactory();
-275
-276this.ng = 
this.connection.getNonceGenerator();
-277  }
-278
-279  @Override
-280  public void abort(String why, Throwable 
e) {
-281// Currently does nothing but throw 
the passed message and exception
-282this.aborted = true;
-283throw new RuntimeException(why, e);
-284  }
-285
-286  @Override
-287  public boolean isAborted() {
-288return this.aborted;
-289  }
-290
-291  @Override
-292  public boolean abortProcedure(final 
long procId, final boolean mayInterruptIfRunning)
-293  throws IOException {
-294return 
get(abortProcedureAsync(procId, mayInterruptIfRunning), this.syncWaitTimeout,
-295  TimeUnit.MILLISECONDS);
-296  }
-297
-298  @Override
-299  public Future<Boolean> abortProcedureAsync(final long procId, final boolean mayInterruptIfRunning)
-300      throws IOException {
-301    Boolean abortProcResponse =
-302        executeCallable(new MasterCallable<AbortProcedureResponse>(getConnection(),
-303            getRpcControllerFactory()) {
-304  @Override
-305  protected AbortProcedureResponse 
rpcCall() throws Exception {
-306AbortProcedureRequest 
abortProcRequest =
-307
AbortProcedureRequest.newBuilder().setProcId(procId).build();
-308return 
master.abortProcedure(getRpcController(), abortProcRequest);
-309  }
-310}).getIsProcedureAborted();
-311return new AbortProcedureFuture(this, 
procId, abortProcResponse);
-312  }
-313
-314  @Override
-315  public List<TableDescriptor> listTableDescriptors() throws IOException {
-316    return listTableDescriptors((Pattern) null, false);
-317  }
-318
-319  @Override
-320  public List<TableDescriptor> listTableDescriptors(Pattern pattern) throws IOException {
-321    return listTableDescriptors(pattern, false);
-322  }
-323
-324  @Override
-325  public List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables)
-326      throws IOException {
-327    return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
-328        getRpcControllerFactory()) {
-329      @Override
-330      protected List<TableDescriptor> rpcCall() throws Exception {
-331        GetTableDescriptorsRequest req =
-332            RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
-333        return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
-334            req));
-335      }
-336    });
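The constructor above re-reads several client knobs from the Configuration on every HBaseAdmin instantiation (note the TODO about ConnectionConfiguration). A hedged sketch of setting those same keys up front (values illustrative; the keys are the ones the constructor reads):

    Configuration conf = HBaseConfiguration.create();
    conf.setLong(HConstants.HBASE_CLIENT_PAUSE, 100);                // ms between retries
    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 15);
    conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 120000);  // ms
    conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 60000);            // ms
    conf.setInt("hbase.client.sync.wait.timeout.msec", 600000);      // 10 min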

[24/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/filter/KeyOnlyFilter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/KeyOnlyFilter.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/KeyOnlyFilter.html
index 1e6a2bb..9881003 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/filter/KeyOnlyFilter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/filter/KeyOnlyFilter.html
@@ -145,332 +145,328 @@
 137
 138  @Override
 139  public boolean equals(Object obj) {
-140if (obj == null || (!(obj instanceof 
KeyOnlyFilter))) {
-141  return false;
-142}
-143KeyOnlyFilter f = (KeyOnlyFilter) 
obj;
-144return 
this.areSerializedFieldsEqual(f);
-145  }
-146
-147  @Override
-148  public int hashCode() {
-149return Objects.hash(this.lenAsVal);
-150  }
+140    return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj);
+141  }
+142
+143  @Override
+144  public int hashCode() {
+145return Objects.hash(this.lenAsVal);
+146  }
+147
+148  static class KeyOnlyCell implements 
Cell {
+149private Cell cell;
+150private boolean lenAsVal;
 151
-152  static class KeyOnlyCell implements 
Cell {
-153private Cell cell;
-154private boolean lenAsVal;
-155
-156public KeyOnlyCell(Cell c, boolean 
lenAsVal) {
-157  this.cell = c;
-158  this.lenAsVal = lenAsVal;
-159}
-160
-161@Override
-162public byte[] getRowArray() {
-163  return cell.getRowArray();
-164}
-165
-166@Override
-167public int getRowOffset() {
-168  return cell.getRowOffset();
-169}
-170
-171@Override
-172public short getRowLength() {
-173  return cell.getRowLength();
-174}
-175
-176@Override
-177public byte[] getFamilyArray() {
-178  return cell.getFamilyArray();
-179}
-180
-181@Override
-182public int getFamilyOffset() {
-183  return cell.getFamilyOffset();
-184}
-185
-186@Override
-187public byte getFamilyLength() {
-188  return cell.getFamilyLength();
-189}
-190
-191@Override
-192public byte[] getQualifierArray() {
-193  return cell.getQualifierArray();
-194}
-195
-196@Override
-197public int getQualifierOffset() {
-198  return cell.getQualifierOffset();
-199}
-200
-201@Override
-202public int getQualifierLength() {
-203  return cell.getQualifierLength();
-204}
-205
-206@Override
-207public long getTimestamp() {
-208  return cell.getTimestamp();
-209}
-210
-211@Override
-212public byte getTypeByte() {
-213  return cell.getTypeByte();
-214}
-215
-216@Override
-217public Type getType() {
-218  return cell.getType();
-219}
-220
-221
-222@Override
-223public long getSequenceId() {
-224  return 0;
-225}
-226
-227@Override
-228public byte[] getValueArray() {
-229  if (lenAsVal) {
-230return 
Bytes.toBytes(cell.getValueLength());
-231  } else {
-232return 
HConstants.EMPTY_BYTE_ARRAY;
-233  }
-234}
-235
-236@Override
-237public int getValueOffset() {
-238  return 0;
-239}
-240
-241@Override
-242public int getValueLength() {
-243  if (lenAsVal) {
-244return Bytes.SIZEOF_INT;
-245  } else {
-246return 0;
-247  }
-248}
-249
-250@Override
-251public byte[] getTagsArray() {
-252  return 
HConstants.EMPTY_BYTE_ARRAY;
-253}
-254
-255@Override
-256public int getTagsOffset() {
-257  return 0;
-258}
-259
-260@Override
-261public int getTagsLength() {
-262  return 0;
-263}
-264  }
-265
-266  static class 
KeyOnlyByteBufferExtendedCell extends ByteBufferExtendedCell {
-267public static final int 
FIXED_OVERHEAD = ClassSize.OBJECT + ClassSize.REFERENCE
-268+ Bytes.SIZEOF_BOOLEAN;
-269private ByteBufferExtendedCell 
cell;
-270private boolean lenAsVal;
-271
-272public 
KeyOnlyByteBufferExtendedCell(ByteBufferExtendedCell c, boolean lenAsVal) {
-273  this.cell = c;
-274  this.lenAsVal = lenAsVal;
-275}
-276
-277@Override
-278public byte[] getRowArray() {
-279  return cell.getRowArray();
-280}
-281
-282@Override
-283public int getRowOffset() {
-284  return cell.getRowOffset();
-285}
-286
-287@Override
-288public short getRowLength() {
-289  return cell.getRowLength();
-290}
-291
-292@Override
-293public byte[] getFamilyArray() {
-294  return cell.getFamilyArray();
-295}
-296
-297@Override
-298public int getFamilyOffset() {
-299  return cell.getFamilyOffset();
-300}
-301
-302@Override
-303public byte getFamilyLength() {
-304  return cell.getFamilyLength();
-305}
-306
-307@Override
-308public byte[] getQualifierArray() {
-309  return cell.getQualifierArray();
-310}
-311
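The wrapper cells above are what make the filter cheap: key fields pass through, and the value is either emptied or replaced by its 4-byte length when lenAsVal is set. A hedged usage sketch (scan setup illustrative):

    Scan scan = new Scan();
    // true => each returned value is an int holding the original value's length,
    // matching KeyOnlyCell.getValueArray()/getValueLength() above.
    scan.setFilter(new KeyOnlyFilter(true));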

[24/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
index 420e804..7c1a8c2 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
@@ -798,7 +798,7 @@ implements 
 
 splitRequest
-private final MutableFastCounter splitRequest
+private final MutableFastCounter splitRequest
 
 
 
@@ -807,7 +807,7 @@ implements 
 
 splitSuccess
-private final MutableFastCounter splitSuccess
+private final MutableFastCounter splitSuccess
 
 
 
@@ -816,7 +816,7 @@ implements 
 
 splitTimeHisto
-private final MetricHistogram splitTimeHisto
+private final MetricHistogram splitTimeHisto
 
 
 
@@ -825,7 +825,7 @@ implements 
 
 flushTimeHisto
-private final MetricHistogram flushTimeHisto
+private final MetricHistogram flushTimeHisto
 
 
 
@@ -834,7 +834,7 @@ implements 
 
 flushMemstoreSizeHisto
-private final MetricHistogram flushMemstoreSizeHisto
+private final MetricHistogram flushMemstoreSizeHisto
 
 
 
@@ -843,7 +843,7 @@ implements 
 
 flushOutputSizeHisto
-private final MetricHistogram flushOutputSizeHisto
+private final MetricHistogram flushOutputSizeHisto
 
 
 
@@ -852,7 +852,7 @@ implements 
 
 flushedMemstoreBytes
-private final MutableFastCounter flushedMemstoreBytes
+private final MutableFastCounter flushedMemstoreBytes
 
 
 
@@ -861,7 +861,7 @@ implements 
 
 flushedOutputBytes
-private final MutableFastCounter flushedOutputBytes
+private final MutableFastCounter flushedOutputBytes
 
 
 
@@ -870,7 +870,7 @@ implements 
 
 compactionTimeHisto
-private final MetricHistogram compactionTimeHisto
+private final MetricHistogram compactionTimeHisto
 
 
 
@@ -879,7 +879,7 @@ implements 
 
 compactionInputFileCountHisto
-private final MetricHistogram compactionInputFileCountHisto
+private final MetricHistogram compactionInputFileCountHisto
 
 
 
@@ -888,7 +888,7 @@ implements 
 
 compactionInputSizeHisto
-private final MetricHistogram compactionInputSizeHisto
+private final MetricHistogram compactionInputSizeHisto
 
 
 
@@ -897,7 +897,7 @@ implements 
 
 compactionOutputFileCountHisto
-private final MetricHistogram compactionOutputFileCountHisto
+private final MetricHistogram compactionOutputFileCountHisto
 
 
 
@@ -906,7 +906,7 @@ implements 
 
 compactionOutputSizeHisto
-private final MetricHistogram compactionOutputSizeHisto
+private final MetricHistogram compactionOutputSizeHisto
 
 
 
@@ -915,7 +915,7 @@ implements 
 
 compactedInputBytes
-private final MutableFastCounter compactedInputBytes
+private final MutableFastCounter compactedInputBytes
 
 
 
@@ -924,7 +924,7 @@ implements 
 
 compactedOutputBytes
-private final MutableFastCounter compactedOutputBytes
+private final MutableFastCounter compactedOutputBytes
 
 
 
@@ -933,7 +933,7 @@ implements 
 
 majorCompactionTimeHisto
-private final MetricHistogram majorCompactionTimeHisto
+private final MetricHistogram majorCompactionTimeHisto
 
 
 
@@ -942,7 +942,7 @@ implements 
 
 majorCompactionInputFileCountHisto
-private final MetricHistogram majorCompactionInputFileCountHisto
+private final MetricHistogram majorCompactionInputFileCountHisto
 
 
 
@@ -951,7 +951,7 @@ implements 
 
 majorCompactionInputSizeHisto
-private final MetricHistogram majorCompactionInputSizeHisto
+private final MetricHistogram majorCompactionInputSizeHisto
 
 
 
@@ -960,7 +960,7 @@ implements 
 
 majorCompactionOutputFileCountHisto
-private final MetricHistogram majorCompactionOutputFileCountHisto
+private final MetricHistogram majorCompactionOutputFileCountHisto
 
 
 
@@ -969,7 +969,7 @@ implements 
 
 majorCompactionOutputSizeHisto
-private final MetricHistogram majorCompactionOutputSizeHisto
+private final MetricHistogram majorCompactionOutputSizeHisto
 
 
 
@@ -978,7 +978,7 @@ implements 
 
 majorCompactedInputBytes
-private final MutableFastCounter majorCompactedInputBytes
+private final MutableFastCounter majorCompactedInputBytes
 
 
 
@@ -987,7 +987,7 @@ implements 
 
 majorCompactedOutputBytes
-private final MutableFastCounter majorCompactedOutputBytes
+private final MutableFastCounter majorCompactedOutputBytes
 
 
 
@@ -996,7 +996,7 @@ implements 
 
 infoPauseThresholdExceeded
-private final MutableFastCounter infoPauseThresholdExceeded
+private final MutableFastCounter infoPauseThresholdExceeded
 
 
 
@@ -1005,7 +1005,7 @@ implements 
 
 warnPauseThresholdExceeded
-private final MutableFastCounter warnPauseThresholdExceeded
+private final MutableFastCounter warnPauseThresholdExceeded
 
 
 
@@ -1014,7 +1014,7 @@ implements 
 
 pausesWithGc
-private final MetricHistogram pausesWithGc
+private final MetricHistogram pausesWithGc
 
 
 
@@ -1023,7 +1023,7 @@ implements 

[24/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.

2018-08-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
index c9128a4..c5b8dbd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
@@ -63,750 +63,734 @@
 055public class RegionStates {
 056  private static final Logger LOG = 
LoggerFactory.getLogger(RegionStates.class);
 057
-058  // TODO: need to be more specific, i.e, 
OPENING vs. OPEN, CLOSING vs. CLOSED.
-059  static final State[] 
STATES_EXPECTED_ON_OPEN = new State[] {
-060State.OPEN, // State may already be 
OPEN if we died after receiving the OPEN from regionserver
-061// but before complete 
finish of AssignProcedure. HBASE-20100.
-062State.OFFLINE, State.CLOSED, 
State.ABNORMALLY_CLOSED, // disable/offline
-063State.SPLITTING, // 
ServerCrashProcedure
-064State.OPENING, State.FAILED_OPEN, // 
already in-progress (retrying)
-065State.MERGED, State.SPLITTING_NEW
-066  };
-067
-068  static final State[] 
STATES_EXPECTED_ON_CLOSE = new State[] {
-069State.SPLITTING, State.MERGING, 
State.OPENING, // ServerCrashProcedure
-070State.OPEN,   // 
enabled/open
-071State.CLOSING // 
already in-progress (retrying)
-072  };
-073
-074  // This comparator sorts the 
RegionStates by time stamp then Region name.
-075  // Comparing by timestamp alone can 
lead us to discard different RegionStates that happen
-076  // to share a timestamp.
-077  private static class 
RegionStateStampComparator implements ComparatorRegionState {
-078@Override
-079public int compare(final RegionState 
l, final RegionState r) {
-080  int stampCmp = 
Long.compare(l.getStamp(), r.getStamp());
-081  return stampCmp != 0 ? stampCmp : 
RegionInfo.COMPARATOR.compare(l.getRegion(), r.getRegion());
-082}
-083  }
-084
-085  public final static 
RegionStateStampComparator REGION_STATE_STAMP_COMPARATOR =
-086  new RegionStateStampComparator();
-087
-088  // TODO: Replace the 
ConcurrentSkipListMaps
-089  /**
-090   * RegionName -- i.e. 
RegionInfo.getRegionName() -- as bytes to {@link RegionStateNode}
-091   */
-092  private final 
ConcurrentSkipListMapbyte[], RegionStateNode regionsMap =
-093  new 
ConcurrentSkipListMapbyte[], RegionStateNode(Bytes.BYTES_COMPARATOR);
+058  // This comparator sorts the 
RegionStates by time stamp then Region name.
+059  // Comparing by timestamp alone can 
lead us to discard different RegionStates that happen
+060  // to share a timestamp.
+061  private static class 
RegionStateStampComparator implements ComparatorRegionState {
+062@Override
+063public int compare(final RegionState 
l, final RegionState r) {
+064  int stampCmp = 
Long.compare(l.getStamp(), r.getStamp());
+065  return stampCmp != 0 ? stampCmp : 
RegionInfo.COMPARATOR.compare(l.getRegion(), r.getRegion());
+066}
+067  }
+068
+069  public final static 
RegionStateStampComparator REGION_STATE_STAMP_COMPARATOR =
+070  new RegionStateStampComparator();
+071
+072  // TODO: Replace the 
ConcurrentSkipListMaps
+073  /**
+074   * RegionName -- i.e. 
RegionInfo.getRegionName() -- as bytes to {@link RegionStateNode}
+075   */
+076  private final 
ConcurrentSkipListMapbyte[], RegionStateNode regionsMap =
+077  new 
ConcurrentSkipListMapbyte[], RegionStateNode(Bytes.BYTES_COMPARATOR);
+078
+079  private final 
ConcurrentSkipListMapRegionInfo, RegionStateNode regionInTransition =
+080new 
ConcurrentSkipListMapRegionInfo, 
RegionStateNode(RegionInfo.COMPARATOR);
+081
+082  /**
+083   * Regions marked as offline on a read 
of hbase:meta. Unused or at least, once
+084   * offlined, regions have no means of 
coming on line again. TODO.
+085   */
+086  private final 
ConcurrentSkipListMapRegionInfo, RegionStateNode regionOffline =
+087new 
ConcurrentSkipListMapRegionInfo, RegionStateNode();
+088
+089  private final 
ConcurrentSkipListMapbyte[], RegionFailedOpen regionFailedOpen =
+090new ConcurrentSkipListMapbyte[], 
RegionFailedOpen(Bytes.BYTES_COMPARATOR);
+091
+092  private final 
ConcurrentHashMapServerName, ServerStateNode serverMap =
+093  new 
ConcurrentHashMapServerName, ServerStateNode();
 094
-095  private final 
ConcurrentSkipListMapRegionInfo, RegionStateNode regionInTransition =
-096new 
ConcurrentSkipListMapRegionInfo, 
RegionStateNode(RegionInfo.COMPARATOR);
-097
-098  /**
-099   * Regions marked as offline on a read 
of hbase:meta. Unused or at least, 

[24/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/apidocs/src-html/org/apache/hadoop/hbase/filter/ValueFilter.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/filter/ValueFilter.html 
b/apidocs/src-html/org/apache/hadoop/hbase/filter/ValueFilter.html
index ab5f2a7..2f19834 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/ValueFilter.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/ValueFilter.html
@@ -29,125 +29,140 @@
 021
 022import java.io.IOException;
 023import java.util.ArrayList;
-024
-025import org.apache.hadoop.hbase.Cell;
-026import 
org.apache.hadoop.hbase.CompareOperator;
-027import 
org.apache.yetus.audience.InterfaceAudience;
-028import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-029import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-030import 
org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
-031import 
org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-032
-033/**
-034 * This filter is used to filter based on column value. It takes an
-035 * operator (equal, greater, not equal, etc) and a byte [] comparator for the
-036 * cell value.
-037 * <p>
-038 * This filter can be wrapped with {@link WhileMatchFilter} and {@link SkipFilter}
-039 * to add more control.
-040 * <p>
-041 * Multiple filters can be combined using {@link FilterList}.
-042 * <p>
-043 * To test the value of a single qualifier when scanning multiple qualifiers,
-044 * use {@link SingleColumnValueFilter}.
-045 */
-046@InterfaceAudience.Public
-047public class ValueFilter extends 
CompareFilter {
-048
-049  /**
-050   * Constructor.
-051   * @param valueCompareOp the compare op for value matching
-052   * @param valueComparator the comparator for value matching
-053   * @deprecated Since 2.0.0. Will be removed in 3.0.0.
-054   * Use {@link #ValueFilter(CompareOperator, ByteArrayComparable)}
-055   */
-056  public ValueFilter(final CompareOp valueCompareOp,
-057      final ByteArrayComparable valueComparator) {
-058    super(valueCompareOp, valueComparator);
-059  }
-060
-061  /**
-062   * Constructor.
-063   * @param valueCompareOp the compare op for value matching
-064   * @param valueComparator the comparator for value matching
-065   */
-066  public ValueFilter(final CompareOperator valueCompareOp,
-067                     final ByteArrayComparable valueComparator) {
-068    super(valueCompareOp, valueComparator);
-069  }
-070
-071  @Deprecated
-072  @Override
-073  public ReturnCode filterKeyValue(final Cell c) {
-074    return filterCell(c);
-075  }
-076
-077  @Override
-078  public ReturnCode filterCell(final Cell c) {
-079    if (compareValue(getCompareOperator(), this.comparator, c)) {
-080      return ReturnCode.SKIP;
-081    }
-082    return ReturnCode.INCLUDE;
-083  }
-084
-085  public static Filter createFilterFromArguments(ArrayList<byte []> filterArguments) {
-086    @SuppressWarnings("rawtypes")  // for arguments
-087    ArrayList arguments = CompareFilter.extractArguments(filterArguments);
-088    CompareOperator compareOp = (CompareOperator)arguments.get(0);
-089    ByteArrayComparable comparator = (ByteArrayComparable)arguments.get(1);
-090    return new ValueFilter(compareOp, comparator);
-091  }
-092
-093  /**
-094   * @return The filter serialized using pb
-095   */
-096  @Override
-097  public byte [] toByteArray() {
-098    FilterProtos.ValueFilter.Builder builder =
-099      FilterProtos.ValueFilter.newBuilder();
-100    builder.setCompareFilter(super.convert());
-101    return builder.build().toByteArray();
-102  }
-103
-104  /**
-105   * @param pbBytes A pb serialized {@link ValueFilter} instance
-106   * @return An instance of {@link ValueFilter} made from <code>bytes</code>
-107   * @throws DeserializationException
-108   * @see #toByteArray
-109   */
-110  public static ValueFilter parseFrom(final byte [] pbBytes)
-111      throws DeserializationException {
-112    FilterProtos.ValueFilter proto;
-113    try {
-114      proto = FilterProtos.ValueFilter.parseFrom(pbBytes);
-115    } catch (InvalidProtocolBufferException e) {
-116      throw new DeserializationException(e);
-117    }
-118    final CompareOperator valueCompareOp =
-119      CompareOperator.valueOf(proto.getCompareFilter().getCompareOp().name());
-120    ByteArrayComparable valueComparator = null;
-121    try {
-122      if (proto.getCompareFilter().hasComparator()) {
-123        valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator());
-124      }
-125    } catch (IOException ioe) {
-126      throw new DeserializationException(ioe);
-127    }
-128    return new ValueFilter(valueCompareOp, valueComparator);
-129  }
-130
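For orientation, a short client-side sketch of how this filter is typically attached to a Scan (standard HBase client API; the compared value is a placeholder):

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.ValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

class ValueFilterDemo {
  // Only cells whose value equals "v1" survive the scan.
  static Scan scanForValue() {
    ValueFilter filter = new ValueFilter(
        CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("v1")));
    return new Scan().setFilter(filter);
  }
}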
-131  /**
-132   * @return true if and only if the fields of the filter that are serialized
-133   * are equal to the corresponding fields in other.  Used for testing.
-134   

[24/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.

2018-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
index db8431b..a8cb7c4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
@@ -885,7766 +885,7797 @@
 877   * @return What the next sequence (edit) id should be.
 878   * @throws IOException e
 879   */
-880  private long initialize(final CancelableProgressable reporter) throws IOException {
-881
-882    // Refuse to open the region if there is no column family in the table
-883    if (htableDescriptor.getColumnFamilyCount() == 0) {
-884      throw new DoNotRetryIOException("Table " + htableDescriptor.getTableName().getNameAsString() +
-885          " should have at least one column family.");
-886    }
-887
-888    MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this);
-889    long nextSeqId = -1;
-890    try {
-891      nextSeqId = initializeRegionInternals(reporter, status);
-892      return nextSeqId;
-893    } finally {
-894      // nextSeqId will be -1 if the initialization fails.
-895      // At least it will be 0 otherwise.
-896      if (nextSeqId == -1) {
-897        status.abort("Exception during region " + getRegionInfo().getRegionNameAsString() +
-898          " initialization.");
-899      }
-900    }
-901  }
-902
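The try/finally above is the usual shape for surfacing a failed initialization to the task monitor: the sentinel stays -1 unless the internals return, so the finally block can tell success from failure without catching. A stripped-down sketch of the idiom (the Status interface is a stand-in, not the HBase MonitoredTask API):

class InitStatusSketch {
  interface Status { void abort(String why); }

  // Returns the next sequence id, or propagates the exception after
  // marking the status aborted (the sentinel -1 means "never assigned").
  static long initialize(Status status) throws Exception {
    long nextSeqId = -1;
    try {
      nextSeqId = computeNextSeqId(); // may throw
      return nextSeqId;
    } finally {
      if (nextSeqId == -1) {
        status.abort("Exception during initialization.");
      }
    }
  }

  static long computeNextSeqId() { return 0; }
}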
-903  private long initializeRegionInternals(final CancelableProgressable reporter,
-904      final MonitoredTask status) throws IOException {
-905    if (coprocessorHost != null) {
-906      status.setStatus("Running coprocessor pre-open hook");
-907      coprocessorHost.preOpen();
-908    }
-909
-910    // Write HRI to a file in case we need to recover hbase:meta
-911    // Only the primary replica should write .regioninfo
-912    if (this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
-913      status.setStatus("Writing region info on filesystem");
-914      fs.checkRegionInfoOnFilesystem();
-915    }
-916
-917    // Initialize all the HStores
-918    status.setStatus("Initializing all the Stores");
-919    long maxSeqId = initializeStores(reporter, status);
-920    this.mvcc.advanceTo(maxSeqId);
-921    if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-922      Collection<HStore> stores = this.stores.values();
-923      try {
-924        // update the stores that we are replaying
-925        LOG.debug("replaying wal for " + this.getRegionInfo().getEncodedName());
-926        stores.forEach(HStore::startReplayingFromWAL);
-927        // Recover any edits if available.
-928        maxSeqId = Math.max(maxSeqId,
-929          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
-930        // Make sure mvcc is up to max.
-931        this.mvcc.advanceTo(maxSeqId);
-932      } finally {
-933        LOG.debug("stopping wal replay for " + this.getRegionInfo().getEncodedName());
-934        // update the stores that we are done replaying
-935        stores.forEach(HStore::stopReplayingFromWAL);
-936      }
-937    }
-938    this.lastReplayedOpenRegionSeqId = maxSeqId;
-939
-940    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-941    this.writestate.flushRequested = false;
-942    this.writestate.compacting.set(0);
-943
-944    if (this.writestate.writesEnabled) {
-945      LOG.debug("Cleaning up temporary data for " + this.getRegionInfo().getEncodedName());
-946      // Remove temporary data left over from old regions
-947      status.setStatus("Cleaning up temporary data from old regions");
-948      fs.cleanupTempDir();
-949    }
-950
-951    if (this.writestate.writesEnabled) {
-952      status.setStatus("Cleaning up detritus from prior splits");
-953      // Get rid of any splits or merges that were lost in-progress.  Clean out
-954      // these directories here on open.  We may be opening a region that was
-955      // being split but we crashed in the middle of it all.
-956      LOG.debug("Cleaning up detritus for " + this.getRegionInfo().getEncodedName());
-957      fs.cleanupAnySplitDetritus();
-958      fs.cleanupMergesDir();
-959    }
+880  @VisibleForTesting
+881  long initialize(final CancelableProgressable reporter) throws IOException {
+882
+883    // Refuse to open the region if there is no column family in the table
+884    if (htableDescriptor.getColumnFamilyCount() == 0) {
+885      throw new DoNotRetryIOException("Table " + htableDescriptor.getTableName().getNameAsString() +
+886          " should have at least one column 

[24/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.html
new file mode 100644
index 000..5c5a243
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.html
@@ -0,0 +1,1212 @@
+TransitRegionStateProcedure (Apache HBase 3.0.0-SNAPSHOT API)
+org.apache.hadoop.hbase.master.assignment
+Class TransitRegionStateProcedure
+
+java.lang.Object
+  org.apache.hadoop.hbase.procedure2.Procedure<TEnvironment>
+    org.apache.hadoop.hbase.procedure2.StateMachineProcedure<MasterProcedureEnv,TState>
+      org.apache.hadoop.hbase.master.procedure.AbstractStateMachineTableProcedure<TState>
+        org.apache.hadoop.hbase.master.procedure.AbstractStateMachineRegionProcedure<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionStateTransitionState>
+          org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure
+
+All Implemented Interfaces:
+Comparable<Procedure<MasterProcedureEnv>>, TableProcedureInterface
+
+@InterfaceAudience.Private
+public class TransitRegionStateProcedure
+extends AbstractStateMachineRegionProcedure<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionStateTransitionState>
+The procedure to deal with the state transition of a region. A region with a TRSP in place is
+ called RIT, i.e., RegionInTransition.
+
+ It can be used to assign/unassign/reopen/move a region. For
+ unassign(MasterProcedureEnv, RegionInfo) and reopen(MasterProcedureEnv, RegionInfo) you do not
+ need to specify a target server, while for assign(MasterProcedureEnv, RegionInfo, ServerName)
+ and move(MasterProcedureEnv, RegionInfo, ServerName) you may optionally provide one. For
+ move(MasterProcedureEnv, RegionInfo, ServerName), if you do not specify a targetServer, we will
+ select one randomly.
+
+ The typical state transition for assigning a region is:
+
+ GET_ASSIGN_CANDIDATE -> OPEN -> CONFIRM_OPENED
+
+ Notice that, if there are failures, we may go back to the GET_ASSIGN_CANDIDATE state to
+ try again.
+
+ The typical state transition for unassigning a region is:
+
+ CLOSE -> CONFIRM_CLOSED
+
+ Here things go a bit differently: on failure, especially a server crash, we first go to the
+ GET_ASSIGN_CANDIDATE state to bring the region online, and then go through the normal path to
+ unassign it.
+
+ The typical state transition for reopening/moving a region is:
+
+ CLOSE -> CONFIRM_CLOSED -> GET_ASSIGN_CANDIDATE -> OPEN -> CONFIRM_OPENED
+
+ The retry logic is the same as for assign/unassign above.
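A compact way to picture the assign path is a loop over states with a retry edge back to candidate selection. The sketch below is illustrative only (plain Java, invented helper names), not the TRSP implementation:

enum AssignState { GET_ASSIGN_CANDIDATE, OPEN, CONFIRM_OPENED, DONE }

class AssignLoopSketch {
  // Drive a region through the assign states; a failed open or a failed
  // confirmation falls back to picking a new candidate server.
  static void run() {
    AssignState state = AssignState.GET_ASSIGN_CANDIDATE;
    while (state != AssignState.DONE) {
      switch (state) {
        case GET_ASSIGN_CANDIDATE:
          pickCandidateServer();
          state = AssignState.OPEN;
          break;
        case OPEN:
          state = tryOpen() ? AssignState.CONFIRM_OPENED : AssignState.GET_ASSIGN_CANDIDATE;
          break;
        case CONFIRM_OPENED:
          state = confirmOpened() ? AssignState.DONE : AssignState.GET_ASSIGN_CANDIDATE;
          break;
        default:
          throw new IllegalStateException(state.toString());
      }
    }
  }

  static void pickCandidateServer() {}
  static boolean tryOpen() { return true; }
  static boolean confirmOpened() { return true; }
}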
+ 
+ Notice that, although we allow specifying a target server, it only acts as a candidate; we do
+ not guarantee that the region will end up on the target server. If this matters to you, you
+ should check whether the region is on the target server after the procedure finishes.
+ 
+ When you want to schedule a TRSP, please check whether there is still 

[24/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/SampleUploader.Uploader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/SampleUploader.Uploader.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/SampleUploader.Uploader.html
index cca6b6f..7891753 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/SampleUploader.Uploader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/SampleUploader.Uploader.html
@@ -43,121 +43,125 @@
 035import org.apache.hadoop.util.Tool;
 036import org.apache.hadoop.util.ToolRunner;
 037import org.apache.yetus.audience.InterfaceAudience;
-038
-039/**
-040 * Sample Uploader MapReduce
-041 * <p>
-042 * This is EXAMPLE code.  You will need to change it to work for your context.
+038import org.slf4j.Logger;
+039import org.slf4j.LoggerFactory;
+040
+041/**
+042 * Sample Uploader MapReduce
 043 * <p>
-044 * Uses {@link TableReducer} to put the data into HBase. Change the InputFormat
-045 * to suit your data.  In this example, we are importing a CSV file.
-046 * <p>
-047 * <pre>row,family,qualifier,value</pre>
+044 * This is EXAMPLE code.  You will need to change it to work for your context.
+045 * <p>
+046 * Uses {@link TableReducer} to put the data into HBase. Change the InputFormat
+047 * to suit your data.  In this example, we are importing a CSV file.
 048 * <p>
-049 * The table and columnfamily we're to insert into must preexist.
+049 * <pre>row,family,qualifier,value</pre>
 050 * <p>
-051 * There is no reducer in this example as it is not necessary and adds
-052 * significant overhead.  If you need to do any massaging of data before
-053 * inserting into HBase, you can do this in the map as well.
-054 * <p>Do the following to start the MR job:
-055 * <pre>
-056 * ./bin/hadoop org.apache.hadoop.hbase.mapreduce.SampleUploader /tmp/input.csv TABLE_NAME
-057 * </pre>
-058 * <p>
-059 * This code was written against HBase 0.21 trunk.
-060 */
-061@InterfaceAudience.Private
-062public class SampleUploader extends Configured implements Tool {
-063
-064  private static final String NAME = "SampleUploader";
-065
-066  static class Uploader
-067      extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {
+051 * The table and columnfamily we're to insert into must preexist.
+052 * <p>
+053 * There is no reducer in this example as it is not necessary and adds
+054 * significant overhead.  If you need to do any massaging of data before
+055 * inserting into HBase, you can do this in the map as well.
+056 * <p>Do the following to start the MR job:
+057 * <pre>
+058 * ./bin/hadoop org.apache.hadoop.hbase.mapreduce.SampleUploader /tmp/input.csv TABLE_NAME
+059 * </pre>
+060 * <p>
+061 * This code was written against HBase 0.21 trunk.
+062 */
+063@InterfaceAudience.Private
+064public class SampleUploader extends Configured implements Tool {
+065  private static final Logger LOG = LoggerFactory.getLogger(SampleUploader.class);
+066
+067  private static final String NAME = "SampleUploader";
 068
-069    private long checkpoint = 100;
-070    private long count = 0;
+069  static class Uploader
+070      extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {
 071
-072    @Override
-073    public void map(LongWritable key, Text line, Context context)
-074        throws IOException {
-075
-076      // Input is a CSV file
-077      // Each map() is a single line, where the key is the line number
-078      // Each line is comma-delimited; row,family,qualifier,value
-079
-080      // Split CSV line
-081      String [] values = line.toString().split(",");
-082      if(values.length != 4) {
-083        return;
-084      }
-085
-086      // Extract each value
-087      byte [] row = Bytes.toBytes(values[0]);
-088      byte [] family = Bytes.toBytes(values[1]);
-089      byte [] qualifier = Bytes.toBytes(values[2]);
-090      byte [] value = Bytes.toBytes(values[3]);
-091
-092      // Create Put
-093      Put put = new Put(row);
-094      put.addColumn(family, qualifier, value);
-095
-096      // Uncomment below to disable WAL. This will improve performance but means
-097      // you will experience data loss in the case of a RegionServer crash.
-098      // put.setWriteToWAL(false);
-099
-100      try {
-101        context.write(new ImmutableBytesWritable(row), put);
-102      } catch (InterruptedException e) {
-103        e.printStackTrace();
-104      }
-105
-106      // Set status every checkpoint lines
-107      if(++count % checkpoint == 0) {
-108        context.setStatus("Emitting Put " + count);
-109      }
-110    }
-111  }
-113  /**
-114   * Job configuration.
-115   */
-116  public static Job 
configureJob(Configuration conf, String [] args)
-117  throws IOException {
-118Path inputPath = new Path(args[0]);
-119String tableName = args[1];
-120Job job = new Job(conf, 

[24/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
index b7b4236..3d1edb3 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
@@ -259,1863 +259,1867 @@
 251   * + Metadata!  + = See note on BLOCK_METADATA_SPACE above.
 252   * ++
 253   * </code>
-254   * @see #serialize(ByteBuffer)
+254   * @see #serialize(ByteBuffer, boolean)
 255   */
-256  static final CacheableDeserializer<Cacheable> BLOCK_DESERIALIZER =
-257      new CacheableDeserializer<Cacheable>() {
-258    @Override
-259    public HFileBlock deserialize(ByteBuff buf, boolean reuse, MemoryType memType)
-260        throws IOException {
-261      // The buf has the file block followed by block metadata.
-262      // Set limit to just before the BLOCK_METADATA_SPACE then rewind.
-263      buf.limit(buf.limit() - BLOCK_METADATA_SPACE).rewind();
-264      // Get a new buffer to pass the HFileBlock for it to 'own'.
-265      ByteBuff newByteBuff;
-266      if (reuse) {
-267        newByteBuff = buf.slice();
-268      } else {
-269        int len = buf.limit();
-270        newByteBuff = new SingleByteBuff(ByteBuffer.allocate(len));
-271        newByteBuff.put(0, buf, buf.position(), len);
-272      }
-273      // Read out the BLOCK_METADATA_SPACE content and shove into our HFileBlock.
-274      buf.position(buf.limit());
-275      buf.limit(buf.limit() + HFileBlock.BLOCK_METADATA_SPACE);
-276      boolean usesChecksum = buf.get() == (byte) 1;
-277      long offset = buf.getLong();
-278      int nextBlockOnDiskSize = buf.getInt();
-279      HFileBlock hFileBlock =
-280          new HFileBlock(newByteBuff, usesChecksum, memType, offset, nextBlockOnDiskSize, null);
-281      return hFileBlock;
-282    }
-283
-284    @Override
-285    public int getDeserialiserIdentifier() {
-286      return DESERIALIZER_IDENTIFIER;
-287    }
-288
-289    @Override
-290    public HFileBlock deserialize(ByteBuff b) throws IOException {
-291      // Used only in tests
-292      return deserialize(b, false, MemoryType.EXCLUSIVE);
-293    }
-294  };
-295
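The limit-then-rewind dance in deserialize() is a general java.nio technique for peeling a fixed-size metadata trailer off the end of a buffer. A self-contained sketch with a plain ByteBuffer (not the HBase ByteBuff API):

import java.nio.ByteBuffer;

class TrailingMetadataDemo {
  static final int METADATA_SPACE = Long.BYTES; // assume an 8-byte trailer

  public static void main(String[] args) {
    // Payload "abc" followed by an 8-byte metadata trailer (offset = 42).
    ByteBuffer buf = ByteBuffer.allocate(3 + METADATA_SPACE);
    buf.put(new byte[] { 'a', 'b', 'c' }).putLong(42L).flip();

    // Restrict the view to the payload, then read it out.
    buf.limit(buf.limit() - METADATA_SPACE).rewind();
    byte[] payload = new byte[buf.remaining()];
    buf.get(payload);

    // Extend the limit again to expose the trailer that follows the payload.
    buf.limit(buf.limit() + METADATA_SPACE);
    long offset = buf.getLong();
    System.out.println(new String(payload) + " @" + offset); // abc @42
  }
}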
-296  private static final int DESERIALIZER_IDENTIFIER;
-297  static {
-298    DESERIALIZER_IDENTIFIER =
-299        CacheableDeserializerIdManager.registerDeserializer(BLOCK_DESERIALIZER);
-300  }
-301
-302  /**
-303   * Copy constructor. Creates a shallow copy of {@code that}'s buffer.
-304   */
-305  private HFileBlock(HFileBlock that) {
-306    this(that, false);
-307  }
-308
-309  /**
-310   * Copy constructor. Creates a shallow/deep copy of {@code that}'s buffer as per the boolean
-311   * param.
-312   */
-313  private HFileBlock(HFileBlock that, boolean bufCopy) {
-314    init(that.blockType, that.onDiskSizeWithoutHeader,
-315        that.uncompressedSizeWithoutHeader, that.prevBlockOffset,
-316        that.offset, that.onDiskDataSizeWithHeader, that.nextBlockOnDiskSize, that.fileContext);
-317    if (bufCopy) {
-318      this.buf = new SingleByteBuff(ByteBuffer.wrap(that.buf.toBytes(0, that.buf.limit())));
-319    } else {
-320      this.buf = that.buf.duplicate();
-321    }
-322  }
-323
-324  /**
-325   * Creates a new {@link HFile} block from the given fields. This constructor
-326   * is used only while writing blocks and caching,
-327   * and is sitting in a byte buffer and we want to stuff the block into cache.
-328   *
-329   * <p>TODO: The caller presumes no checksumming
-330   * required of this block instance since going into cache; checksum already verified on
-331   * underlying block data pulled in from filesystem. Is that correct? What if cache is SSD?
+256  public static final CacheableDeserializer<Cacheable> BLOCK_DESERIALIZER = new BlockDeserializer();
+257
+258  public static final class BlockDeserializer implements CacheableDeserializer<Cacheable> {
+259    private BlockDeserializer() {
+260    }
+261
+262    @Override
+263    public HFileBlock deserialize(ByteBuff buf, boolean reuse, MemoryType memType)
+264        throws IOException {
+265      // The buf has the file block followed by block metadata.
+266      // Set limit to just before the BLOCK_METADATA_SPACE then rewind.
+267      buf.limit(buf.limit() - BLOCK_METADATA_SPACE).rewind();
+268      // Get a new buffer to pass the HFileBlock for it to 'own'.
+269      ByteBuff newByteBuff;
+270      if (reuse) {
+271        newByteBuff = buf.slice();
+272      } else {
+273        int len = buf.limit();
+274        newByteBuff = new SingleByteBuff(ByteBuffer.allocate(len));
+275        newByteBuff.put(0, buf, 
[24/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.html
index eb21f1e..2a05f32 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.html
@@ -291,7 +291,7 @@ extends AbstractPeerProcedure
-acquireLock, getLatch, getPeerId, hasLock, holdLock, refreshPeer, releaseLock, rollbackState
+acquireLock, getLatch, getPeerId, holdLock, refreshPeer, releaseLock, rollbackState, waitInitialized
 
 
@@ -305,7 +305,7 @@ extends Procedure
-addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, doAcquireLock, doExecute, doReleaseLock, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasOwner, hasParent, hasTimeout, haveSameParent, incChildrenLatch, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, toString, toStringClass, toStringClassDetails, toStringDetails, toStringSimpleSB, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted
+addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, doExecute, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, incChildrenLatch, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, toString, toStringClass, toStringClassDetails, toStringDetails, toStringSimpleSB, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.html
index 88d6b76..b8efab2 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.html
@@ -291,7 +291,7 @@ extends AbstractPeerProcedure
-acquireLock, getLatch, getPeerId, hasLock, holdLock, refreshPeer, releaseLock, rollbackState
+acquireLock, getLatch, getPeerId, holdLock, refreshPeer, releaseLock, rollbackState, waitInitialized
 
 
 
@@ -305,7 +305,7 @@ extends Procedure
-addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, doAcquireLock, doExecute, doReleaseLock, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasOwner, hasParent, hasTimeout, haveSameParent, incChildrenLatch, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, 
[24/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
index 233dba3..91b9055 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
@@ -540,1205 +540,1204 @@
 532      sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> {
 533        Deque<BalancerRegionLoad> rLoads = oldLoads.get(Bytes.toString(regionName));
 534        if (rLoads == null) {
-535          // There was nothing there
-536          rLoads = new ArrayDeque<>();
-537        } else if (rLoads.size() >= numRegionLoadsToRemember) {
-538          rLoads.remove();
-539        }
-540        rLoads.add(new BalancerRegionLoad(rm));
-541        loads.put(Bytes.toString(regionName), rLoads);
-542      });
-543    });
-544
-545    for (CostFromRegionLoadFunction cost : regionLoadFunctions) {
-546      cost.setLoads(loads);
-547    }
-548  }
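The branch above keeps at most numRegionLoadsToRemember recent samples per region by evicting the head before appending. A minimal stand-alone version of that bounded-history idiom (invented names):

import java.util.ArrayDeque;
import java.util.Deque;

class BoundedHistoryDemo {
  static final int MAX_SAMPLES = 15; // stand-in for numRegionLoadsToRemember

  // Append a sample, dropping the oldest once the window is full.
  static void record(Deque<Double> history, double sample) {
    if (history.size() >= MAX_SAMPLES) {
      history.remove(); // evict from the head (oldest)
    }
    history.add(sample); // append at the tail (newest)
  }

  public static void main(String[] args) {
    Deque<Double> h = new ArrayDeque<>();
    for (int i = 0; i < 20; i++) {
      record(h, i);
    }
    System.out.println(h.size()); // prints 15
  }
}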
-549
-550  protected void initCosts(Cluster cluster) {
-551    for (CostFunction c : costFunctions) {
-552      c.init(cluster);
-553    }
-554  }
-555
-556  protected void updateCostsWithAction(Cluster cluster, Action action) {
-557    for (CostFunction c : costFunctions) {
-558      c.postAction(action);
-559    }
-560  }
-561
-562  /**
-563   * Get the names of the cost functions
-564   */
-565  public String[] getCostFunctionNames() {
-566    if (costFunctions == null) return null;
-567    String[] ret = new String[costFunctions.length];
-568    for (int i = 0; i < costFunctions.length; i++) {
-569      CostFunction c = costFunctions[i];
-570      ret[i] = c.getClass().getSimpleName();
-571    }
-572
-573    return ret;
-574  }
-575
-576  /**
-577   * This is the main cost function.  It will compute a cost associated with a proposed cluster
-578   * state.  All different costs will be combined with their multipliers to produce a double cost.
-579   *
-580   * @param cluster The state of the cluster
-581   * @param previousCost the previous cost. This is used as an early out.
-582   * @return a double of a cost associated with the proposed cluster state.  This cost is an
-583   *         aggregate of all individual cost functions.
-584   */
-585  protected double computeCost(Cluster cluster, double previousCost) {
-586    double total = 0;
-587
-588    for (int i = 0; i < costFunctions.length; i++) {
-589      CostFunction c = costFunctions[i];
-590      this.tempFunctionCosts[i] = 0.0;
-591
-592      if (c.getMultiplier() <= 0) {
-593        continue;
-594      }
-595
-596      Float multiplier = c.getMultiplier();
-597      Double cost = c.cost();
-598
-599      this.tempFunctionCosts[i] = multiplier * cost;
-600      total += this.tempFunctionCosts[i];
-601
-602      if (total > previousCost) {
-603        break;
-604      }
-605    }
-606
-607    return total;
-608  }
-609
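The early-out in computeCost works because every term is non-negative: once the running weighted sum exceeds the incumbent (previous) cost, the candidate state cannot win, so the remaining functions need not be evaluated. A minimal sketch of that aggregation (hypothetical cost values and weights):

class WeightedCostDemo {
  // Each cost term is assumed normalized to [0, 1]; weights scale them.
  static double computeCost(double[] costs, double[] weights, double previousCost) {
    double total = 0;
    for (int i = 0; i < costs.length; i++) {
      if (weights[i] <= 0) {
        continue; // disabled cost function
      }
      total += weights[i] * costs[i];
      if (total > previousCost) {
        break; // early out: already worse than the incumbent state
      }
    }
    return total;
  }

  public static void main(String[] args) {
    double cost = computeCost(new double[] { 0.2, 0.1 },
        new double[] { 500, 5 }, Double.MAX_VALUE);
    System.out.println(cost); // prints 100.5
  }
}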
-610  /** Generates a candidate action to be applied to the cluster for cost function search */
-611  abstract static class CandidateGenerator {
-612    abstract Cluster.Action generate(Cluster cluster);
-613
-614    /**
-615     * From a list of regions pick a random one. Null can be returned which
-616     * {@link StochasticLoadBalancer#balanceCluster(Map)} recognizes as a signal to try a region move
-617     * rather than a swap.
-618     *
-619     * @param cluster        The state of the cluster
-620     * @param server         index of the server
-621     * @param chanceOfNoSwap Chance that this will decide to try a move rather
-622     *                       than a swap.
-623     * @return a random {@link RegionInfo} or null if an asymmetrical move is
-624     *         suggested.
-625     */
-626    protected int pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
-627      // Check to see if this is just a move.
-628      if (cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat() < chanceOfNoSwap) {
-629        // signal a move only.
-630        return -1;
-631      }
-632      int rand = RANDOM.nextInt(cluster.regionsPerServer[server].length);
-633      return cluster.regionsPerServer[server][rand];
-634
-635    }
-636    protected int pickRandomServer(Cluster cluster) {
-637      if (cluster.numServers < 1) {
-638        return -1;
-639      }
-640
-641      return RANDOM.nextInt(cluster.numServers);
-642    }
-643
-644    protected int pickRandomRack(Cluster cluster) {
-645      if (cluster.numRacks < 1) {
-646        return -1;
-647      }

[24/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/NamespaceExistException.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/NamespaceExistException.html 
b/apidocs/org/apache/hadoop/hbase/NamespaceExistException.html
index ba2dc6f..e655215 100644
--- a/apidocs/org/apache/hadoop/hbase/NamespaceExistException.html
+++ b/apidocs/org/apache/hadoop/hbase/NamespaceExistException.html
 org.apache.hadoop.hbase
-类 NamespaceExistException
+Class NamespaceExistException

@@ -127,37 +127,37 @@ extends java.lang.Object

Method Summary

All 
    [24/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.html
     
    b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.html
    index f836972..fd055a3 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.html
    @@ -257,7 +257,7 @@ extends ReplicationSource
-addHFileRefs, enqueueLog, getCurrentPath, getPeer, getQueueId, getQueueStorage, getReplicationEndpoint, getServer, getSourceManager, getSourceMetrics, getStats, getWALFileLengthProvider, isSourceActive, postShipEdits, removeWorker, sleepForRetries, startup, terminate, terminate, terminate, tryThrottle, uncaughtException
+addHFileRefs, enqueueLog, getCurrentPath, getPeer, getQueueId, getQueueStorage, getReplicationEndpoint, getServer, getSourceManager, getSourceMetrics, getStats, getWALFileLengthProvider, getWalGroupStatus, isSourceActive, postShipEdits, removeWorker, sleepForRetries, startup, terminate, terminate, terminate, tryThrottle, uncaughtException
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/org/apache/hadoop/hbase/replication/regionserver/Replication.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/Replication.html 
    b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/Replication.html
    index ccbbd07..9d52369 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/Replication.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/Replication.html
@@ -624,6 +624,8 @@ implements ReplicationSourceManager getReplicationManager()
 Get the replication sources manager
 
+Specified by:
+getReplicationManager in interface ReplicationSourceService
 Returns:
 the manager if replication is enabled, else returns false
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.html
     
    b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.html
    index fc44bfc..18cea15 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.html
@@ -180,7 +180,7 @@ extends java.lang.Object

Method Summary

-All Methods  Instance Methods  Concrete Methods
+All Methods  Static Methods  Instance Methods  Concrete Methods

Modifier and Type    Method and Description
@@ -193,26 +193,32 @@ extends java.lang.Object
 
+(package private) static long
+calculateReplicationDelay(long ageOfLastShippedOp,
+                          long timeStampOfLastShippedOp,
+                          int sizeOfLogQueue)
+
 org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink
 getReplicationLoadSink()
 
 List<org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource>
 getReplicationLoadSourceList()
 
 String
 sinkToString()
 
 String
 sourceToString()
 
 String
 toString()
 
@@ -325,13 +331,24 @@ extends java.lang.Object
 
+calculateReplicationDelay
+static long calculateReplicationDelay(long ageOfLastShippedOp,
+  
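The diff cuts off before the method body. As a rough illustration of what a delay metric of this shape could compute (an assumption, not the HBase implementation): zero when the sink is caught up, otherwise the age of the last shipped edit plus the time elapsed since it shipped.

class ReplicationDelaySketch {
  // Hypothetical: a single queued log with nothing aged means caught up.
  static long calculateReplicationDelay(long ageOfLastShippedOp,
      long timeStampOfLastShippedOp, int sizeOfLogQueue, long now) {
    if (sizeOfLogQueue <= 1 && ageOfLastShippedOp == 0) {
      return 0; // caught up
    }
    return ageOfLastShippedOp + (now - timeStampOfLastShippedOp);
  }
}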

    [24/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/org/apache/hadoop/hbase/security/access/AuthResult.Params.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/security/access/AuthResult.Params.html 
    b/devapidocs/org/apache/hadoop/hbase/security/access/AuthResult.Params.html
    index 1035156..482aaa3 100644
    --- a/devapidocs/org/apache/hadoop/hbase/security/access/AuthResult.Params.html
    +++ b/devapidocs/org/apache/hadoop/hbase/security/access/AuthResult.Params.html
@@ -113,7 +113,7 @@
 
-public static class AuthResult.Params
+public static class AuthResult.Params
 extends java.lang.Object
 
@@ -134,22 +134,26 @@ extends java.lang.Object
 Field and Description
 
+private Map<String,String>
+extraParams
+
 private Map<byte[],? extends Collection<?>>
 families
 
 (package private) byte[]
 family
 
 private String
 namespace
 
 (package private) byte[]
 qualifier
 
 private TableName
 tableName
@@ -187,25 +191,34 @@ extends java.lang.Object
 
 AuthResult.Params
-setFamilies(Map<byte[],? extends Collection<?>> families)
+addExtraParam(String key, String value)
 
+private String
+concatenateExtraParams()
+
+AuthResult.Params
+setFamilies(Map<byte[],? extends Collection<?>> families)
+
 AuthResult.Params
 setFamily(byte[] family)
 
 AuthResult.Params
 setNamespace(String namespace)
 
 AuthResult.Params
 setQualifier(byte[] qualifier)
 
 AuthResult.Params
 setTableName(TableName table)
 
 String
 toString()
 
@@ -237,7 +250,7 @@ extends java.lang.Object
 
 namespace
-private String namespace
+private String namespace
 
@@ -246,7 +259,7 @@ extends java.lang.Object
 
 tableName
-private TableName tableName
+private TableName tableName
 
@@ -255,7 +268,7 @@ extends java.lang.Object
 
 families
-private Map<byte[],? extends Collection<?>> families

    [24/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
     
    b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
    index 12304bd..69216ae 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
@@ -127,7 +127,7 @@
 
-private class RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer
+private class RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer
 extends RawAsyncHBaseAdmin.TableProcedureBiConsumer
 
@@ -232,7 +232,7 @@ extends 
 
 TruncateTableProcedureBiConsumer
-TruncateTableProcedureBiConsumer(TableName tableName)
+TruncateTableProcedureBiConsumer(TableName tableName)
 
@@ -249,7 +249,7 @@ extends 
 
 getOperationType
-String getOperationType()
+String getOperationType()
 
 Specified by:
 getOperationType in class RawAsyncHBaseAdmin.TableProcedureBiConsumer
    
    

    [24/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
    index a215688..1e758ac 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
@@ -211,105 +211,112 @@ extends 
 (package private) static class
-StochasticLoadBalancer.LoadCandidateGenerator
+StochasticLoadBalancer.CPRequestCostFunction
+Compute the cost of the total number of coprocessor requests. The more unbalanced, the higher
+ the computed cost will be.
 
 (package private) static class
-StochasticLoadBalancer.LocalityBasedCandidateGenerator
+StochasticLoadBalancer.LoadCandidateGenerator
 
 (package private) static class
+StochasticLoadBalancer.LocalityBasedCandidateGenerator
+
+(package private) static class
 StochasticLoadBalancer.LocalityBasedCostFunction
 Compute a cost of a potential cluster configuration based upon where
 HStoreFiles are located.
 
 (package private) static class
 StochasticLoadBalancer.MemStoreSizeCostFunction
 Compute the cost of total memstore size.
 
 (package private) static class
 StochasticLoadBalancer.MoveCostFunction
 Given the starting state of the regions and a potential ending state,
 compute cost based upon the number of regions that have moved.
 
 (package private) static class
 StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction
 Compute the cost of a potential cluster state from skew in the number of
 primary regions on a cluster.
 
 (package private) static class
 StochasticLoadBalancer.RackLocalityCostFunction
 
 (package private) static class
 StochasticLoadBalancer.RandomCandidateGenerator
 
 (package private) static class
 StochasticLoadBalancer.ReadRequestCostFunction
 Compute the cost of the total number of read requests. The more unbalanced, the higher the
 computed cost will be.
 
 (package private) static class
 StochasticLoadBalancer.RegionCountSkewCostFunction
 Compute the cost of a potential cluster state from skew in the number of
 regions on a cluster.
 
 (package private) static class
 StochasticLoadBalancer.RegionReplicaCandidateGenerator
 Generates candidates which move the replicas out of the region server for
 co-hosted region replicas.
 
 (package private) static class
 StochasticLoadBalancer.RegionReplicaHostCostFunction
 A cost function for region replicas.
 
 (package private) static class
 StochasticLoadBalancer.RegionReplicaRackCandidateGenerator
 Generates candidates which move the replicas out of the rack for
 co-hosted region replicas in the same rack.
 
 (package private) static class
 StochasticLoadBalancer.RegionReplicaRackCostFunction
 A cost function for region replicas for the rack distribution.
 
 (package private) static class
 StochasticLoadBalancer.ServerLocalityCostFunction
 
 (package private) static class
 StochasticLoadBalancer.StoreFileCostFunction
 Compute the cost of total open storefiles size.
 
 (package private) static class
 StochasticLoadBalancer.TableSkewCostFunction
 Compute the cost of a potential cluster configuration based upon how evenly
 distributed tables are.
 
 (package private) static class
 StochasticLoadBalancer.WriteRequestCostFunction
 Compute the cost of the total number of write requests.
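Each of these cost functions maps a cluster state to a scalar, typically a normalized imbalance measure. A hedged sketch in that spirit (not the HBase implementation): cost as total absolute deviation from a perfectly even spread, scaled into [0, 1].

class ImbalanceCostSketch {
  // 0.0 when every server carries the same load; 1.0 when one server
  // carries everything.
  static double cost(long[] requestsPerServer) {
    long total = 0;
    for (long r : requestsPerServer) {
      total += r;
    }
    if (total == 0 || requestsPerServer.length < 2) {
      return 0.0;
    }
    double mean = (double) total / requestsPerServer.length;
    double deviation = 0;
    for (long r : requestsPerServer) {
      deviation += Math.abs(r - mean);
    }
    // Worst case (all load on one server): deviation = 2 * total * (n-1) / n.
    double worst = 2.0 * total * (requestsPerServer.length - 1) / requestsPerServer.length;
    return deviation / worst;
  }

  public static void main(String[] args) {
    System.out.println(cost(new long[] { 100, 100, 100 })); // 0.0
    System.out.println(cost(new long[] { 300, 0, 0 }));     // 1.0
  }
}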
@@ -1015,7 +1022,7 @@ extends 
 
 setCandidateGenerators
-protected void setCandidateGenerators(List<StochasticLoadBalancer.CandidateGenerator> customCandidateGenerators)
+protected void setCandidateGenerators(List<StochasticLoadBalancer.CandidateGenerator> customCandidateGenerators)
 
@@ -1024,7 +1031,7 @@ extends 
 
 setSlop
-protected void setSlop(org.apache.hadoop.conf.Configuration conf)
+protected void setSlop(org.apache.hadoop.conf.Configuration conf)
 
 Overrides:
 setSlop in class BaseLoadBalancer
 
@@ -1037,7 +1044,7 @@ extends 
 
 setClusterMetrics
-public void setClusterMetrics(ClusterMetrics st)
+public void setClusterMetrics(ClusterMetrics st)
 Description copied from interface: LoadBalancer
 Set the current cluster status. This allows a LoadBalancer to map host name to a server
 
@@ -1054,7 +1061,7 @@ extends 
 
 updateMetricsSize
-public void updateMetricsSize(int size)
+public void updateMetricsSize(int size)
 Update the number of metrics that are reported to JMX
 
@@ -1064,7 +1071,7 @@ extends 
 
 setMasterServices
-public void setMasterServices(MasterServices masterServices)
    

    [24/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
    index b6e7636..592c2cc 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
    @@ -356,3901 +356,3924 @@
 348  public Future<Void> modifyTableAsync(TableDescriptor td) throws IOException {
 349    ModifyTableResponse response = executeCallable(
 350      new MasterCallable<ModifyTableResponse>(getConnection(), getRpcControllerFactory()) {
-351        @Override
-352        protected ModifyTableResponse rpcCall() throws Exception {
-353          setPriority(td.getTableName());
-354          ModifyTableRequest request = RequestConverter.buildModifyTableRequest(
-355            td.getTableName(), td, ng.getNonceGroup(), ng.newNonce());
-356          return master.modifyTable(getRpcController(), request);
-357        }
-358      });
-359    return new ModifyTableFuture(this, td.getTableName(), response);
-360  }
-361
-362  @Override
-363  public List<TableDescriptor> listTableDescriptorsByNamespace(byte[] name) throws IOException {
-364    return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
-365        getRpcControllerFactory()) {
-366      @Override
-367      protected List<TableDescriptor> rpcCall() throws Exception {
-368        return master.listTableDescriptorsByNamespace(getRpcController(),
-369            ListTableDescriptorsByNamespaceRequest.newBuilder()
-370              .setNamespaceName(Bytes.toString(name)).build())
-371            .getTableSchemaList()
-372            .stream()
-373            .map(ProtobufUtil::toTableDescriptor)
-374            .collect(Collectors.toList());
-375      }
-376    });
-377  }
-378
-379  @Override
-380  public List<TableDescriptor> listTableDescriptors(List<TableName> tableNames) throws IOException {
-381    return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
-382        getRpcControllerFactory()) {
-383      @Override
-384      protected List<TableDescriptor> rpcCall() throws Exception {
-385        GetTableDescriptorsRequest req =
-386            RequestConverter.buildGetTableDescriptorsRequest(tableNames);
-387        return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
-388            req));
-389      }
-390    });
-391  }
-392
-393  @Override
-394  public List<RegionInfo> getRegions(final ServerName sn) throws IOException {
-395    AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
-396    // TODO: There is no timeout on this controller. Set one!
-397    HBaseRpcController controller = rpcControllerFactory.newController();
-398    return ProtobufUtil.getOnlineRegions(controller, admin);
-399  }
-400
-401  @Override
-402  public List<RegionInfo> getRegions(TableName tableName) throws IOException {
-403    if (TableName.isMetaTableName(tableName)) {
-404      return Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO);
-405    } else {
-406      return MetaTableAccessor.getTableRegions(connection, tableName, true);
-407    }
-408  }
-409
-410  private static class AbortProcedureFuture extends ProcedureFuture<Boolean> {
-411    private boolean isAbortInProgress;
-412
-413    public AbortProcedureFuture(
-414        final HBaseAdmin admin,
-415        final Long procId,
-416        final Boolean abortProcResponse) {
-417      super(admin, procId);
-418      this.isAbortInProgress = abortProcResponse;
-419    }
-420
-421    @Override
-422    public Boolean get(long timeout, TimeUnit unit)
-423        throws InterruptedException, ExecutionException, TimeoutException {
-424      if (!this.isAbortInProgress) {
-425        return false;
-426      }
-427      super.get(timeout, unit);
-428      return true;
-429    }
-430  }
-431
-432  /** @return Connection used by this object. */
-433  @Override
-434  public Connection getConnection() {
-435    return connection;
-436  }
-437
-438  @Override
-439  public boolean tableExists(final TableName tableName) throws IOException {
-440    return executeCallable(new RpcRetryingCallable<Boolean>() {
-441      @Override
-442      protected Boolean rpcCall(int callTimeout) throws Exception {
-443        return MetaTableAccessor.tableExists(connection, tableName);
-444      }
-445    });
-446  }
    -448  @Override
    -449  public HTableDescriptor[] listTables() 
    throws IOException {
    -450return listTables((Pattern)null, 
    false);
    -451  }
    -452
    -453  @Override
    -454  public HTableDescriptor[] 
    listTables(Pattern 
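
A caller-side sketch of the Admin API that the MasterCallable/executeCallable plumbing above implements. This is an illustration only, assuming a standard HBase 2.x client classpath; the connection setup and output are not part of the patch:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class ListTablesExample {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Each Admin call below rides the retrying MasterCallable machinery shown above.
      for (TableDescriptor td : admin.listTableDescriptors()) {
        System.out.println(td.getTableName());
      }
    }
  }
}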

    [24/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html
index fea2b5a..c7a6cc4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html
@@ -1354,816 +1354,824 @@
 1346   */
 1347  public static void putsToMetaTable(final Connection connection, final List<Put> ps)
 1348      throws IOException {
-1349    try (Table t = getMetaHTable(connection)) {
-1350      debugLogMutations(ps);
-1351      t.put(ps);
-1352    }
-1353  }
-1354
-1355  /**
-1356   * Delete the passed <code>d</code> from the <code>hbase:meta</code> table.
-1357   * @param connection connection we're using
-1358   * @param d Delete to add to hbase:meta
-1359   */
-1360  private static void deleteFromMetaTable(final Connection connection, final Delete d)
-1361      throws IOException {
-1362    List<Delete> dels = new ArrayList<>(1);
-1363    dels.add(d);
-1364    deleteFromMetaTable(connection, dels);
-1365  }
-1366
-1367  /**
-1368   * Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
-1369   * @param connection connection we're using
-1370   * @param deletes Deletes to add to hbase:meta  This list should support #remove.
-1371   */
-1372  private static void deleteFromMetaTable(final Connection connection, final List<Delete> deletes)
-1373      throws IOException {
-1374    try (Table t = getMetaHTable(connection)) {
-1375      debugLogMutations(deletes);
-1376      t.delete(deletes);
-1377    }
-1378  }
-1379
-1380  /**
-1381   * Deletes some replica columns corresponding to replicas for the passed rows
-1382   * @param metaRows rows in hbase:meta
-1383   * @param replicaIndexToDeleteFrom the replica ID we would start deleting from
-1384   * @param numReplicasToRemove how many replicas to remove
-1385   * @param connection connection we're using to access meta table
-1386   */
-1387  public static void removeRegionReplicasFromMeta(Set<byte[]> metaRows,
-1388    int replicaIndexToDeleteFrom, int numReplicasToRemove, Connection connection)
-1389      throws IOException {
-1390    int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove;
-1391    for (byte[] row : metaRows) {
-1392      long now = EnvironmentEdgeManager.currentTime();
-1393      Delete deleteReplicaLocations = new Delete(row);
-1394      for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
-1395        deleteReplicaLocations.addColumns(getCatalogFamily(),
-1396          getServerColumn(i), now);
-1397        deleteReplicaLocations.addColumns(getCatalogFamily(),
-1398          getSeqNumColumn(i), now);
-1399        deleteReplicaLocations.addColumns(getCatalogFamily(),
-1400          getStartCodeColumn(i), now);
-1401      }
-1402      deleteFromMetaTable(connection, deleteReplicaLocations);
-1403    }
-1404  }
-1405
-1406  /**
-1407   * Execute the passed <code>mutations</code> against <code>hbase:meta</code> table.
-1408   * @param connection connection we're using
-1409   * @param mutations Puts and Deletes to execute on hbase:meta
-1410   * @throws IOException
-1411   */
-1412  public static void mutateMetaTable(final Connection connection,
-1413      final List<Mutation> mutations)
-1414    throws IOException {
-1415    Table t = getMetaHTable(connection);
-1416    try {
-1417      debugLogMutations(mutations);
-1418      t.batch(mutations, null);
-1419    } catch (InterruptedException e) {
-1420      InterruptedIOException ie = new InterruptedIOException(e.getMessage());
-1421      ie.initCause(e);
-1422      throw ie;
-1423    } finally {
-1424      t.close();
-1425    }
-1426  }
-1427
-1428  private static void addRegionStateToPut(Put put, RegionState.State state) throws IOException {
-1429    put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
-1430        .setRow(put.getRow())
-1431        .setFamily(HConstants.CATALOG_FAMILY)
-1432        .setQualifier(getRegionStateColumn())
-1433        .setTimestamp(put.getTimestamp())
-1434        .setType(Cell.Type.Put)
-1435        .setValue(Bytes.toBytes(state.name()))
-1436        .build());
-1437  }
-1438
-1439  /**
-1440   * Adds daughter region infos to hbase:meta row for the specified region. Note that this does not
-1441   * add its daughter's as different rows, but adds information about the daughters in the same row
-1442   * as the parent. Use
-1443   * {@link #splitRegion(Connection, RegionInfo, long, RegionInfo, RegionInfo, ServerName, int)}
-1444   * if you want to do that.
-1445   * @param connection connection we're using
-1446   * 
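
The meta-mutation helpers above pair debugLogMutations() with a Table obtained from getMetaHTable(). A minimal sketch of the same batch pattern against hbase:meta using the public client API, with try-with-resources instead of the explicit finally (mutation contents are placeholders, not from the patch):

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Table;

public final class MetaBatchSketch {
  static void batchAgainstMeta(Connection connection, List<Mutation> mutations)
      throws IOException {
    try (Table t = connection.getTable(TableName.META_TABLE_NAME)) {
      // Same contract as mutateMetaTable(): submit Puts and Deletes in one batch.
      t.batch(mutations, null);
    } catch (InterruptedException e) {
      // Convert to an IOException exactly as mutateMetaTable() does.
      InterruptedIOException ie = new InterruptedIOException(e.getMessage());
      ie.initCause(e);
      throw ie;
    }
  }
}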

    [24/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/client/VersionInfoUtil.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/VersionInfoUtil.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/VersionInfoUtil.html
index 027f0d4..5c7f786 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/VersionInfoUtil.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/VersionInfoUtil.html
@@ -110,7 +110,7 @@
 102   * @param versionInfo the VersionInfo object to pack
 103   * @return the version number as int. (e.g. 0x0103004 is 1.3.4)
 104   */
-105  private static int getVersionNumber(final HBaseProtos.VersionInfo versionInfo) {
+105  public static int getVersionNumber(final HBaseProtos.VersionInfo versionInfo) {
 106    if (versionInfo != null) {
 107      try {
 108        final String[] components = getVersionComponents(versionInfo);
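
The "0x0103004 is 1.3.4" comment implies major in the high bits, minor in the middle, patch in the low bits. A hedged sketch of that packing, with shift widths inferred from the example value rather than copied from the library:

public final class VersionPackingSketch {
  static int pack(int major, int minor, int patch) {
    return (major << 20) | (minor << 12) | patch;
  }

  static String unpack(int version) {
    return ((version >>> 20) & 0xfff) + "." + ((version >>> 12) & 0xff) + "." + (version & 0xfff);
  }

  public static void main(String[] args) {
    System.out.printf("0x%07X%n", pack(1, 3, 4)); // prints 0x0103004
    System.out.println(unpack(0x0103004));        // prints 1.3.4
  }
}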
    
    
    

    [24/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
index a88ff57..6458b43 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
@@ -565,502 +565,508 @@
 557  /*
 558   * Expire the passed server.  Add it to list of dead servers and queue a
 559   * shutdown processing.
-560   */
-561  public synchronized void expireServer(final ServerName serverName) {
-562    if (serverName.equals(master.getServerName())) {
-563      if (!(master.isAborted() || master.isStopped())) {
-564        master.stop("We lost our znode?");
-565      }
-566      return;
-567    }
-568    if (!master.isServerCrashProcessingEnabled()) {
-569      LOG.info("Master doesn't enable ServerShutdownHandler during initialization, "
-570          + "delay expiring server " + serverName);
-571      // Even we delay expire this server, we still need to handle Meta's RIT
-572      // that are against the crashed server; since when we do RecoverMetaProcedure,
-573      // the SCP is not enable yet and Meta's RIT may be suspend forever. See HBase-19287
-574      master.getAssignmentManager().handleMetaRITOnCrashedServer(serverName);
-575      this.queuedDeadServers.add(serverName);
-576      return;
-577    }
-578    if (this.deadservers.isDeadServer(serverName)) {
-579      // TODO: Can this happen?  It shouldn't be online in this case?
-580      LOG.warn("Expiration of " + serverName +
-581          " but server shutdown already in progress");
-582      return;
-583    }
-584    moveFromOnlineToDeadServers(serverName);
-585
-586    // If cluster is going down, yes, servers are going to be expiring; don't
-587    // process as a dead server
-588    if (isClusterShutdown()) {
-589      LOG.info("Cluster shutdown set; " + serverName +
-590        " expired; onlineServers=" + this.onlineServers.size());
-591      if (this.onlineServers.isEmpty()) {
-592        master.stop("Cluster shutdown set; onlineServer=0");
-593      }
-594      return;
-595    }
-596    LOG.info("Processing expiration of " + serverName + " on " + this.master.getServerName());
-597    master.getAssignmentManager().submitServerCrash(serverName, true);
-598
-599    // Tell our listeners that a server was removed
-600    if (!this.listeners.isEmpty()) {
-601      for (ServerListener listener : this.listeners) {
-602        listener.serverRemoved(serverName);
-603      }
-604    }
-605  }
-606
-607  @VisibleForTesting
-608  public void moveFromOnlineToDeadServers(final ServerName sn) {
-609    synchronized (onlineServers) {
-610      if (!this.onlineServers.containsKey(sn)) {
-611        LOG.warn("Expiration of " + sn + " but server not online");
-612      }
-613      // Remove the server from the known servers lists and update load info BUT
-614      // add to deadservers first; do this so it'll show in dead servers list if
-615      // not in online servers list.
-616      this.deadservers.add(sn);
-617      this.onlineServers.remove(sn);
-618      onlineServers.notifyAll();
-619    }
-620    this.rsAdmins.remove(sn);
-621  }
-622
-623  public synchronized void processDeadServer(final ServerName serverName, boolean shouldSplitWal) {
-624    // When assignment manager is cleaning up the zookeeper nodes and rebuilding the
-625    // in-memory region states, region servers could be down. Meta table can and
-626    // should be re-assigned, log splitting can be done too. However, it is better to
-627    // wait till the cleanup is done before re-assigning user regions.
-628    //
-629    // We should not wait in the server shutdown handler thread since it can clog
-630    // the handler threads and meta table could not be re-assigned in case
-631    // the corresponding server is down. So we queue them up here instead.
-632    if (!master.getAssignmentManager().isFailoverCleanupDone()) {
-633      requeuedDeadServers.put(serverName, shouldSplitWal);
-634      return;
-635    }
-636
-637    this.deadservers.add(serverName);
-638    master.getAssignmentManager().submitServerCrash(serverName, shouldSplitWal);
-639  }
-640
-641  /**
-642   * Process the servers which died during master's initialization. It will be
-643   * called after HMaster#assignMeta and AssignmentManager#joinCluster.
-644   * */
-645  synchronized void processQueuedDeadServers() {
-646    if (!master.isServerCrashProcessingEnabled()) {
-647      LOG.info("Master hasn't enabled ServerShutdownHandler");
-648    }
-649    Iterator<ServerName> serverIterator = queuedDeadServers.iterator();
-650    while (serverIterator.hasNext()) {
-651      ServerName 
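
expireServer() finishes by calling serverRemoved() on every registered ServerListener. A sketch of such a listener, assuming the 2.x interface where the callbacks are default no-ops so only the hook of interest needs overriding:

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.ServerListener;

public class LoggingServerListener implements ServerListener {
  @Override
  public void serverRemoved(final ServerName serverName) {
    // Invoked from the expireServer() path shown above.
    System.out.println("Server expired and removed: " + serverName);
  }
}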

    [24/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
index 3da432b..d30fa8f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
@@ -928,7690 +928,7698 @@
 920      Collection<HStore> stores = this.stores.values();
 921      try {
 922        // update the stores that we are replaying
-923        stores.forEach(HStore::startReplayingFromWAL);
-924        // Recover any edits if available.
-925        maxSeqId = Math.max(maxSeqId,
-926          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
-927        // Make sure mvcc is up to max.
-928        this.mvcc.advanceTo(maxSeqId);
-929      } finally {
-930        // update the stores that we are done replaying
-931        stores.forEach(HStore::stopReplayingFromWAL);
-932      }
-933    }
-934    this.lastReplayedOpenRegionSeqId = maxSeqId;
-935
-936    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-937    this.writestate.flushRequested = false;
-938    this.writestate.compacting.set(0);
-939
-940    if (this.writestate.writesEnabled) {
-941      // Remove temporary data left over from old regions
-942      status.setStatus("Cleaning up temporary data from old regions");
-943      fs.cleanupTempDir();
-944    }
-945
-946    if (this.writestate.writesEnabled) {
-947      status.setStatus("Cleaning up detritus from prior splits");
-948      // Get rid of any splits or merges that were lost in-progress.  Clean out
-949      // these directories here on open.  We may be opening a region that was
-950      // being split but we crashed in the middle of it all.
-951      fs.cleanupAnySplitDetritus();
-952      fs.cleanupMergesDir();
-953    }
-954
-955    // Initialize split policy
-956    this.splitPolicy = RegionSplitPolicy.create(this, conf);
-957
-958    // Initialize flush policy
-959    this.flushPolicy = FlushPolicyFactory.create(this, conf);
-960
-961    long lastFlushTime = EnvironmentEdgeManager.currentTime();
-962    for (HStore store: stores.values()) {
-963      this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-964    }
-965
-966    // Use maximum of log sequenceid or that which was found in stores
-967    // (particularly if no recovered edits, seqid will be -1).
-968    long maxSeqIdFromFile =
-969      WALSplitter.getMaxRegionSequenceId(fs.getFileSystem(), fs.getRegionDir());
-970    long nextSeqId = Math.max(maxSeqId, maxSeqIdFromFile) + 1;
-971    if (writestate.writesEnabled) {
-972      WALSplitter.writeRegionSequenceIdFile(fs.getFileSystem(), fs.getRegionDir(), nextSeqId - 1);
-973    }
-974
-975    LOG.info("Opened {}; next sequenceid={}", this.getRegionInfo().getShortNameToLog(), nextSeqId);
-976
-977    // A region can be reopened if failed a split; reset flags
-978    this.closing.set(false);
-979    this.closed.set(false);
-980
-981    if (coprocessorHost != null) {
-982      status.setStatus("Running coprocessor post-open hooks");
-983      coprocessorHost.postOpen();
-984    }
+923        LOG.debug("replaying wal for " + this.getRegionInfo().getEncodedName());
+924        stores.forEach(HStore::startReplayingFromWAL);
+925        // Recover any edits if available.
+926        maxSeqId = Math.max(maxSeqId,
+927          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
+928        // Make sure mvcc is up to max.
+929        this.mvcc.advanceTo(maxSeqId);
+930      } finally {
+931        LOG.debug("stopping wal replay for " + this.getRegionInfo().getEncodedName());
+932        // update the stores that we are done replaying
+933        stores.forEach(HStore::stopReplayingFromWAL);
+934      }
+935    }
+936    this.lastReplayedOpenRegionSeqId = maxSeqId;
+937
+938    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+939    this.writestate.flushRequested = false;
+940    this.writestate.compacting.set(0);
+941
+942    if (this.writestate.writesEnabled) {
+943      LOG.debug("Cleaning up temporary data for " + this.getRegionInfo().getEncodedName());
+944      // Remove temporary data left over from old regions
+945      status.setStatus("Cleaning up temporary data from old regions");
+946      fs.cleanupTempDir();
+947    }
+948
+949    if (this.writestate.writesEnabled) {
+950      status.setStatus("Cleaning up detritus from prior splits");
+951      // Get rid of any splits or merges that were lost in-progress.  Clean out
+952      // these directories here on open.  We may 
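
The debug lines added in this hunk build their messages by string concatenation, while the nearby "Opened {}" call uses SLF4J placeholders. A sketch of the placeholder form, which skips message construction when debug logging is off (class and method names here are illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class WalReplayLoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(WalReplayLoggingSketch.class);

  void replay(String encodedRegionName) {
    LOG.debug("replaying wal for {}", encodedRegionName);
    try {
      // ... replay recovered edits ...
    } finally {
      LOG.debug("stopping wal replay for {}", encodedRegionName);
    }
  }
}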

    [24/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
index 9644187..b979909 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
@@ -1199,2602 +1199,2599 @@
 1191    ClusterStatusProtos.ServerLoad sl = buildServerLoad(reportStartTime, reportEndTime);
 1192    try {
 1193      RegionServerReportRequest.Builder request = RegionServerReportRequest.newBuilder();
-1194      ServerName sn = ServerName.parseVersionedServerName(this.serverName.getVersionedBytes());
-1195      request.setServer(ProtobufUtil.toServerName(sn));
-1196      request.setLoad(sl);
-1197      rss.regionServerReport(null, request.build());
-1198    } catch (ServiceException se) {
-1199      IOException ioe = ProtobufUtil.getRemoteException(se);
-1200      if (ioe instanceof YouAreDeadException) {
-1201        // This will be caught and handled as a fatal error in run()
-1202        throw ioe;
-1203      }
-1204      if (rssStub == rss) {
-1205        rssStub = null;
-1206      }
-1207      // Couldn't connect to the master, get location from zk and reconnect
-1208      // Method blocks until new master is found or we are stopped
-1209      createRegionServerStatusStub(true);
-1210    }
-1211  }
-1212
-1213  /**
-1214   * Reports the given map of Regions and their size on the filesystem to the active Master.
-1215   *
-1216   * @param regionSizeStore The store containing region sizes
-1217   * @return false if FileSystemUtilizationChore should pause reporting to master. true otherwise
-1218   */
-1219  public boolean reportRegionSizesForQuotas(RegionSizeStore regionSizeStore) {
-1220    RegionServerStatusService.BlockingInterface rss = rssStub;
-1221    if (rss == null) {
-1222      // the current server could be stopping.
-1223      LOG.trace("Skipping Region size report to HMaster as stub is null");
-1224      return true;
-1225    }
-1226    try {
-1227      buildReportAndSend(rss, regionSizeStore);
-1228    } catch (ServiceException se) {
-1229      IOException ioe = ProtobufUtil.getRemoteException(se);
-1230      if (ioe instanceof PleaseHoldException) {
-1231        LOG.trace("Failed to report region sizes to Master because it is initializing."
-1232            + " This will be retried.", ioe);
-1233        // The Master is coming up. Will retry the report later. Avoid re-creating the stub.
-1234        return true;
-1235      }
-1236      if (rssStub == rss) {
-1237        rssStub = null;
-1238      }
-1239      createRegionServerStatusStub(true);
-1240      if (ioe instanceof DoNotRetryIOException) {
-1241        DoNotRetryIOException doNotRetryEx = (DoNotRetryIOException) ioe;
-1242        if (doNotRetryEx.getCause() != null) {
-1243          Throwable t = doNotRetryEx.getCause();
-1244          if (t instanceof UnsupportedOperationException) {
-1245            LOG.debug("master doesn't support ReportRegionSpaceUse, pause before retrying");
-1246            return false;
-1247          }
-1248        }
-1249      }
-1250      LOG.debug("Failed to report region sizes to Master. This will be retried.", ioe);
-1251    }
-1252    return true;
-1253  }
-1254
-1255  /**
-1256   * Builds the region size report and sends it to the master. Upon successful sending of the
-1257   * report, the region sizes that were sent are marked as sent.
-1258   *
-1259   * @param rss The stub to send to the Master
-1260   * @param regionSizeStore The store containing region sizes
-1261   */
-1262  void buildReportAndSend(RegionServerStatusService.BlockingInterface rss,
-1263      RegionSizeStore regionSizeStore) throws ServiceException {
-1264    RegionSpaceUseReportRequest request =
-1265        buildRegionSpaceUseReportRequest(Objects.requireNonNull(regionSizeStore));
-1266    rss.reportRegionSpaceUse(null, request);
-1267    // Record the number of size reports sent
-1268    if (metricsRegionServer != null) {
-1269      metricsRegionServer.incrementNumRegionSizeReportsSent(regionSizeStore.size());
-1270    }
-1271  }
-1272
-1273  /**
-1274   * Builds a {@link RegionSpaceUseReportRequest} protobuf message from the region size map.
-1275   *
-1276   * @param regionSizeStore The size in bytes of regions
-1277   * @return The corresponding protocol buffer message.
-1278   */
-1279  RegionSpaceUseReportRequest buildRegionSpaceUseReportRequest(RegionSizeStore regionSizes) {
-1280    RegionSpaceUseReportRequest.Builder request = RegionSpaceUseReportRequest.newBuilder();
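
Per its javadoc, reportRegionSizesForQuotas() returns false only when the master rejects the report with UnsupportedOperationException, signalling the chore to pause. A sketch of how a caller might honor that contract (the interface here is an illustrative stand-in, not the real chore API):

class SizeReportChoreSketch {
  interface RegionSizeReporter {
    boolean reportRegionSizesForQuotas(Object regionSizeStore);
  }

  private final RegionSizeReporter reporter;
  private boolean reportingEnabled = true;

  SizeReportChoreSketch(RegionSizeReporter reporter) {
    this.reporter = reporter;
  }

  void chore(Object regionSizeStore) {
    if (!reportingEnabled) {
      return; // master cannot accept reports; stay paused
    }
    // false => pause future reports; true => keep reporting (including retriable failures)
    reportingEnabled = reporter.reportRegionSizesForQuotas(regionSizeStore);
  }
}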
    

    [24/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
index 8b2674f..274eb54 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
@@ -262,1413 +262,1417 @@
 254    });
 255  }
 256
-257  public void preModifyNamespace(final NamespaceDescriptor ns) throws IOException {
-258    execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
-259      @Override
-260      public void call(MasterObserver observer) throws IOException {
-261        observer.preModifyNamespace(this, ns);
-262      }
-263    });
-264  }
-265
-266  public void postModifyNamespace(final NamespaceDescriptor ns) throws IOException {
-267    execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
-268      @Override
-269      public void call(MasterObserver observer) throws IOException {
-270        observer.postModifyNamespace(this, ns);
-271      }
-272    });
-273  }
-274
-275  public void preGetNamespaceDescriptor(final String namespaceName)
-276      throws IOException {
-277    execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
-278      @Override
-279      public void call(MasterObserver observer) throws IOException {
-280        observer.preGetNamespaceDescriptor(this, namespaceName);
-281      }
-282    });
-283  }
-284
-285  public void postGetNamespaceDescriptor(final NamespaceDescriptor ns)
-286      throws IOException {
-287    execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
-288      @Override
-289      public void call(MasterObserver observer) throws IOException {
-290        observer.postGetNamespaceDescriptor(this, ns);
-291      }
-292    });
-293  }
-294
-295  public void preListNamespaceDescriptors(final List<NamespaceDescriptor> descriptors)
-296      throws IOException {
-297    execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
-298      @Override
-299      public void call(MasterObserver observer) throws IOException {
-300        observer.preListNamespaceDescriptors(this, descriptors);
-301      }
-302    });
-303  }
-304
-305  public void postListNamespaceDescriptors(final List<NamespaceDescriptor> descriptors)
-306      throws IOException {
-307    execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
-308      @Override
-309      public void call(MasterObserver observer) throws IOException {
-310        observer.postListNamespaceDescriptors(this, descriptors);
-311      }
-312    });
-313  }
-314
-315  /* Implementation of hooks for invoking MasterObservers */
+257  public void preModifyNamespace(final NamespaceDescriptor currentNsDescriptor,
+258    final NamespaceDescriptor newNsDescriptor) throws IOException {
+259    execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
+260      @Override
+261      public void call(MasterObserver observer) throws IOException {
+262        observer.preModifyNamespace(this, currentNsDescriptor, newNsDescriptor);
+263      }
+264    });
+265  }
+266
+267  public void postModifyNamespace(final NamespaceDescriptor oldNsDescriptor,
+268    final NamespaceDescriptor currentNsDescriptor) throws IOException {
+269    execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
+270      @Override
+271      public void call(MasterObserver observer) throws IOException {
+272        observer.postModifyNamespace(this, oldNsDescriptor, currentNsDescriptor);
+273      }
+274    });
+275  }
+276
+277  public void preGetNamespaceDescriptor(final String namespaceName)
+278      throws IOException {
+279    execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
+280      @Override
+281      public void call(MasterObserver observer) throws IOException {
+282        observer.preGetNamespaceDescriptor(this, namespaceName);
+283      }
+284    });
+285  }
+286
+287  public void postGetNamespaceDescriptor(final NamespaceDescriptor ns)
+288      throws IOException {
+289    execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
+290      @Override
+291      public void call(MasterObserver observer) throws IOException {
+292        observer.postGetNamespaceDescriptor(this, ns);
+293      }
+294    });
+295  }
+296
+297  public void preListNamespaceDescriptors(final List<NamespaceDescriptor>
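
The new host methods pass both the current and the requested namespace descriptor to each observer. A sketch of a coprocessor consuming the two-argument hook, assuming the HBase 2.x MasterObserver/MasterCoprocessor default-method style:

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

public class NamespaceAuditObserver implements MasterCoprocessor, MasterObserver {
  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
      NamespaceDescriptor currentNsDescriptor, NamespaceDescriptor newNsDescriptor)
      throws IOException {
    // The old descriptor is now available for before/after comparison.
    System.out.println("Modifying namespace " + currentNsDescriptor.getName());
  }
}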
    

    [24/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
index 4a879bb..7d27402 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
@@ -300,7 +300,7 @@
 292  private Map<String, com.google.protobuf.Service> coprocessorServiceHandlers = Maps.newHashMap();
 293
 294  // Track data size in all memstores
-295  private final MemStoreSizing memStoreSize = new MemStoreSizing();
+295  private final MemStoreSizing memStoreSizing = new ThreadSafeMemStoreSizing();
 296  private final RegionServicesForStores regionServicesForStores = new RegionServicesForStores(this);
 297
 298  // Debug possible data loss due to WAL off
@@ -1218,7389 +1218,7399 @@
 1210   * Increase the size of mem store in this region and the size of global mem
 1211   * store
 1212   */
-1213  public void incMemStoreSize(MemStoreSize memStoreSize) {
-1214    if (this.rsAccounting != null) {
-1215      rsAccounting.incGlobalMemStoreSize(memStoreSize);
-1216    }
-1217    long dataSize;
-1218    synchronized (this.memStoreSize) {
-1219      this.memStoreSize.incMemStoreSize(memStoreSize);
-1220      dataSize = this.memStoreSize.getDataSize();
-1221    }
-1222    checkNegativeMemStoreDataSize(dataSize, memStoreSize.getDataSize());
-1223  }
-1224
-1225  public void decrMemStoreSize(MemStoreSize memStoreSize) {
-1226    if (this.rsAccounting != null) {
-1227      rsAccounting.decGlobalMemStoreSize(memStoreSize);
-1228    }
-1229    long size;
-1230    synchronized (this.memStoreSize) {
-1231      this.memStoreSize.decMemStoreSize(memStoreSize);
-1232      size = this.memStoreSize.getDataSize();
+1213  void incMemStoreSize(MemStoreSize mss) {
+1214    incMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize());
+1215  }
+1216
+1217  void incMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1218    if (this.rsAccounting != null) {
+1219      rsAccounting.incGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
+1220    }
+1221    long dataSize =
+1222      this.memStoreSizing.incMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
+1223    checkNegativeMemStoreDataSize(dataSize, dataSizeDelta);
+1224  }
+1225
+1226  void decrMemStoreSize(MemStoreSize mss) {
+1227    decrMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize());
+1228  }
+1229
+1230  void decrMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1231    if (this.rsAccounting != null) {
+1232      rsAccounting.decGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
 1233    }
-1234    checkNegativeMemStoreDataSize(size, -memStoreSize.getDataSize());
-1235  }
-1236
-1237  private void checkNegativeMemStoreDataSize(long memStoreDataSize, long delta) {
-1238    // This is extremely bad if we make memStoreSize negative. Log as much info on the offending
-1239    // caller as possible. (memStoreSize might be a negative value already -- freeing memory)
-1240    if (memStoreDataSize < 0) {
-1241      LOG.error("Asked to modify this region's (" + this.toString()
-1242          + ") memStoreSize to a negative value which is incorrect. Current memStoreSize="
-1243          + (memStoreDataSize - delta) + ", delta=" + delta, new Exception());
-1244    }
-1245  }
-1246
-1247  @Override
-1248  public RegionInfo getRegionInfo() {
-1249    return this.fs.getRegionInfo();
-1250  }
-1251
-1252  /**
-1253   * @return Instance of {@link RegionServerServices} used by this HRegion.
-1254   * Can be null.
-1255   */
-1256  RegionServerServices getRegionServerServices() {
-1257    return this.rsServices;
-1258  }
-1259
-1260  @Override
-1261  public long getReadRequestsCount() {
-1262    return readRequestsCount.sum();
-1263  }
-1264
-1265  @Override
-1266  public long getFilteredReadRequestsCount() {
-1267    return filteredReadRequestsCount.sum();
-1268  }
-1269
-1270  @Override
-1271  public long getWriteRequestsCount() {
-1272    return writeRequestsCount.sum();
-1273  }
-1274
-1275  @Override
-1276  public long getMemStoreDataSize() {
-1277    return memStoreSize.getDataSize();
-1278  }
-1279
-1280  @Override
-1281  public long getMemStoreHeapSize() {
-1282    return memStoreSize.getHeapSize();
-1283  }
-1284
-1285  @Override
-1286  public long getMemStoreOffHeapSize() {
-1287    return memStoreSize.getOffHeapSize();
-1288  }
-1289
-1290  /** @return store services for this region, to access services required by store level needs */
-1291  
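
The replacement of the synchronized(memStoreSize) blocks with ThreadSafeMemStoreSizing relies on atomic adds that return the updated value. A minimal sketch of that idea (fields and method shape are illustrative, not the actual ThreadSafeMemStoreSizing source):

import java.util.concurrent.atomic.AtomicLong;

class ThreadSafeSizingSketch {
  private final AtomicLong dataSize = new AtomicLong();
  private final AtomicLong heapSize = new AtomicLong();
  private final AtomicLong offHeapSize = new AtomicLong();

  long incMemStoreSize(long dataDelta, long heapDelta, long offHeapDelta) {
    heapSize.addAndGet(heapDelta);
    offHeapSize.addAndGet(offHeapDelta);
    // Return the new data size so the caller can run its negative-size check
    // without holding any lock.
    return dataSize.addAndGet(dataDelta);
  }
}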

    [24/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
    --
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
index 2510283..418c60c 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
@@ -77,77 +77,77 @@
 069import org.apache.hadoop.hbase.client.RowMutations;
 070import org.apache.hadoop.hbase.client.Scan;
 071import org.apache.hadoop.hbase.client.Table;
-072import org.apache.hadoop.hbase.filter.BinaryComparator;
-073import org.apache.hadoop.hbase.filter.Filter;
-074import org.apache.hadoop.hbase.filter.FilterAllFilter;
-075import org.apache.hadoop.hbase.filter.FilterList;
-076import org.apache.hadoop.hbase.filter.PageFilter;
-077import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-078import org.apache.hadoop.hbase.filter.WhileMatchFilter;
-079import org.apache.hadoop.hbase.io.compress.Compression;
-080import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-081import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
-082import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-083import org.apache.hadoop.hbase.regionserver.BloomType;
-084import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-085import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
-086import org.apache.hadoop.hbase.trace.SpanReceiverHost;
-087import org.apache.hadoop.hbase.trace.TraceUtil;
-088import org.apache.hadoop.hbase.util.ByteArrayHashKey;
-089import org.apache.hadoop.hbase.util.Bytes;
-090import org.apache.hadoop.hbase.util.Hash;
-091import org.apache.hadoop.hbase.util.MurmurHash;
-092import org.apache.hadoop.hbase.util.Pair;
-093import org.apache.hadoop.hbase.util.YammerHistogramUtils;
-094import org.apache.hadoop.io.LongWritable;
-095import org.apache.hadoop.io.Text;
-096import org.apache.hadoop.mapreduce.Job;
-097import org.apache.hadoop.mapreduce.Mapper;
-098import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-099import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-100import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
-101import org.apache.hadoop.util.Tool;
-102import org.apache.hadoop.util.ToolRunner;
-103import org.apache.htrace.core.ProbabilitySampler;
-104import org.apache.htrace.core.Sampler;
-105import org.apache.htrace.core.TraceScope;
-106import org.apache.yetus.audience.InterfaceAudience;
-107import org.slf4j.Logger;
-108import org.slf4j.LoggerFactory;
-109import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
-110import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-111
-112/**
-113 * Script used evaluating HBase performance and scalability.  Runs a HBase
-114 * client that steps through one of a set of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random writes test, etc.). Pass on the
-116 * command-line which test to run and how many clients are participating in
-117 * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * <p>This class sets up and runs the evaluation programs described in
-120 * Section 7, <i>Performance Evaluation</i>, of the <a
-121 * href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
-122 * paper, pages 8-10.
-123 *
-124 * <p>By default, runs as a mapreduce job where each mapper runs a single test
-125 * client. Can also run as a non-mapreduce, multithreaded application by
-126 * specifying {@code --nomapred}. Each client does about 1GB of data, unless
-127 * specified otherwise.
-128 */
-129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-130public class PerformanceEvaluation extends Configured implements Tool {
-131  static final String RANDOM_SEEK_SCAN = "randomSeekScan";
-132  static final String RANDOM_READ = "randomRead";
-133  private static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-134  private static final ObjectMapper MAPPER = new ObjectMapper();
-135  static {
-136    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-137  }
-138
-139  public static final String TABLE_NAME = "TestTable";
-140  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
-141  public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0);
-142  public static final byte [] QUALIFIER_NAME = COLUMN_ZERO;
+072import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+073import org.apache.hadoop.hbase.filter.BinaryComparator;
+074import org.apache.hadoop.hbase.filter.Filter;
+075import 
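
The class javadoc above describes running PerformanceEvaluation as a MapReduce job or, with --nomapred, as a multithreaded client. A hedged launcher sketch via ToolRunner; the constructor and argument spelling are assumed from the Tool contract and test names described above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.PerformanceEvaluation;
import org.apache.hadoop.util.ToolRunner;

public class PeLauncherSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // One non-MapReduce client running the randomRead test.
    int rc = ToolRunner.run(conf, new PerformanceEvaluation(conf),
        new String[] { "--nomapred", "randomRead", "1" });
    System.exit(rc);
  }
}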
    

    [24/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
index e1bc325..63e7421 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
@@ -66,5125 +66,5224 @@
 058import java.util.concurrent.TimeoutException;
 059import java.util.concurrent.atomic.AtomicBoolean;
 060import java.util.concurrent.atomic.AtomicInteger;
-061import org.apache.commons.io.IOUtils;
-062import org.apache.commons.lang3.RandomStringUtils;
-063import org.apache.commons.lang3.StringUtils;
-064import org.apache.hadoop.conf.Configuration;
-065import org.apache.hadoop.conf.Configured;
-066import org.apache.hadoop.fs.FSDataOutputStream;
-067import org.apache.hadoop.fs.FileStatus;
-068import org.apache.hadoop.fs.FileSystem;
-069import org.apache.hadoop.fs.Path;
-070import org.apache.hadoop.fs.permission.FsAction;
-071import org.apache.hadoop.fs.permission.FsPermission;
-072import org.apache.hadoop.hbase.Abortable;
-073import org.apache.hadoop.hbase.Cell;
-074import org.apache.hadoop.hbase.CellUtil;
-075import org.apache.hadoop.hbase.ClusterMetrics;
-076import org.apache.hadoop.hbase.ClusterMetrics.Option;
-077import org.apache.hadoop.hbase.HBaseConfiguration;
-078import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-079import org.apache.hadoop.hbase.HConstants;
-080import org.apache.hadoop.hbase.HRegionInfo;
-081import org.apache.hadoop.hbase.HRegionLocation;
-082import org.apache.hadoop.hbase.KeyValue;
-083import org.apache.hadoop.hbase.MasterNotRunningException;
-084import org.apache.hadoop.hbase.MetaTableAccessor;
-085import org.apache.hadoop.hbase.RegionLocations;
-086import org.apache.hadoop.hbase.ServerName;
-087import org.apache.hadoop.hbase.TableName;
-088import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-089import org.apache.hadoop.hbase.client.Admin;
-090import org.apache.hadoop.hbase.client.ClusterConnection;
-091import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-092import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-093import org.apache.hadoop.hbase.client.Connection;
-094import org.apache.hadoop.hbase.client.ConnectionFactory;
-095import org.apache.hadoop.hbase.client.Delete;
-096import org.apache.hadoop.hbase.client.Get;
-097import org.apache.hadoop.hbase.client.Put;
-098import org.apache.hadoop.hbase.client.RegionInfo;
-099import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-100import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-101import org.apache.hadoop.hbase.client.Result;
-102import org.apache.hadoop.hbase.client.RowMutations;
-103import org.apache.hadoop.hbase.client.Table;
-104import org.apache.hadoop.hbase.client.TableDescriptor;
-105import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-106import org.apache.hadoop.hbase.client.TableState;
-107import org.apache.hadoop.hbase.io.FileLink;
-108import org.apache.hadoop.hbase.io.HFileLink;
-109import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-110import org.apache.hadoop.hbase.io.hfile.HFile;
-111import org.apache.hadoop.hbase.log.HBaseMarkers;
-112import org.apache.hadoop.hbase.master.MasterFileSystem;
-113import org.apache.hadoop.hbase.master.RegionState;
-114import org.apache.hadoop.hbase.regionserver.HRegion;
-115import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-116import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-117import org.apache.hadoop.hbase.replication.ReplicationException;
-118import org.apache.hadoop.hbase.security.AccessDeniedException;
-119import org.apache.hadoop.hbase.security.UserProvider;
-120import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
-121import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
-122import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
-123import org.apache.hadoop.hbase.util.hbck.ReplicationChecker;
-124import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
-125import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
-126import org.apache.hadoop.hbase.wal.WAL;
-127import org.apache.hadoop.hbase.wal.WALFactory;
-128import org.apache.hadoop.hbase.wal.WALSplitter;
-129import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-130import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-131import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-132import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-133import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-134import 
    

    [24/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
index e6e43ee..a8b77ae 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
@@ -367,650 +367,650 @@
 359    lock.lock();
 360    try {
 361      LOG.trace("Starting WAL Procedure Store lease recovery");
-362      FileStatus[] oldLogs = getLogFiles();
-363      while (isRunning()) {
+362      while (isRunning()) {
+363        FileStatus[] oldLogs = getLogFiles();
 364        // Get Log-MaxID and recover lease on old logs
 365        try {
 366          flushLogId = initOldLogs(oldLogs);
 367        } catch (FileNotFoundException e) {
 368          LOG.warn("Someone else is active and deleted logs. retrying.", e);
-369          oldLogs = getLogFiles();
-370          continue;
-371        }
-372
-373        // Create new state-log
-374        if (!rollWriter(flushLogId + 1)) {
-375          // someone else has already created this log
-376          LOG.debug("Someone else has already created log " + flushLogId);
-377          continue;
-378        }
-379
-380        // We have the lease on the log
-381        oldLogs = getLogFiles();
-382        if (getMaxLogId(oldLogs) > flushLogId) {
-383          if (LOG.isDebugEnabled()) {
-384            LOG.debug("Someone else created new logs. Expected maxLogId < " + flushLogId);
-385          }
-386          logs.getLast().removeFile(this.walArchiveDir);
-387          continue;
-388        }
-389
-390        LOG.trace("Lease acquired for flushLogId={}", flushLogId);
-391        break;
-392      }
-393    } finally {
-394      lock.unlock();
-395    }
-396  }
-397
-398  @Override
-399  public void load(final ProcedureLoader loader) throws IOException {
-400    lock.lock();
-401    try {
-402      if (logs.isEmpty()) {
-403        throw new RuntimeException("recoverLease() must be called before loading data");
-404      }
-405
-406      // Nothing to do, If we have only the current log.
-407      if (logs.size() == 1) {
-408        LOG.trace("No state logs to replay.");
-409        loader.setMaxProcId(0);
-410        return;
-411      }
-412
-413      // Load the old logs
-414      final Iterator<ProcedureWALFile> it = logs.descendingIterator();
-415      it.next(); // Skip the current log
-416
-417      ProcedureWALFormat.load(it, storeTracker, new ProcedureWALFormat.Loader() {
-418        @Override
-419        public void setMaxProcId(long maxProcId) {
-420          loader.setMaxProcId(maxProcId);
-421        }
-422
-423        @Override
-424        public void load(ProcedureIterator procIter) throws IOException {
-425          loader.load(procIter);
-426        }
-427
-428        @Override
-429        public void handleCorrupted(ProcedureIterator procIter) throws IOException {
-430          loader.handleCorrupted(procIter);
-431        }
-432
-433        @Override
-434        public void markCorruptedWAL(ProcedureWALFile log, IOException e) {
-435          if (corruptedLogs == null) {
-436            corruptedLogs = new HashSet<>();
-437          }
-438          corruptedLogs.add(log);
-439          // TODO: sideline corrupted log
-440        }
-441      });
-442    } finally {
-443      try {
-444        // try to cleanup inactive wals and complete the operation
-445        buildHoldingCleanupTracker();
-446        tryCleanupLogsOnLoad();
-447        loading.set(false);
-448      } finally {
-449        lock.unlock();
-450      }
-451    }
-452  }
-453
-454  private void tryCleanupLogsOnLoad() {
-455    // nothing to cleanup.
-456    if (logs.size() <= 1) return;
-457
-458    // the config says to not cleanup wals on load.
-459    if (!conf.getBoolean(EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY,
-460      DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY)) {
-461      LOG.debug("WALs cleanup on load is not enabled: " + getActiveLogs());
-462      return;
-463    }
-464
-465    try {
-466      periodicRoll();
-467    } catch (IOException e) {
-468      LOG.warn("Unable to cleanup logs on load: " + e.getMessage(), e);
-469    }
-470  }
-471
-472  @Override
-473  public void insert(final Procedure proc, final Procedure[] subprocs) {
-474    if (LOG.isTraceEnabled()) {
-475      LOG.trace("Insert " + proc + ", subproc=" + Arrays.toString(subprocs));
-476    }
-477
-478    ByteSlot slot = acquireSlot();
-479    try {
-480      // Serialize the insert
-481      long[] subProcIds = null;
-482      if (subprocs != null) {
-483
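
The hunk above moves getLogFiles() inside the retry loop so each lease-recovery attempt sees a fresh log listing. A simplified sketch of the loop's shape (the LogStore interface is an illustrative stand-in for the store's private helpers):

class LeaseRecoverySketch {
  interface LogStore {
    long maxLogId();             // highest id among the logs currently on disk
    boolean createLog(long id);  // false if another master already created it
  }

  long recoverLease(LogStore store) {
    while (true) {
      long flushLogId = store.maxLogId();   // re-list logs on every attempt
      if (!store.createLog(flushLogId + 1)) {
        continue; // someone else rolled this log first; retry
      }
      if (store.maxLogId() > flushLogId + 1) {
        continue; // newer logs appeared after ours; retry
      }
      return flushLogId + 1; // lease acquired
    }
  }
}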
    

    [24/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/CompoundConfiguration.ImmutableConfigMap.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/CompoundConfiguration.ImmutableConfigMap.html b/devapidocs/src-html/org/apache/hadoop/hbase/CompoundConfiguration.ImmutableConfigMap.html
index 1038256..89735e7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/CompoundConfiguration.ImmutableConfigMap.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/CompoundConfiguration.ImmutableConfigMap.html
@@ -36,398 +36,399 @@
 028import java.util.List;
 029import java.util.Map;
 030
-031import org.apache.commons.collections4.iterators.UnmodifiableIterator;
-032import org.apache.hadoop.conf.Configuration;
-033import org.apache.hadoop.hbase.util.Bytes;
-034import org.apache.yetus.audience.InterfaceAudience;
-035
-036/**
-037 * Do a shallow merge of multiple KV configuration pools. This is a very useful
-038 * utility class to easily add per-object configurations in addition to wider
-039 * scope settings. This is different from Configuration.addResource()
-040 * functionality, which performs a deep merge and mutates the common data
-041 * structure.
-042 * <p>
-043 * The iterator on CompoundConfiguration is unmodifiable. Obtaining iterator is an expensive
-044 * operation.
-045 * <p>
-046 * For clarity: the shallow merge allows the user to mutate either of the
-047 * configuration objects and have changes reflected everywhere. In contrast to a
-048 * deep merge, that requires you to explicitly know all applicable copies to
-049 * propagate changes.
-050 * 
-051 * WARNING: The values set in the CompoundConfiguration are do not handle Property variable
-052 * substitution.  However, if they are set in the underlying configuration substitutions are
-053 * done. 
-054 */
-055@InterfaceAudience.Private
-056public class CompoundConfiguration extends Configuration {
-057
-058  private Configuration mutableConf = null;
-059
-060  /**
-061   * Default Constructor. Initializes empty configuration
-062   */
-063  public CompoundConfiguration() {
-064  }
-065
-066  // Devs: these APIs are the same contract as their counterparts in
-067  // Configuration.java
-068  private interface ImmutableConfigMap extends Iterable<Map.Entry<String,String>> {
-069    String get(String key);
-070    String getRaw(String key);
-071    Class<?> getClassByName(String name) throws ClassNotFoundException;
-072    int size();
-073  }
-074
-075  private final List<ImmutableConfigMap> configs = new ArrayList<>();
-076
-077  static class ImmutableConfWrapper implements  ImmutableConfigMap {
-078    private final Configuration c;
-079
-080    ImmutableConfWrapper(Configuration conf) {
-081      c = conf;
-082    }
-083
-084    @Override
-085    public Iterator<Map.Entry<String,String>> iterator() {
-086      return c.iterator();
-087    }
-088
-089    @Override
-090    public String get(String key) {
-091      return c.get(key);
-092    }
-093
-094    @Override
-095    public String getRaw(String key) {
-096      return c.getRaw(key);
-097    }
-098
-099    @Override
-100    public Class<?> getClassByName(String name)
-101        throws ClassNotFoundException {
-102      return c.getClassByName(name);
-103    }
-104
-105    @Override
-106    public int size() {
-107      return c.size();
-108    }
-109
-110    @Override
-111    public String toString() {
-112      return c.toString();
-113    }
-114  }
-115
-116  /**
-117   * If set has been called, it will create a mutableConf.  This converts the mutableConf to an
-118   * immutable one and resets it to allow a new mutable conf.  This is used when a new map or
-119   * conf is added to the compound configuration to preserve proper override semantics.
-120   */
-121  void freezeMutableConf() {
-122    if (mutableConf == null) {
-123      // do nothing if there is no current mutableConf
-124      return;
-125    }
-126
-127    this.configs.add(0, new ImmutableConfWrapper(mutableConf));
-128    mutableConf = null;
-129  }
-130
-131  /**
-132   * Add Hadoop Configuration object to config list.
-133   * The added configuration overrides the previous ones if there are name collisions.
-134   * @param conf configuration object
-135   * @return this, for builder pattern
-136   */
-137  public CompoundConfiguration add(final Configuration conf) {
-138    freezeMutableConf();
-139
-140    if (conf instanceof CompoundConfiguration) {
-141      this.configs.addAll(0, ((CompoundConfiguration) conf).configs);
-142      return this;
-143    }
-144    // put new config at the front of the list (top priority)
-145    this.configs.add(0, new ImmutableConfWrapper(conf));
-146    return this;
-147  }
-148
-149  /**
-150   * Add Bytes map to config list. This map is generally
-151   * created by HTableDescriptor or HColumnDescriptor, but can be 
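
A usage sketch of the shallow-merge semantics documented above: the configuration added last sits at the front of the list and wins collisions, and because the merge is shallow, later mutations of an underlying Configuration remain visible through the compound view (the keys here are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompoundConfiguration;

public class CompoundConfSketch {
  public static void main(String[] args) {
    Configuration base = new Configuration(false);
    base.set("hbase.example.key", "from-base");

    Configuration override = new Configuration(false);
    override.set("hbase.example.key", "from-override");

    CompoundConfiguration compound = new CompoundConfiguration()
        .add(base)
        .add(override); // added last => highest priority

    System.out.println(compound.get("hbase.example.key")); // from-override

    override.set("hbase.example.key", "mutated");
    System.out.println(compound.get("hbase.example.key")); // mutated (shallow merge)
  }
}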

    [24/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/checkstyle-aggregate.html
    --
    diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
    index c2f3d6b..839b865 100644
    --- a/checkstyle-aggregate.html
    +++ b/checkstyle-aggregate.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
 Apache HBase – Checkstyle Results
     
    @@ -274,10 +274,10 @@
     Warnings
     Errors
     
    -3592
    +3593
     0
     0
    -16020
    +15918
     
     Files
     
    @@ -425,7 +425,7 @@
     org/apache/hadoop/hbase/HBaseTestCase.java
     0
     0
    -35
    +19
     
     org/apache/hadoop/hbase/HBaseTestingUtility.java
     0
    @@ -1345,7 +1345,7 @@
     org/apache/hadoop/hbase/client/HTable.java
     0
     0
    -46
    +44
     
     org/apache/hadoop/hbase/client/HTableMultiplexer.java
     0
    @@ -3000,7 +3000,7 @@
     org/apache/hadoop/hbase/filter/TimestampsFilter.java
     0
     0
    -8
    +7
     
     org/apache/hadoop/hbase/filter/ValueFilter.java
     0
@@ -4062,290 +4062,290 @@
 0
 7
 
-org/apache/hadoop/hbase/mapred/RowCounter_Counters.properties
-0
-0
-1
 org/apache/hadoop/hbase/mapred/TableInputFormat.java
 0
 0
 1
 org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
 0
 0
 6
 org/apache/hadoop/hbase/mapred/TableMap.java
 0
 0
 2
 org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
 0
 0
 15
 org/apache/hadoop/hbase/mapred/TableOutputFormat.java
 0
 0
 3
 org/apache/hadoop/hbase/mapred/TableRecordReader.java
 0
 0
 6
 org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
 0
 0
 7
 org/apache/hadoop/hbase/mapred/TableReduce.java
 0
 0
 2
 org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.java
 0
 0
 10
 org/apache/hadoop/hbase/mapred/TableSplit.java
 0
 0
 17
 org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java
 0
 0
 4
 org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java
 0
 0
 2
 org/apache/hadoop/hbase/mapred/TestRowCounter.java
 0
 0
 12
 org/apache/hadoop/hbase/mapred/TestSplitTable.java
 0
 0
 1
 org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
 0
 0
 25
 org/apache/hadoop/hbase/mapred/TestTableMapReduce.java
 0
 0
 1
 org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java
 0
 0
 3
 org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java
 0
 0
 1
 org/apache/hadoop/hbase/mapreduce/CellCounter.java
 0
 0
 9
 org/apache/hadoop/hbase/mapreduce/CellCreator.java
 0
 0
 6
 org/apache/hadoop/hbase/mapreduce/CellSerialization.java
 0
 0
 2
 org/apache/hadoop/hbase/mapreduce/CellSortReducer.java
 0
 0
 1
 org/apache/hadoop/hbase/mapreduce/CopyTable.java
 0
 0
 13
 org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java
 0
 0
 3
 org/apache/hadoop/hbase/mapreduce/Driver.java
 0
 0
 4
 org/apache/hadoop/hbase/mapreduce/Export.java
 0
 0
 2
 org/apache/hadoop/hbase/mapreduce/ExportUtils.java
 0
 0
 8
 org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java
 0
 0
 3
 org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java
 0
 0
 1
 org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
 0
 0
 18
 org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
 0
 0
 4
 org/apache/hadoop/hbase/mapreduce/HashTable.java
 0
 0
 1
 org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java
 0
 0
 2
 org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java
 0
 0
 2
 org/apache/hadoop/hbase/mapreduce/Import.java
 0
 0
 10
 org/apache/hadoop/hbase/mapreduce/ImportTsv.java
 0
 0
 21
 org/apache/hadoop/hbase/mapreduce/IndexBuilder.java
 0
 0
 1
 org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
 0
 0
 10
 org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
 0
 0
 5
 org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
 0
 0
 2
 org/apache/hadoop/hbase/mapreduce/JobUtil.java
 0
     0
     4
    -
    +
     org/apache/hadoop/hbase/mapreduce/MapreduceTestingShim.java
     0
     0
     1
    -
    +
     org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java
     0
     0
     3
    -
    +
     org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
     0
     0
     1
    -
    +
     org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
     0
     0
     4
    -
    +
     org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java
     0
     0
     4
    -
    +
     org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
     0
     0
     3
    -
    +
     org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java
     0
     0
     3
    -
    +
     org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
     0
     0
     11
    -
    +
     org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java
     0
     0
     7
    -
    +
     org/apache/hadoop/hbase/mapreduce/MutationSerialization.java
     0
     0
     2
    -
    +
     org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java
     0
     0
     1
    -
    +
     org/apache/hadoop/hbase/mapreduce/PutCombiner.java
     0
     0
     3
    -
    +
     org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
     0
     0
     3
    -
    +
     org/apache/hadoop/hbase/mapreduce/ResultSerialization.java
     0
     0
     3
    -
    +
     org/apache/hadoop/hbase/mapreduce/RowCounter.java
     0
     0
     5
    

    [24/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactType.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactType.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactType.html
    index 6d14fc8..4b9fd29 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactType.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactType.html
    @@ -127,30 +127,28 @@ the order they are declared.
     
     
     
    -private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    -RawAsyncHBaseAdmin.compact(TableNametableName,
    +private void
    +HBaseAdmin.compact(TableNametableName,
    byte[]columnFamily,
    booleanmajor,
    CompactTypecompactType)
    -Compact column family of a table, Asynchronous operation 
    even if CompletableFuture.get()
    +Compact a table.
     
     
     
    -private void
    -HBaseAdmin.compact(TableNametableName,
    +private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    +RawAsyncHBaseAdmin.compact(TableNametableName,
    byte[]columnFamily,
    booleanmajor,
    CompactTypecompactType)
    -Compact a table.
    +Compact column family of a table, Asynchronous operation 
    even if CompletableFuture.get()
     
     
     
     https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    -AsyncAdmin.compact(TableNametableName,
    +AsyncHBaseAdmin.compact(TableNametableName,
    byte[]columnFamily,
    -   CompactTypecompactType)
    -Compact a column family within a table.
    -
    +   CompactTypecompactType)
     
     
     void
    @@ -161,14 +159,16 @@ the order they are declared.
     
     
     
    -https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    -RawAsyncHBaseAdmin.compact(TableNametableName,
    +void
    +HBaseAdmin.compact(TableNametableName,
    byte[]columnFamily,
    -   CompactTypecompactType)
    +   CompactTypecompactType)
    +Compact a column family within a table.
    +
     
     
    -void
    -HBaseAdmin.compact(TableNametableName,
    +https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    +AsyncAdmin.compact(TableNametableName,
    byte[]columnFamily,
    CompactTypecompactType)
     Compact a column family within a table.
    @@ -176,16 +176,14 @@ the order they are declared.
     
     
     https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    -AsyncHBaseAdmin.compact(TableNametableName,
    +RawAsyncHBaseAdmin.compact(TableNametableName,
    byte[]columnFamily,
    CompactTypecompactType)
     
     
     https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    -AsyncAdmin.compact(TableNametableName,
    -   CompactTypecompactType)
    -Compact a table.
    -
    +AsyncHBaseAdmin.compact(TableNametableName,
    +   CompactTypecompactType)
     
     
     void
    @@ -195,28 +193,28 @@ the order they are declared.
     
     
     
    -https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    -RawAsyncHBaseAdmin.compact(TableNametableName,
    -   CompactTypecompactType)
    -
    -
     void
     HBaseAdmin.compact(TableNametableName,
    CompactTypecompactType)
     Compact a table.
     
     
    +
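
This reshuffle pairs the blocking HBaseAdmin.compact (void) with the CompletableFuture-returning async variants. A short usage sketch contrasting the two styles; the table name "demo" and column family "cf" are hypothetical:

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("demo"); // hypothetical table

    // Blocking flavor: returns once the compaction request is accepted.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.compact(tn, Bytes.toBytes("cf")); // compact one column family
    }

    // Async flavor: AsyncAdmin.compact hands back a CompletableFuture<Void>.
    try (AsyncConnection aconn =
             ConnectionFactory.createAsyncConnection(conf).get()) {
      CompletableFuture<Void> done = aconn.getAdmin().compact(tn);
      done.join(); // wait only if/when the caller needs completion
    }
  }
}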
    

    [24/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncBufferedMutatorBuilder.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncBufferedMutatorBuilder.html
     
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncBufferedMutatorBuilder.html
    index 5d5fd7a..c93e9c2 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncBufferedMutatorBuilder.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncBufferedMutatorBuilder.html
    @@ -121,26 +121,26 @@
     
     
     AsyncBufferedMutatorBuilder
    +AsyncConnectionImpl.getBufferedMutatorBuilder(TableNametableName)
    +
    +
    +AsyncBufferedMutatorBuilder
     AsyncConnection.getBufferedMutatorBuilder(TableNametableName)
     Returns an AsyncBufferedMutatorBuilder 
    for creating AsyncBufferedMutator.
     
     
    -
    +
     AsyncBufferedMutatorBuilder
    -AsyncConnectionImpl.getBufferedMutatorBuilder(TableNametableName)
    +AsyncConnectionImpl.getBufferedMutatorBuilder(TableNametableName,
    + https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
     title="class or interface in 
    java.util.concurrent">ExecutorServicepool)
     
    -
    +
     AsyncBufferedMutatorBuilder
     AsyncConnection.getBufferedMutatorBuilder(TableNametableName,
      https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
     title="class or interface in 
    java.util.concurrent">ExecutorServicepool)
     Returns an AsyncBufferedMutatorBuilder 
    for creating AsyncBufferedMutator.
     
     
    -
    -AsyncBufferedMutatorBuilder
    -AsyncConnectionImpl.getBufferedMutatorBuilder(TableNametableName,
    - https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
     title="class or interface in 
    java.util.concurrent">ExecutorServicepool)
    -
     
     AsyncBufferedMutatorBuilder
     AsyncBufferedMutatorBuilderImpl.setMaxAttempts(intmaxAttempts)
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncConnectionImpl.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncConnectionImpl.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncConnectionImpl.html
    index 7ac76dd..71b940f 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncConnectionImpl.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncConnectionImpl.html
    @@ -106,11 +106,11 @@
     
     
     private AsyncConnectionImpl
    -AsyncClientScanner.conn
    +RawAsyncTableImpl.conn
     
     
     private AsyncConnectionImpl
    -AsyncRpcRetryingCallerFactory.conn
    +AsyncBatchRpcRetryingCaller.conn
     
     
     private AsyncConnectionImpl
    @@ -118,19 +118,19 @@
     
     
     private AsyncConnectionImpl
    -RawAsyncTableImpl.conn
    +RegionCoprocessorRpcChannelImpl.conn
     
     
    -private AsyncConnectionImpl
    -RegionCoprocessorRpcChannelImpl.conn
    +protected AsyncConnectionImpl
    +AsyncRpcRetryingCaller.conn
     
     
     private AsyncConnectionImpl
    -AsyncBatchRpcRetryingCaller.conn
    +AsyncClientScanner.conn
     
     
    -protected AsyncConnectionImpl
    -AsyncRpcRetryingCaller.conn
    +private AsyncConnectionImpl
    +AsyncRpcRetryingCallerFactory.conn
     
     
     private AsyncConnectionImpl
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.Callable.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.Callable.html
     
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.Callable.html
    index d6b1759..e71ca45 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.Callable.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncMasterRequestRpcRetryingCaller.Callable.html
    @@ -105,13 +105,13 @@
     
     
     
    -private AsyncMasterRequestRpcRetryingCaller.CallableT
    -AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder.callable
    -
    -
     private AsyncMasterRequestRpcRetryingCaller.CallableT
     AsyncMasterRequestRpcRetryingCaller.callable
     
    +
    +private AsyncMasterRequestRpcRetryingCaller.CallableT
    +AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder.callable
    +
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncProcess.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncProcess.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncProcess.html
    index f2374f5..5f86673 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncProcess.html
    +++ 

    [24/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
    index ecf500c..0cd5a4e 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
    @@ -238,8355 +238,8368 @@
     230  public static final String 
    HBASE_MAX_CELL_SIZE_KEY = "hbase.server.keyvalue.maxsize";
     231  public static final int 
    DEFAULT_MAX_CELL_SIZE = 10485760;
     232
    -233  public static final String 
    HBASE_REGIONSERVER_MINIBATCH_SIZE =
    -234  
    "hbase.regionserver.minibatch.size";
    -235  public static final int 
    DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE = 2;
    -236
    -237  /**
    -238   * This is the global default value for 
    durability. All tables/mutations not
    -239   * defining a durability or using 
    USE_DEFAULT will default to this value.
    -240   */
    -241  private static final Durability 
    DEFAULT_DURABILITY = Durability.SYNC_WAL;
    +233  /**
    +234   * This is the global default value for 
    durability. All tables/mutations not
    +235   * defining a durability or using 
    USE_DEFAULT will default to this value.
    +236   */
    +237  private static final Durability 
    DEFAULT_DURABILITY = Durability.SYNC_WAL;
    +238
    +239  public static final String 
    HBASE_REGIONSERVER_MINIBATCH_SIZE =
    +240  
    "hbase.regionserver.minibatch.size";
    +241  public static final int 
    DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE = 2;
     242
    -243  final AtomicBoolean closed = new 
    AtomicBoolean(false);
    -244
    -245  /* Closing can take some time; use the 
    closing flag if there is stuff we don't
    -246   * want to do while in closing state; 
    e.g. like offer this region up to the
    -247   * master as a region to close if the 
    carrying regionserver is overloaded.
    -248   * Once set, it is never cleared.
    -249   */
    -250  final AtomicBoolean closing = new 
    AtomicBoolean(false);
    -251
    -252  /**
    -253   * The max sequence id of flushed data 
    on this region. There is no edit in memory that is
    -254   * less that this sequence id.
    -255   */
    -256  private volatile long maxFlushedSeqId = 
    HConstants.NO_SEQNUM;
    -257
    -258  /**
    -259   * Record the sequence id of last flush 
    operation. Can be in advance of
    -260   * {@link #maxFlushedSeqId} when 
    flushing a single column family. In this case,
    -261   * {@link #maxFlushedSeqId} will be 
    older than the oldest edit in memory.
    -262   */
    -263  private volatile long lastFlushOpSeqId 
    = HConstants.NO_SEQNUM;
    -264
    -265  /**
    -266   * The sequence id of the last replayed 
    open region event from the primary region. This is used
    -267   * to skip entries before this due to 
    the possibility of replay edits coming out of order from
    -268   * replication.
    -269   */
    -270  protected volatile long 
    lastReplayedOpenRegionSeqId = -1L;
    -271  protected volatile long 
    lastReplayedCompactionSeqId = -1L;
    -272
    -273  
    //
    -274  // Members
    -275  
    //
    -276
    -277  // map from a locked row to the context 
    for that lock including:
    -278  // - CountDownLatch for threads waiting 
    on that row
    -279  // - the thread that owns the lock 
    (allow reentrancy)
    -280  // - reference count of (reentrant) 
    locks held by the thread
    -281  // - the row itself
-282  private final ConcurrentHashMap<HashedBytes, RowLockContext> lockedRows =
-283  new ConcurrentHashMap<>();
-284
-285  protected final Map<byte[], HStore> stores =
-286  new ConcurrentSkipListMap<>(Bytes.BYTES_RAWCOMPARATOR);
    +243  public static final String 
    WAL_HSYNC_CONF_KEY = "hbase.wal.hsync";
    +244  public static final boolean 
    DEFAULT_WAL_HSYNC = false;
    +245
    +246  final AtomicBoolean closed = new 
    AtomicBoolean(false);
    +247
    +248  /* Closing can take some time; use the 
    closing flag if there is stuff we don't
    +249   * want to do while in closing state; 
    e.g. like offer this region up to the
    +250   * master as a region to close if the 
    carrying regionserver is overloaded.
    +251   * Once set, it is never cleared.
    +252   */
    +253  final AtomicBoolean closing = new 
    AtomicBoolean(false);
    +254
    +255  /**
    +256   * The max sequence id of flushed data 
    on this region. There is no edit in memory that is
    +257   * less that this sequence id.
    +258   */
    +259  private volatile long maxFlushedSeqId = 
    HConstants.NO_SEQNUM;
    +260
    +261  /**
    +262   * Record the sequence id of last flush 
    operation. Can be in advance of
    +263   * {@link #maxFlushedSeqId} when 
    flushing a single column family. In this case,
    +264   * {@link #maxFlushedSeqId} will be 
    older than the oldest edit in memory.
    +265   */
    +266  private volatile long lastFlushOpSeqId 
    = 
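
Apart from moving DEFAULT_DURABILITY, the hunk introduces WAL_HSYNC_CONF_KEY ("hbase.wal.hsync", default false) alongside the existing minibatch-size key. A minimal sketch of how such constants are typically consumed from a Hadoop Configuration; the numeric default below is illustrative only, not the class's authoritative value:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionConfDemo {
  // Key names copied from the constants in the hunk above.
  static final String WAL_HSYNC_CONF_KEY = "hbase.wal.hsync";
  static final String MINIBATCH_SIZE_KEY = "hbase.regionserver.minibatch.size";

  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Configuration#getBoolean / #getInt fall back to the supplied default
    // when the key is absent, the usual way such constants are consumed.
    boolean hsync = conf.getBoolean(WAL_HSYNC_CONF_KEY, false);
    int miniBatch = conf.getInt(MINIBATCH_SIZE_KEY, 20000); // illustrative default
    System.out.println("hsync=" + hsync + ", minibatch=" + miniBatch);
  }
}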

    [24/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.ReplicaResultState.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.ReplicaResultState.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.ReplicaResultState.html
    index c27b109..4160a88 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.ReplicaResultState.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.ReplicaResultState.html
    @@ -105,7 +105,7 @@
     097try {
     098  done = waitUntilDone(startTime 
    * 1000L + asyncProcess.primaryCallTimeoutMicroseconds);
     099} catch (InterruptedException ex) 
    {
    -100  LOG.error("Replica thread was 
    interrupted - no replica calls: " + ex.getMessage());
    +100  LOG.error("Replica thread 
    interrupted - no replica calls {}", ex.getMessage());
     101  return;
     102}
     103  }
    @@ -149,7 +149,7 @@
     141  if (loc == null) return;
     142  HRegionLocation[] locs = 
    loc.getRegionLocations();
     143  if (locs.length == 1) {
    -144LOG.warn("No replicas found for " 
    + action.getAction());
    +144LOG.warn("No replicas found for 
    {}", action.getAction());
     145return;
     146  }
     147  synchronized (replicaResultLock) 
    {
    @@ -230,8 +230,8 @@
     222  return;
     223} catch (Throwable t) {
 224  // This should not happen. Let's log & retry anyway.
    -225  LOG.error("#" + asyncProcess.id 
    + ", Caught throwable while calling. This is unexpected." +
    -226  " Retrying. Server is " + 
    server + ", tableName=" + tableName, t);
    +225  LOG.error("id=" + 
    asyncProcess.id + ", caught throwable. Unexpected." +
    +226  " Retrying. Server=" + 
    server + ", tableName=" + tableName, t);
     227  
    receiveGlobalFailure(multiAction, server, numAttempt, t);
     228  return;
     229}
    @@ -247,1036 +247,1035 @@
     239}
     240  } catch (Throwable t) {
     241// Something really bad happened. 
    We are on the send thread that will now die.
    -242LOG.error("Internal AsyncProcess 
    #" + asyncProcess.id + " error for "
    -243+ tableName + " processing 
    for " + server, t);
    -244throw new RuntimeException(t);
    -245  } finally {
    -246
    asyncProcess.decTaskCounters(multiAction.getRegions(), server);
-247if (callsInProgress != null && callable != null && res != null) {
    -248  
    callsInProgress.remove(callable);
    -249}
    -250  }
    -251}
    -252  }
    -253
-254  private final Batch.Callback<CResult> callback;
    -255  private final BatchErrors errors;
    -256  private final 
    ConnectionImplementation.ServerErrorTracker errorsByServer;
    -257  private final ExecutorService pool;
-258  private final Set<CancellableRegionServerCallable> callsInProgress;
    +242LOG.error("id=" + asyncProcess.id 
    + " error for " + tableName + " processing " + server, t);
    +243throw new RuntimeException(t);
    +244  } finally {
    +245
    asyncProcess.decTaskCounters(multiAction.getRegions(), server);
+246if (callsInProgress != null && callable != null && res != null) {
    +247  
    callsInProgress.remove(callable);
    +248}
    +249  }
    +250}
    +251  }
    +252
+253  private final Batch.Callback<CResult> callback;
    +254  private final BatchErrors errors;
    +255  private final 
    ConnectionImplementation.ServerErrorTracker errorsByServer;
    +256  private final ExecutorService pool;
+257  private final Set<CancellableRegionServerCallable> callsInProgress;
    +258
     259
    -260
    -261  private final TableName tableName;
    -262  private final AtomicLong 
    actionsInProgress = new AtomicLong(-1);
    -263  /**
    -264   * The lock controls access to results. 
    It is only held when populating results where
    -265   * there might be several callers 
    (eventual consistency gets). For other requests,
    -266   * there's one unique call going on per 
    result index.
    -267   */
    -268  private final Object replicaResultLock 
    = new Object();
    -269  /**
    -270   * Result array.  Null if results are 
    not needed. Otherwise, each index corresponds to
    -271   * the action index in initial actions 
    submitted. For most request types, has null-s for
    -272   * requests that are not done, and 
    result/exception for those that are done.
    -273   * For eventual-consistency gets, 
    initially the same applies; at some point, replica calls
    -274   * might be started, and 
    ReplicaResultState is put at the corresponding indices. The
    -275   * returning calls check the type to 
    detect when this is the case. After all calls are done,
    -276   * ReplicaResultState-s are replaced 
    with results for the user.
    -277   */
    -278  private final Object[] results;
    -279  /**
    -280   * Indices of replica gets in results. 
    If null, all or no actions are replica-gets.
    -281   */
    -282  
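
A recurring change in this hunk is replacing string concatenation in log calls with SLF4J {} placeholders, so the message is only formatted when the level is actually enabled. A minimal before/after sketch, assuming a standard SLF4J logger:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogStyleDemo {
  private static final Logger LOG = LoggerFactory.getLogger(LogStyleDemo.class);

  public static void main(String[] args) {
    Exception ex = new InterruptedException("demo");

    // Old style: the message string is built even if ERROR is disabled.
    LOG.error("Replica thread was interrupted - no replica calls: " + ex.getMessage());

    // New style: formatting is deferred until the event is actually logged.
    LOG.error("Replica thread interrupted - no replica calls {}", ex.getMessage());
  }
}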

    [24/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html 
    b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
    index 0d8d461..0df1ff1 100644
    --- a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
    +++ b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
    @@ -532,14 +532,14 @@
     
     java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
     title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
     title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
     title="class or interface in java.io">Serializable)
     
    -org.apache.hadoop.hbase.util.PrettyPrinter.Unit
     org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE
     org.apache.hadoop.hbase.util.Order
    -org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType
     org.apache.hadoop.hbase.util.ChecksumType
    -org.apache.hadoop.hbase.util.PoolMap.PoolType
    -org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.PureJavaComparer
     (implements org.apache.hadoop.hbase.util.Bytes.ComparerT)
     org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.UnsafeComparer
     (implements org.apache.hadoop.hbase.util.Bytes.ComparerT)
    +org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType
    +org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.PureJavaComparer
     (implements org.apache.hadoop.hbase.util.Bytes.ComparerT)
    +org.apache.hadoop.hbase.util.PrettyPrinter.Unit
    +org.apache.hadoop.hbase.util.PoolMap.PoolType
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/overview-tree.html
    --
    diff --git a/devapidocs/overview-tree.html b/devapidocs/overview-tree.html
    index 2e76ee9..a9fb4aa 100644
    --- a/devapidocs/overview-tree.html
    +++ b/devapidocs/overview-tree.html
    @@ -1347,6 +1347,8 @@
     org.apache.hadoop.hbase.io.crypto.Encryption.Context
     
     
    +org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder
    +org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder.CoprocessorDescriptorImpl
     (implements org.apache.hadoop.hbase.client.CoprocessorDescriptor)
     org.apache.hadoop.hbase.coprocessor.CoprocessorHostC,E
     
     org.apache.hadoop.hbase.master.MasterCoprocessorHost
    @@ -4684,6 +4686,7 @@
     org.apache.hadoop.hbase.coprocessor.WALCoprocessor
     
     
    +org.apache.hadoop.hbase.client.CoprocessorDescriptor
     org.apache.hadoop.hbase.CoprocessorEnvironmentC
     
     org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment
    
    
    

    [24/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.html
    index c29e663..0ab7f22 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.html
    @@ -528,7 +528,7 @@ extends 
     
     rollbackState
    -protectedvoidrollbackState(MasterProcedureEnvenv,
    +protectedvoidrollbackState(MasterProcedureEnvenv,
      
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStatestate)
       throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
     Description copied from 
    class:StateMachineProcedure
    @@ -548,7 +548,7 @@ extends 
     
     isRollbackSupported
    -protectedbooleanisRollbackSupported(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStatestate)
    +protectedbooleanisRollbackSupported(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStatestate)
     Description copied from 
    class:StateMachineProcedure
     Used by the default implementation of abort() to know if 
    the current state can be aborted
      and rollback can be triggered.
    @@ -564,7 +564,7 @@ extends 
     
     getState
    -protectedorg.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStategetState(intstateId)
    +protectedorg.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStategetState(intstateId)
     Description copied from 
    class:StateMachineProcedure
     Convert an ordinal (or state id) to an Enum (or more 
    descriptive) state object.
     
    @@ -583,7 +583,7 @@ extends 
     
     getStateId
    -protectedintgetStateId(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStatestate)
    +protectedintgetStateId(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStatestate)
     Description copied from 
    class:StateMachineProcedure
     Convert the Enum (or more descriptive) state object to an 
    ordinal (or state id).
     
    @@ -602,7 +602,7 @@ extends 
     
     getInitialState
    -protectedorg.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStategetInitialState()
    +protectedorg.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStategetInitialState()
     Description copied from 
    class:StateMachineProcedure
     Return the initial state object that will be used for the 
    first call to executeFromState().
     
    @@ -619,7 +619,7 @@ extends 
     
     serializeStateData
    -protectedvoidserializeStateData(ProcedureStateSerializerserializer)
    +protectedvoidserializeStateData(ProcedureStateSerializerserializer)
    throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
     Description copied from 
    class:Procedure
     The user-level code of the procedure may have some state to
    @@ -641,7 +641,7 @@ extends 
     
     deserializeStateData
    -protectedvoiddeserializeStateData(ProcedureStateSerializerserializer)
    +protectedvoiddeserializeStateData(ProcedureStateSerializerserializer)
      throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
     Description copied from 
    class:Procedure
     Called on store load to allow the user to decode the 
    previously serialized
    @@ -662,7 +662,7 @@ extends 
     
     getTableOperationType
    -publicTableProcedureInterface.TableOperationTypegetTableOperationType()
    +publicTableProcedureInterface.TableOperationTypegetTableOperationType()
     Description copied from 
    interface:TableProcedureInterface
     Given an operation type we can take decisions about what to 
    do with pending operations.
      e.g. if we get a delete and we have some table operation pending (e.g. add 
    column)
    @@ -683,7 +683,7 @@ extends 
     
     getNamespaceName
    -protectedhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">StringgetNamespaceName()
    +protectedhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">StringgetNamespaceName()
     
     Specified by:
     getNamespaceNamein
     classAbstractStateMachineNamespaceProcedureorg.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteNamespaceState
    @@ -696,7 +696,7 @@ extends 
     
     prepareDelete
    -privatebooleanprepareDelete(MasterProcedureEnvenv)
    +privatebooleanprepareDelete(MasterProcedureEnvenv)
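
The getState/getStateId/getInitialState trio documented above maps between a persisted state id and the protobuf-generated enum. A self-contained illustration of that mapping pattern; DemoState is a stand-in enum, and real procedures use the protobuf getNumber/forNumber accessors rather than ordinal():

public class StateIdDemo {
  // Stand-in for a protobuf-generated state enum.
  enum DemoState { DELETE_NAMESPACE_PREPARE, DELETE_NAMESPACE_DELETE_FROM_NS_TABLE }

  // Convert an ordinal (state id) back to the enum, as getState(int) does.
  static DemoState getState(int stateId) {
    return DemoState.values()[stateId];
  }

  // Convert the enum to its ordinal, as getStateId(state) does.
  static int getStateId(DemoState state) {
    return state.ordinal();
  }

  // First state handed to executeFromState(), as getInitialState() does.
  static DemoState getInitialState() {
    return DemoState.DELETE_NAMESPACE_PREPARE;
  }

  public static void main(String[] args) {
    // Round trip: serialize the state to an id and recover it.
    System.out.println(getState(getStateId(getInitialState())));
  }
}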
    

    [24/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
    index a5dedc8..58373f2 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
    @@ -368,24 +368,24 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     void
    -BufferedMutatorImpl.mutate(Mutationm)
    -
    -
    -void
     BufferedMutator.mutate(Mutationmutation)
     Sends a Mutation to 
    the table.
     
     
    +
    +https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    +AsyncBufferedMutatorImpl.mutate(Mutationmutation)
    +
     
    +void
    +BufferedMutatorImpl.mutate(Mutationm)
    +
    +
     https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
     AsyncBufferedMutator.mutate(Mutationmutation)
     Sends a Mutation to 
    the table.
     
     
    -
    -https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    -AsyncBufferedMutatorImpl.mutate(Mutationmutation)
    -
     
     
     
    @@ -403,24 +403,24 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     void
    -BufferedMutatorImpl.mutate(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">List? extends Mutationms)
    -
    -
    -void
     BufferedMutator.mutate(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">List? extends Mutationmutations)
     Send some Mutations to 
    the table.
     
     
    +
    +https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">Listhttps://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    +AsyncBufferedMutatorImpl.mutate(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">List? extends Mutationmutations)
    +
     
    +void
    +BufferedMutatorImpl.mutate(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">List? extends Mutationms)
    +
    +
     https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">Listhttps://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
     AsyncBufferedMutator.mutate(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">List? extends Mutationmutations)
     Send some Mutations to 
    the table.
     
     
    -
    -https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">Listhttps://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    -AsyncBufferedMutatorImpl.mutate(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">List? extends Mutationmutations)
    -
     
     static RowMutations
     RowMutations.of(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">List? extends Mutationmutations)
    @@ -556,15 +556,15 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     MutationSerialization.getDeserializer(https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
     title="class or interface in java.lang">ClassMutationc)
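
The reordered rows contrast the buffering BufferedMutator.mutate with the CompletableFuture-returning AsyncBufferedMutator. A short sketch of the synchronous flavor; the table name "demo" is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class MutateDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BufferedMutator mutator =
             conn.getBufferedMutator(TableName.valueOf("demo"))) { // hypothetical table
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      mutator.mutate(put); // buffered; sent when the buffer fills
      mutator.flush();     // or explicitly on demand / at close()
    }
  }
}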
     
     
    

    [24/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/client/class-use/ColumnFamilyDescriptor.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/ColumnFamilyDescriptor.html
     
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/ColumnFamilyDescriptor.html
    index 18597dd..f1686a2 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/ColumnFamilyDescriptor.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/ColumnFamilyDescriptor.html
    @@ -330,8 +330,10 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    -AsyncHBaseAdmin.addColumnFamily(TableNametableName,
    -   ColumnFamilyDescriptorcolumnFamily)
    +AsyncAdmin.addColumnFamily(TableNametableName,
    +   ColumnFamilyDescriptorcolumnFamily)
    +Add a column family to an existing table.
    +
     
     
     void
    @@ -341,20 +343,18 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     
    -void
    -HBaseAdmin.addColumnFamily(TableNametableName,
    +https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    +RawAsyncHBaseAdmin.addColumnFamily(TableNametableName,
    ColumnFamilyDescriptorcolumnFamily)
     
     
    -https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    -AsyncAdmin.addColumnFamily(TableNametableName,
    -   ColumnFamilyDescriptorcolumnFamily)
    -Add a column family to an existing table.
    -
    +void
    +HBaseAdmin.addColumnFamily(TableNametableName,
    +   ColumnFamilyDescriptorcolumnFamily)
     
     
     https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    -RawAsyncHBaseAdmin.addColumnFamily(TableNametableName,
    +AsyncHBaseAdmin.addColumnFamily(TableNametableName,
    ColumnFamilyDescriptorcolumnFamily)
     
     
    @@ -396,8 +396,10 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    -AsyncHBaseAdmin.modifyColumnFamily(TableNametableName,
    -  ColumnFamilyDescriptorcolumnFamily)
    +AsyncAdmin.modifyColumnFamily(TableNametableName,
    +  ColumnFamilyDescriptorcolumnFamily)
    +Modify an existing column family on a table.
    +
     
     
     void
    @@ -407,20 +409,18 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     
    -void
    -HBaseAdmin.modifyColumnFamily(TableNametableName,
    +https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    +RawAsyncHBaseAdmin.modifyColumnFamily(TableNametableName,
       ColumnFamilyDescriptorcolumnFamily)
     
     
    -https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    -AsyncAdmin.modifyColumnFamily(TableNametableName,
    -  ColumnFamilyDescriptorcolumnFamily)
    -Modify an existing column family on a table.
    -
    +void
    +HBaseAdmin.modifyColumnFamily(TableNametableName,
    +  ColumnFamilyDescriptorcolumnFamily)
     
     
     https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
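
These index rows cover Admin.addColumnFamily/modifyColumnFamily and their async counterparts, all taking a ColumnFamilyDescriptor. A minimal sketch of building a descriptor and adding it with the blocking API; the table and family names are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class AddFamilyDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf2"))
        .setMaxVersions(3) // illustrative tuning only
        .build();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.addColumnFamily(TableName.valueOf("demo"), cf); // blocking variant
    }
  }
}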
    

    [24/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/org/apache/hadoop/hbase/util/FSTableDescriptors.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/util/FSTableDescriptors.html 
    b/devapidocs/org/apache/hadoop/hbase/util/FSTableDescriptors.html
    index 2d242af..1330022 100644
    --- a/devapidocs/org/apache/hadoop/hbase/util/FSTableDescriptors.html
    +++ b/devapidocs/org/apache/hadoop/hbase/util/FSTableDescriptors.html
    @@ -639,7 +639,7 @@ implements 
     
     TABLEINFO_FILESTATUS_COMPARATOR
    -static finalhttps://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true;
     title="class or interface in 
    java.util">Comparatororg.apache.hadoop.fs.FileStatus TABLEINFO_FILESTATUS_COMPARATOR
    +static finalhttps://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true;
     title="class or interface in 
    java.util">Comparatororg.apache.hadoop.fs.FileStatus TABLEINFO_FILESTATUS_COMPARATOR
     Compare FileStatus instances by 
    Path.getName(). Returns in
      reverse order.
     
    @@ -650,7 +650,7 @@ implements 
     
     TABLEINFO_PATHFILTER
    -private static finalorg.apache.hadoop.fs.PathFilter TABLEINFO_PATHFILTER
    +private static finalorg.apache.hadoop.fs.PathFilter TABLEINFO_PATHFILTER
     
     
     
    @@ -659,7 +659,7 @@ implements 
     
     WIDTH_OF_SEQUENCE_ID
    -static finalint WIDTH_OF_SEQUENCE_ID
    +static finalint WIDTH_OF_SEQUENCE_ID
     Width of the sequenceid that is a suffix on a tableinfo 
    file.
     
     See Also:
    @@ -673,7 +673,7 @@ implements 
     
     TABLEINFO_FILE_REGEX
    -private static finalhttps://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true;
     title="class or interface in java.util.regex">Pattern TABLEINFO_FILE_REGEX
    +private static finalhttps://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true;
     title="class or interface in java.util.regex">Pattern TABLEINFO_FILE_REGEX
     Regex to eat up sequenceid suffix on a .tableinfo file.
      Use regex because may encounter oldstyle .tableinfos where there is no
      sequenceid on the end.
    @@ -794,7 +794,7 @@ implements 
     
     createMetaTableDescriptor
    -public staticTableDescriptorcreateMetaTableDescriptor(org.apache.hadoop.conf.Configurationconf)
    +public staticTableDescriptorcreateMetaTableDescriptor(org.apache.hadoop.conf.Configurationconf)
      throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
     
     Throws:
    @@ -808,7 +808,7 @@ implements 
     
     setCacheOn
    -publicvoidsetCacheOn()
    +publicvoidsetCacheOn()
     throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
     Description copied from 
    interface:TableDescriptors
     Enables the tabledescriptor cache
    @@ -826,7 +826,7 @@ implements 
     
     setCacheOff
    -publicvoidsetCacheOff()
    +publicvoidsetCacheOff()
      throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
     Description copied from 
    interface:TableDescriptors
     Disables the tabledescriptor cache
    @@ -844,7 +844,7 @@ implements 
     
     isUsecache
    -publicbooleanisUsecache()
    +publicbooleanisUsecache()
     
     
     
    @@ -854,7 +854,7 @@ implements 
     get
     @Nullable
    -publicTableDescriptorget(TableNametablename)
    +publicTableDescriptorget(TableNametablename)
       throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
     Get the current table descriptor for the given table, or 
    null if none exists.
     
    @@ -876,7 +876,7 @@ public
     
     getAll
    -publichttps://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
     title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String,TableDescriptorgetAll()
    +publichttps://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
     title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String,TableDescriptorgetAll()
    throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
     Returns a map from table name to table descriptor for all 
    tables.
     
    @@ -895,7 +895,7 @@ public
     
     getByNamespace
    -publichttps://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
     title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in 
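
WIDTH_OF_SEQUENCE_ID and TABLEINFO_FILE_REGEX above handle the numeric suffix on .tableinfo files while tolerating old-style files that lack one. A hedged sketch of that kind of suffix parsing; the pattern below is illustrative, not the class's actual regex:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class TableInfoSuffixDemo {
  // Illustrative only: ".tableinfo" optionally followed by a numeric suffix.
  private static final Pattern TABLEINFO_LIKE =
      Pattern.compile("^\\.tableinfo(?:\\.(\\d+))?$");

  static int sequenceIdOf(String fileName) {
    Matcher m = TABLEINFO_LIKE.matcher(fileName);
    if (!m.matches()) {
      throw new IllegalArgumentException("not a tableinfo file: " + fileName);
    }
    // Old-style files carry no suffix; treat them as sequence id 0.
    return m.group(1) == null ? 0 : Integer.parseInt(m.group(1));
  }

  public static void main(String[] args) {
    System.out.println(sequenceIdOf(".tableinfo.0000000010")); // 10
    System.out.println(sequenceIdOf(".tableinfo"));            // 0 (old style)
  }
}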

    [24/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/Admin.html
    --
    diff --git a/apidocs/org/apache/hadoop/hbase/client/Admin.html 
    b/apidocs/org/apache/hadoop/hbase/client/Admin.html
    index f2d60a6..a7089ff 100644
    --- a/apidocs/org/apache/hadoop/hbase/client/Admin.html
    +++ b/apidocs/org/apache/hadoop/hbase/client/Admin.html
    @@ -101,13 +101,13 @@ var activeTableTab = "activeTableTab";
     
     
     All Superinterfaces:
    -org.apache.hadoop.hbase.Abortable, http://docs.oracle.com/javase/8/docs/api/java/lang/AutoCloseable.html?is-external=true;
     title="class or interface in java.lang">AutoCloseable, http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true;
     title="class or interface in java.io">Closeable
    +org.apache.hadoop.hbase.Abortable, https://docs.oracle.com/javase/8/docs/api/java/lang/AutoCloseable.html?is-external=true;
     title="class or interface in java.lang">AutoCloseable, https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true;
     title="class or interface in java.io">Closeable
     
     
     
     @InterfaceAudience.Public
     public interface Admin
    -extends org.apache.hadoop.hbase.Abortable, http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true;
     title="class or interface in java.io">Closeable
    +extends org.apache.hadoop.hbase.Abortable, https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true;
     title="class or interface in java.io">Closeable
     The administrative API for HBase. Obtain an instance from 
    Connection.getAdmin()
     and
      call close()
     when done.
      Admin can be used to create, drop, list, enable and disable and otherwise 
    modify tables,
    @@ -140,8 +140,8 @@ extends org.apache.hadoop.hbase.Abortable, http://docs.oracle.com/javas
     
     
     void
    -abort(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringwhy,
    - http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
     title="class or interface in java.lang">Throwablee)
    +abort(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringwhy,
    + https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
     title="class or interface in java.lang">Throwablee)
     Abort the server or client.
     
     
    @@ -153,7 +153,7 @@ extends org.apache.hadoop.hbase.Abortable, http://docs.oracle.com/javas
     
     
     
    -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Future.html?is-external=true;
     title="class or interface in java.util.concurrent">Futurehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
     title="class or interface in java.lang">Boolean
    +https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Future.html?is-external=true;
     title="class or interface in java.util.concurrent">Futurehttps://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
     title="class or interface in java.lang">Boolean
     abortProcedureAsync(longprocId,
    booleanmayInterruptIfRunning)
     Abort a procedure but does not block and wait for 
    completion.
    @@ -178,7 +178,7 @@ extends org.apache.hadoop.hbase.Abortable, http://docs.oracle.com/javas
     
     
     
    -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Future.html?is-external=true;
     title="class or interface in java.util.concurrent">Futurehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
    +https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Future.html?is-external=true;
     title="class or interface in java.util.concurrent">Futurehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
     addColumnFamilyAsync(TableNametableName,
     ColumnFamilyDescriptorcolumnFamily)
     Add a column family to an existing table.
    @@ -186,29 +186,29 @@ extends org.apache.hadoop.hbase.Abortable, http://docs.oracle.com/javas
     
     
     default void
    -addReplicationPeer(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">StringpeerId,
    +addReplicationPeer(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">StringpeerId,
       ReplicationPeerConfigpeerConfig)
     Add a new replication peer for replicating data to slave 
    cluster.
     
     
     
     void
    -addReplicationPeer(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">StringpeerId,
    +addReplicationPeer(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">StringpeerId,
       ReplicationPeerConfigpeerConfig,
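
The Admin javadoc above directs callers to obtain the instance from Connection.getAdmin() and call close() when done. A minimal sketch honoring that contract with try-with-resources; the table name is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AdminDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Admin is Closeable, so try-with-resources covers the close() contract.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      boolean exists = admin.tableExists(TableName.valueOf("demo")); // hypothetical table
      System.out.println("demo exists? " + exists);
    }
  }
}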
     

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.StoppableThread.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.StoppableThread.html
     
    b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.StoppableThread.html
    deleted file mode 100644
    index 8edc8be..000
    --- 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.StoppableThread.html
    +++ /dev/null
    @@ -1,341 +0,0 @@
    -http://www.w3.org/TR/html4/loose.dtd;>
    -
    -
    -
    -
    -
    -ProcedureExecutor.StoppableThread (Apache HBase 3.0.0-SNAPSHOT 
    API)
    -
    -
    -
    -
    -
    -var methods = {"i0":10,"i1":6};
    -var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
    Methods"],4:["t3","Abstract Methods"],8:["t4","Concrete Methods"]};
    -var altColor = "altColor";
    -var rowColor = "rowColor";
    -var tableTab = "tableTab";
    -var activeTableTab = "activeTableTab";
    -
    -
    -JavaScript is disabled on your browser.
    -
    -
    -
    -
    -
    -Skip navigation links
    -
    -
    -
    -
    -Overview
    -Package
    -Class
    -Use
    -Tree
    -Deprecated
    -Index
    -Help
    -
    -
    -
    -
    -PrevClass
    -NextClass
    -
    -
    -Frames
    -NoFrames
    -
    -
    -AllClasses
    -
    -
    -
    -
    -
    -
    -
    -Summary:
    -Nested|
    -Field|
    -Constr|
    -Method
    -
    -
    -Detail:
    -Field|
    -Constr|
    -Method
    -
    -
    -
    -
    -
    -
    -
    -
    -org.apache.hadoop.hbase.procedure2
    -Class 
    ProcedureExecutor.StoppableThread
    -
    -
    -
    -http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">java.lang.Object
    -
    -
    -http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?is-external=true;
     title="class or interface in java.lang">java.lang.Thread
    -
    -
    -org.apache.hadoop.hbase.procedure2.ProcedureExecutor.StoppableThread
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -All Implemented Interfaces:
    -http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true;
     title="class or interface in java.lang">Runnable
    -
    -
    -Direct Known Subclasses:
    -ProcedureExecutor.TimeoutExecutorThread,
     ProcedureExecutor.WorkerThread
    -
    -
    -Enclosing class:
-ProcedureExecutor<TEnvironment>
    -
    -
    -
    -private abstract static class ProcedureExecutor.StoppableThread
    -extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?is-external=true;
     title="class or interface in java.lang">Thread
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -Nested Class Summary
    -
    -
    -
    -
    -Nested classes/interfaces inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?is-external=true;
     title="class or interface in java.lang">Thread
    -http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.State.html?is-external=true;
     title="class or interface in java.lang">Thread.State, http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.UncaughtExceptionHandler.html?is-external=true;
     title="class or interface in 
    java.lang">Thread.UncaughtExceptionHandler
    -
    -
    -
    -
    -
    -
    -
    -
    -Field Summary
    -
    -
    -
    -
    -Fields inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?is-external=true;
     title="class or interface in java.lang">Thread
    -http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?is-external=true#MAX_PRIORITY;
     title="class or interface in java.lang">MAX_PRIORITY, http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?is-external=true#MIN_PRIORITY;
     title="class or interface in java.lang">MIN_PRIORITY, http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?is-external=true#NORM_PRIORITY;
     title="class or interface in java.lang">NORM_PRIORITY
    -
    -
    -
    -
    -
    -
    -
    -
    -Constructor Summary
    -
    -Constructors
    -
    -Constructor and Description
    -
    -
    -StoppableThread(http://docs.oracle.com/javase/8/docs/api/java/lang/ThreadGroup.html?is-external=true;
 title="class or interface in java.lang">ThreadGroup group,
    -   http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String name)
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -Method Summary
    -
    -All MethodsInstance MethodsAbstract MethodsConcrete Methods
    -
    -Modifier and Type
    -Method and Description
    -
    -
    -void
    -awaitTermination()
    -
    -
    -abstract void
    -sendStopSignal()
    -
    -
    -
    -
    -
    -
    -Methods inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?is-external=true;
     title="class or interface in java.lang">Thread
    -http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?is-external=true#activeCount--;
     title="class or interface in java.lang">activeCount, 

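The deleted class bundled a stop-signal/await pair on top of java.lang.Thread: sendStopSignal asks the worker to leave its run loop, and awaitTermination joins it. A self-contained sketch of that pattern under the same method names (illustrative only, not the HBase implementation this commit removed):

    abstract class StoppableThread extends Thread {
      StoppableThread(ThreadGroup group, String name) {
        super(group, name);
      }

      /** Ask the thread to exit its run loop; subclasses decide how. */
      abstract void sendStopSignal();

      /** Signal the thread, then join it while tolerating interrupts. */
      void awaitTermination() {
        sendStopSignal();
        boolean interrupted = false;
        while (isAlive()) {
          try {
            join(250);
          } catch (InterruptedException e) {
            interrupted = true;
          }
        }
        if (interrupted) {
          Thread.currentThread().interrupt(); // restore the interrupt flag
        }
      }
    }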
    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
    index 28f226e..3152619 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
    @@ -328,7 +328,7 @@
     320long estimatedHeapSizeOfResult = 0;
     321// We don't make Iterator here
     322for (Cell cell : rs.rawCells()) {
    -323  estimatedHeapSizeOfResult += 
    PrivateCellUtil.estimatedHeapSizeOf(cell);
    +323  estimatedHeapSizeOfResult += 
    PrivateCellUtil.estimatedSizeOfCell(cell);
     324}
     325return estimatedHeapSizeOfResult;
     326  }
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
    index 28f226e..3152619 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
    @@ -328,7 +328,7 @@
     320long estimatedHeapSizeOfResult = 0;
     321// We don't make Iterator here
     322for (Cell cell : rs.rawCells()) {
    -323  estimatedHeapSizeOfResult += 
    PrivateCellUtil.estimatedHeapSizeOf(cell);
    +323  estimatedHeapSizeOfResult += 
    PrivateCellUtil.estimatedSizeOfCell(cell);
     324}
     325return estimatedHeapSizeOfResult;
     326  }
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
    index 28f226e..3152619 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
    @@ -328,7 +328,7 @@
     320long estimatedHeapSizeOfResult = 0;
     321// We don't make Iterator here
     322for (Cell cell : rs.rawCells()) {
    -323  estimatedHeapSizeOfResult += 
    PrivateCellUtil.estimatedHeapSizeOf(cell);
    +323  estimatedHeapSizeOfResult += 
    PrivateCellUtil.estimatedSizeOfCell(cell);
     324}
     325return estimatedHeapSizeOfResult;
     326  }
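All three hunks above are the same mechanical rename: PrivateCellUtil.estimatedHeapSizeOf(Cell) becomes PrivateCellUtil.estimatedSizeOfCell(Cell). Reassembled from the source lines shown (PrivateCellUtil is HBase-internal, so treat this as a sketch rather than public API):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.PrivateCellUtil;
    import org.apache.hadoop.hbase.client.Result;

    final class ResultSizeEstimator {
      /** Sum the estimated heap size of every cell in a Result. */
      static long calcEstimatedSize(Result rs) {
        long estimatedHeapSizeOfResult = 0;
        // We don't make Iterator here
        for (Cell cell : rs.rawCells()) {
          estimatedHeapSizeOfResult += PrivateCellUtil.estimatedSizeOfCell(cell);
        }
        return estimatedHeapSizeOfResult;
      }
    }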
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
    index 18bd3f6..cfa5e40 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
    @@ -496,7 +496,7 @@
     488  size * ClassSize.REFERENCE);
     489
     490  for(Cell cell : entry.getValue()) 
    {
    -491heapsize += 
    PrivateCellUtil.estimatedHeapSizeOf(cell);
    +491heapsize += 
    PrivateCellUtil.estimatedSizeOfCell(cell);
     492  }
     493}
     494heapsize += getAttributeSize();
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
    index 18bd3f6..cfa5e40 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
    @@ -496,7 +496,7 @@
     488  size * ClassSize.REFERENCE);
     489
     490  for(Cell cell : entry.getValue()) 
    {
    -491heapsize += 
    PrivateCellUtil.estimatedHeapSizeOf(cell);
    +491heapsize += 
    PrivateCellUtil.estimatedSizeOfCell(cell);
     492  }
     493}
     494heapsize += getAttributeSize();
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/client/Result.html
    --
    diff --git 

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
    new file mode 100644
    index 000..7edb3ff
    --- /dev/null
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
    @@ -0,0 +1,3659 @@
    +http://www.w3.org/TR/html4/loose.dtd;>
    +
    +
    +Source code
    +
    +
    +
    +
    +001/*
    +002 * Licensed to the Apache Software 
    Foundation (ASF) under one
    +003 * or more contributor license 
    agreements.  See the NOTICE file
    +004 * distributed with this work for 
    additional information
    +005 * regarding copyright ownership.  The 
    ASF licenses this file
    +006 * to you under the Apache License, 
    Version 2.0 (the
    +007 * "License"); you may not use this file 
    except in compliance
    +008 * with the License.  You may obtain a 
    copy of the License at
    +009 *
    +010 * 
    http://www.apache.org/licenses/LICENSE-2.0
    +011 *
    +012 * Unless required by applicable law or 
    agreed to in writing, software
    +013 * distributed under the License is 
    distributed on an "AS IS" BASIS,
    +014 * WITHOUT WARRANTIES OR CONDITIONS OF 
    ANY KIND, either express or implied.
    +015 * See the License for the specific 
    language governing permissions and
    +016 * limitations under the License.
    +017 */
    +018package org.apache.hadoop.hbase.master;
    +019
    +020import static 
    org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS;
    +021
    +022import com.google.protobuf.Descriptors;
    +023import com.google.protobuf.Service;
    +024import java.io.IOException;
    +025import java.io.InterruptedIOException;
    +026import java.lang.reflect.Constructor;
    +027import 
    java.lang.reflect.InvocationTargetException;
    +028import java.net.InetAddress;
    +029import java.net.InetSocketAddress;
    +030import java.net.UnknownHostException;
    +031import java.util.ArrayList;
    +032import java.util.Arrays;
    +033import java.util.Collection;
    +034import java.util.Collections;
    +035import java.util.Comparator;
    +036import java.util.EnumSet;
    +037import java.util.HashMap;
    +038import java.util.Iterator;
    +039import java.util.List;
    +040import java.util.Map;
    +041import java.util.Map.Entry;
    +042import java.util.Objects;
    +043import java.util.Set;
    +044import 
    java.util.concurrent.ExecutionException;
    +045import java.util.concurrent.Future;
    +046import java.util.concurrent.TimeUnit;
    +047import 
    java.util.concurrent.TimeoutException;
    +048import 
    java.util.concurrent.atomic.AtomicInteger;
    +049import 
    java.util.concurrent.atomic.AtomicReference;
    +050import java.util.function.Function;
    +051import java.util.regex.Pattern;
    +052import java.util.stream.Collectors;
    +053import javax.servlet.ServletException;
    +054import javax.servlet.http.HttpServlet;
    +055import 
    javax.servlet.http.HttpServletRequest;
    +056import 
    javax.servlet.http.HttpServletResponse;
    +057import 
    org.apache.commons.lang3.StringUtils;
    +058import 
    org.apache.hadoop.conf.Configuration;
    +059import org.apache.hadoop.fs.Path;
    +060import 
    org.apache.hadoop.hbase.ClusterId;
    +061import 
    org.apache.hadoop.hbase.ClusterMetrics;
    +062import 
    org.apache.hadoop.hbase.ClusterMetrics.Option;
    +063import 
    org.apache.hadoop.hbase.ClusterMetricsBuilder;
    +064import 
    org.apache.hadoop.hbase.DoNotRetryIOException;
    +065import 
    org.apache.hadoop.hbase.HBaseIOException;
    +066import 
    org.apache.hadoop.hbase.HBaseInterfaceAudience;
    +067import 
    org.apache.hadoop.hbase.HConstants;
    +068import 
    org.apache.hadoop.hbase.InvalidFamilyOperationException;
    +069import 
    org.apache.hadoop.hbase.MasterNotRunningException;
    +070import 
    org.apache.hadoop.hbase.MetaTableAccessor;
    +071import 
    org.apache.hadoop.hbase.NamespaceDescriptor;
    +072import 
    org.apache.hadoop.hbase.PleaseHoldException;
    +073import 
    org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
    +074import 
    org.apache.hadoop.hbase.ServerLoad;
    +075import 
    org.apache.hadoop.hbase.ServerMetricsBuilder;
    +076import 
    org.apache.hadoop.hbase.ServerName;
    +077import 
    org.apache.hadoop.hbase.TableDescriptors;
    +078import 
    org.apache.hadoop.hbase.TableName;
    +079import 
    org.apache.hadoop.hbase.TableNotDisabledException;
    +080import 
    org.apache.hadoop.hbase.TableNotFoundException;
    +081import 
    org.apache.hadoop.hbase.UnknownRegionException;
    +082import 
    org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    +083import 
    org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    +084import 
    org.apache.hadoop.hbase.client.MasterSwitchType;
    +085import 
    org.apache.hadoop.hbase.client.RegionInfo;
    +086import 
    org.apache.hadoop.hbase.client.Result;
    +087import 
    org.apache.hadoop.hbase.client.TableDescriptor;
    +088import 
    org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    +089import 
    org.apache.hadoop.hbase.client.TableState;
    +090import 
    org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
    +091import 
    

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
     
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
    index 5e1590b..d481372 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
    @@ -126,15 +126,15 @@
     
     
     private RpcRetryingCallerFactory
    -ConnectionImplementation.rpcCallerFactory
    +RegionCoprocessorRpcChannel.rpcCallerFactory
     
     
     private RpcRetryingCallerFactory
    -HTable.rpcCallerFactory
    +ConnectionImplementation.rpcCallerFactory
     
     
     private RpcRetryingCallerFactory
    -RegionCoprocessorRpcChannel.rpcCallerFactory
    +HTable.rpcCallerFactory
     
     
     private RpcRetryingCallerFactory
    @@ -155,21 +155,21 @@
     
     
     RpcRetryingCallerFactory
-ClusterConnection.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf)
-Returns a new RpcRetryingCallerFactory from the given Configuration.
-
+ConnectionImplementation.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf)
     
     
     RpcRetryingCallerFactory
-ConnectionImplementation.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf)
+ClusterConnection.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf)
+Returns a new RpcRetryingCallerFactory from the given Configuration.
+
     
     
     RpcRetryingCallerFactory
    -ClusterConnection.getRpcRetryingCallerFactory()
    +ConnectionImplementation.getRpcRetryingCallerFactory()
     
     
     RpcRetryingCallerFactory
    -ConnectionImplementation.getRpcRetryingCallerFactory()
    +ClusterConnection.getRpcRetryingCallerFactory()
     
     
     static RpcRetryingCallerFactory
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
    index 018438c..6384833 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
    @@ -283,14 +283,6 @@ service.
     
     
     private Scan
    -AsyncScanSingleRegionRpcRetryingCaller.scan
    -
    -
    -protected Scan
    -ScannerCallable.scan
    -
    -
    -private Scan
     ScannerCallableWithReplicas.scan
     
     
    @@ -307,6 +299,14 @@ service.
     
     
     private Scan
    +AsyncScanSingleRegionRpcRetryingCaller.scan
    +
    +
    +protected Scan
    +ScannerCallable.scan
    +
    +
    +private Scan
     TableSnapshotScanner.scan
     
     
    @@ -339,11 +339,11 @@ service.
     
     
     protected Scan
    -ScannerCallable.getScan()
    +ClientScanner.getScan()
     
     
     protected Scan
    -ClientScanner.getScan()
    +ScannerCallable.getScan()
     
     
     Scan
    @@ -638,29 +638,29 @@ service.
     
     
     ResultScanner
-RawAsyncTableImpl.getScanner(Scan scan)
    -
    -
    -ResultScanner
-HTable.getScanner(Scan scan)
-The underlying HTable must not be closed.
+AsyncTable.getScanner(Scan scan)
    +Returns a scanner on the current table as specified by the 
    Scan 
    object.
     
     
    -
    +
     ResultScanner
 Table.getScanner(Scan scan)
     Returns a scanner on the current table as specified by the 
    Scan
      object.
     
     
    -
    +
     ResultScanner
 AsyncTableImpl.getScanner(Scan scan)
     
    +
    +ResultScanner
+RawAsyncTableImpl.getScanner(Scan scan)
    +
     
     ResultScanner
-AsyncTable.getScanner(Scan scan)
-Returns a scanner on the current table as specified by the Scan object.
+HTable.getScanner(Scan scan)
+The underlying HTable must not be closed.
     
     
     
    @@ -703,7 +703,9 @@ service.
     
     
     http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListResult
-RawAsyncTableImpl.scanAll(Scan scan)
+AsyncTable.scanAll(Scan scan)
+Return all the results that match the given scan object.
    +
     
     
     http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListResult
    @@ -711,9 +713,7 @@ service.
     
     
     http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListResult
-AsyncTable.scanAll(Scan scan)
-Return all the results that match the given scan object.
-
+RawAsyncTableImpl.scanAll(Scan scan)
     
     
     private Scan
    @@ 
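Of the reordered rows above, AsyncTable.scanAll(Scan) carries the most behavioral weight: it buffers every matching Result in memory and completes only when the scan is done, so it only suits bounded scans. A minimal sketch, assuming a reachable cluster and a hypothetical table named "t1":

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;

    public final class ScanAllExample {
      public static void main(String[] args) throws Exception {
        try (AsyncConnection conn =
            ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
          // setLimit keeps the fully-buffered scanAll result small.
          List<Result> results = conn.getTable(TableName.valueOf("t1"))
              .scanAll(new Scan().setLimit(100))
              .get();
          System.out.println("rows: " + results.size());
        }
      }
    }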

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
    index 4c96d78..f3e8e8b 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
    @@ -620,72 +620,72 @@ service.
     
     
     boolean
-Table.checkAndPut(byte[] row,
+HTable.checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
Put put)
    -Deprecated.
    -Since 2.0.0. Will be 
    removed in 3.0.0. Use Table.checkAndMutate(byte[],
     byte[])
    -
    +Deprecated.
     
     
     
     boolean
-HTable.checkAndPut(byte[] row,
+Table.checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
byte[] value,
Put put)
    -Deprecated.
    +Deprecated.
    +Since 2.0.0. Will be 
    removed in 3.0.0. Use Table.checkAndMutate(byte[],
     byte[])
    +
     
     
     
     boolean
-Table.checkAndPut(byte[] row,
+HTable.checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
CompareFilter.CompareOp compareOp,
byte[] value,
Put put)
    -Deprecated.
    -Since 2.0.0. Will be 
    removed in 3.0.0. Use Table.checkAndMutate(byte[],
     byte[])
    -
    +Deprecated.
     
     
     
     boolean
-HTable.checkAndPut(byte[] row,
+Table.checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
CompareFilter.CompareOp compareOp,
byte[] value,
Put put)
    -Deprecated.
    +Deprecated.
    +Since 2.0.0. Will be 
    removed in 3.0.0. Use Table.checkAndMutate(byte[],
     byte[])
    +
     
     
     
     boolean
-Table.checkAndPut(byte[] row,
+HTable.checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
CompareOperator op,
byte[] value,
Put put)
    -Deprecated.
    -Since 2.0.0. Will be 
    removed in 3.0.0. Use Table.checkAndMutate(byte[],
     byte[])
    -
    +Deprecated.
     
     
     
     boolean
-HTable.checkAndPut(byte[] row,
+Table.checkAndPut(byte[] row,
byte[] family,
byte[] qualifier,
CompareOperator op,
byte[] value,
Put put)
    -Deprecated.
    +Deprecated.
    +Since 2.0.0. Will be 
    removed in 3.0.0. Use Table.checkAndMutate(byte[],
     byte[])
    +
     
     
     
    @@ -718,27 +718,27 @@ service.
     
     
     http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
-AsyncTable.put(Put put)
-Puts some data to the table.
-
+RawAsyncTableImpl.put(Put put)
     
     
     void
+HTable.put(Put put)
    +
    +
    +void
 Table.put(Put put)
     Puts some data in the table.
     
     
    -
    -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
-AsyncTableImpl.put(Put put)
    -
     
     http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
-RawAsyncTableImpl.put(Put put)
+AsyncTableImpl.put(Put put)
     
     
    -void
-HTable.put(Put put)
    +http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
+AsyncTable.put(Put put)
    +Puts some data to the table.
    +
     
     
     boolean
    @@ -757,27 +757,27 @@ service.
     
     
     http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
     title="class or interface in java.lang">Boolean
-AsyncTable.CheckAndMutateBuilder.thenPut(Put put)
+RawAsyncTableImpl.CheckAndMutateBuilderImpl.thenPut(Put put)
     
     
     boolean
-Table.CheckAndMutateBuilder.thenPut(Put put)
+HTable.CheckAndMutateBuilderImpl.thenPut(Put put)
     
     
    -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
     title="class or interface in java.lang">Boolean
-RawAsyncTableImpl.CheckAndMutateBuilderImpl.thenPut(Put put)
    +boolean
+Table.CheckAndMutateBuilder.thenPut(Put put)
     
     
    
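Every checkAndPut overload in this table is deprecated in favor of the builder chain behind Table.checkAndMutate(byte[], byte[]). A sketch of the replacement call, with placeholder row, family, qualifier and values:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class CheckAndPutMigration {
      /** Put only if the current value of cf:q equals "old". */
      static boolean putIfValueMatches(Table table) throws IOException {
        byte[] row = Bytes.toBytes("row1");
        byte[] cf = Bytes.toBytes("cf");
        byte[] q = Bytes.toBytes("q");
        Put put = new Put(row).addColumn(cf, q, Bytes.toBytes("new"));
        return table.checkAndMutate(row, cf)
            .qualifier(q)
            .ifEquals(Bytes.toBytes("old"))
            .thenPut(put);
      }
    }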

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
    index 78d979d..81b1f23 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
    @@ -292,7 +292,7 @@ service.
     
     
     private static HRegionLocation
-MetaTableAccessor.getRegionLocation(Result r,
+AsyncMetaTableAccessor.getRegionLocation(Result r,
  RegionInfo regionInfo,
  int replicaId)
     Returns the HRegionLocation parsed from the given meta row 
    Result
    @@ -301,7 +301,7 @@ service.
     
     
     private static HRegionLocation
-AsyncMetaTableAccessor.getRegionLocation(Result r,
+MetaTableAccessor.getRegionLocation(Result r,
  RegionInfo regionInfo,
  int replicaId)
     Returns the HRegionLocation parsed from the given meta row 
    Result
    @@ -309,14 +309,14 @@ service.
     
     
     
-static RegionLocations
-MetaTableAccessor.getRegionLocations(Result r)
+private static http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">Optional<RegionLocations>
+AsyncMetaTableAccessor.getRegionLocations(Result r)
     Returns an HRegionLocationList extracted from the 
    result.
     
     
     
-private static http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">Optional<RegionLocations>
-AsyncMetaTableAccessor.getRegionLocations(Result r)
+static RegionLocations
+MetaTableAccessor.getRegionLocations(Result r)
     Returns an HRegionLocationList extracted from the 
    result.
     
     
    @@ -326,42 +326,42 @@ service.
     
     
     private static long
-MetaTableAccessor.getSeqNumDuringOpen(Result r,
+AsyncMetaTableAccessor.getSeqNumDuringOpen(Result r,
int replicaId)
     The latest seqnum that the server writing to meta observed 
    when opening the region.
     
     
     
     private static long
-AsyncMetaTableAccessor.getSeqNumDuringOpen(Result r,
+MetaTableAccessor.getSeqNumDuringOpen(Result r,
int replicaId)
     The latest seqnum that the server writing to meta observed 
    when opening the region.
     
     
     
-static ServerName
-MetaTableAccessor.getServerName(Result r,
+private static http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">Optional<ServerName>
+AsyncMetaTableAccessor.getServerName(Result r,
  int replicaId)
     Returns a ServerName from catalog table Result.
     
     
     
-private static http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">Optional<ServerName>
-AsyncMetaTableAccessor.getServerName(Result r,
+static ServerName
+MetaTableAccessor.getServerName(Result r,
  int replicaId)
     Returns a ServerName from catalog table Result.
     
     
     
+private static http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">Optional<TableState>
+AsyncMetaTableAccessor.getTableState(Result r)
    +
    +
     static TableState
 MetaTableAccessor.getTableState(Result r)
     Decode table state from META Result.
     
     
    -
-private static http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">Optional<TableState>
-AsyncMetaTableAccessor.getTableState(Result r)
    -
     
     void
     AsyncMetaTableAccessor.MetaTableScanResultConsumer.onNext(Result[]results,
    @@ -457,13 +457,13 @@ service.
     ClientScanner.cache
     
     
    -private http://docs.oracle.com/javase/8/docs/api/java/util/Deque.html?is-external=true;
 title="class or interface in java.util">Deque<Result>
    -BatchScanResultCache.partialResults
    -
    -
     private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<Result>
     CompleteScanResultCache.partialResults
     
    +
    +private http://docs.oracle.com/javase/8/docs/api/java/util/Deque.html?is-external=true;
 title="class or interface in java.util">Deque<Result>
    +BatchScanResultCache.partialResults
    +
     
     private http://docs.oracle.com/javase/8/docs/api/java/util/Queue.html?is-external=true;
 title="class or interface in java.util">Queue<Result>
     AsyncTableResultScanner.queue
    @@ -486,7 +486,7 @@ service.
     
     
     Result[]
-BatchScanResultCache.addAndGet(Result[] results,
+AllowPartialScanResultCache.addAndGet(Result[] results,
  boolean isHeartbeatMessage)
     
     
    @@ -496,20 +496,24 @@ service.
     
     
     Result[]
-AllowPartialScanResultCache.addAndGet(Result[] results,
+BatchScanResultCache.addAndGet(Result[] results,
  boolean isHeartbeatMessage)
     
     
     Result
-HTable.append(Append append)
    -
    -
    -Result
 Table.append(Append append)
     Appends values to one or more columns within a single 
    row.
     
     
    +
    
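The MetaTableAccessor statics in this table decode raw hbase:meta rows; getRegionLocations(Result), for instance, recovers every replica location stored in a single row. A small sketch, assuming the Result was read from hbase:meta:

    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.RegionLocations;
    import org.apache.hadoop.hbase.client.Result;

    final class MetaRowDecoder {
      /** Print the replica locations encoded in one catalog row, if any. */
      static void printLocations(Result metaRow) {
        RegionLocations locs = MetaTableAccessor.getRegionLocations(metaRow);
        if (locs != null) {
          System.out.println(locs);
        }
      }
    }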

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
    index f9a42eb..720740a 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
    @@ -849,23 +849,23 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     private Connection
    -RestoreTablesClient.conn
    +BackupAdminImpl.conn
     
     
    -protected Connection
    -TableBackupClient.conn
    -
    -
     (package private) Connection
     BackupCommands.Command.conn
     
    +
    +private Connection
    +RestoreTablesClient.conn
    +
     
     protected Connection
    -BackupManager.conn
    +TableBackupClient.conn
     
     
    -private Connection
    -BackupAdminImpl.conn
    +protected Connection
    +BackupManager.conn
     
     
     private Connection
    @@ -1179,13 +1179,13 @@ Input/OutputFormats, a table indexing MapReduce job, 
    and utility methods.
     
     
     
    -(package private) Connection
    -ConnectionImplementation.MasterServiceState.connection
    -
    -
     private Connection
     RegionServerCallable.connection
     
    +
    +(package private) Connection
    +ConnectionImplementation.MasterServiceState.connection
    +
     
     
     
    @@ -1230,20 +1230,20 @@ Input/OutputFormats, a table indexing MapReduce job, 
    and utility methods.
     
     
     
    -Connection
    -Admin.getConnection()
    -
    -
     (package private) Connection
     RegionAdminServiceCallable.getConnection()
     
    -
    +
     protected Connection
     HTable.getConnection()
     INTERNAL Used by unit tests and tools to do 
    low-level
      manipulations.
     
     
    +
    +Connection
    +Admin.getConnection()
    +
     
     Connection
     HBaseAdmin.getConnection()
    @@ -1557,11 +1557,11 @@ Input/OutputFormats, a table indexing MapReduce job, 
    and utility methods.
     
     
     private Connection
    -TableInputFormatBase.connection
    +HRegionPartitioner.connection
     
     
     private Connection
    -HRegionPartitioner.connection
    +TableInputFormatBase.connection
     
     
     
    @@ -1594,22 +1594,22 @@ Input/OutputFormats, a table indexing MapReduce job, 
    and utility methods.
     
     
     
    -private Connection
    -TableOutputFormat.TableRecordWriter.connection
    -
    -
     (package private) Connection
     MultiTableOutputFormat.MultiTableRecordWriter.connection
     
    +
    +private Connection
    +HRegionPartitioner.connection
    +
     
     private Connection
    -TableInputFormatBase.connection
    -The underlying Connection 
    of the table.
    -
    +TableOutputFormat.TableRecordWriter.connection
     
     
     private Connection
    -HRegionPartitioner.connection
    +TableInputFormatBase.connection
    +The underlying Connection 
    of the table.
    +
     
     
     (package private) Connection
    @@ -1694,15 +1694,15 @@ Input/OutputFormats, a table indexing MapReduce job, 
    and utility methods.
     
     
     private Connection
    -CatalogJanitor.connection
    +RegionPlacementMaintainer.connection
     
     
     private Connection
    -SnapshotOfRegionAssignmentFromMeta.connection
    +CatalogJanitor.connection
     
     
     private Connection
    -RegionPlacementMaintainer.connection
    +SnapshotOfRegionAssignmentFromMeta.connection
     
     
     
    @@ -1731,7 +1731,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     
    -static void
    +private void
 TableStateManager.fixTableStates(TableDescriptors tableDescriptors,
   Connection connection)
     
    @@ -1839,31 +1839,31 @@ Input/OutputFormats, a table indexing MapReduce job, 
    and utility methods.
     
     
     private Connection
    -TableQuotaSnapshotStore.conn
    +QuotaObserverChore.conn
     
     
     private Connection
    -SpaceQuotaRefresherChore.conn
    +QuotaObserverChore.TablesWithQuotas.conn
     
     
     private Connection
    -NamespaceQuotaSnapshotStore.conn
    +SnapshotQuotaObserverChore.conn
     
     
     private Connection
    -SnapshotQuotaObserverChore.conn
    +NamespaceQuotaSnapshotStore.conn
     
     
     private Connection
    -QuotaObserverChore.conn
    +TableQuotaSnapshotStore.conn
     
     
     private Connection
    -QuotaObserverChore.TablesWithQuotas.conn
    +TableSpaceQuotaSnapshotNotifier.conn
     
     
     private Connection
    -TableSpaceQuotaSnapshotNotifier.conn
    +SpaceQuotaRefresherChore.conn
     
     
     private Connection
    @@ -2197,11 +2197,11 @@ Input/OutputFormats, a table indexing MapReduce job, 
    and utility methods.
     
     
     Connection
-RegionCoprocessorHost.RegionEnvironment.createConnection(org.apache.hadoop.conf.Configuration conf)
+HRegionServer.createConnection(org.apache.hadoop.conf.Configuration conf)
     
     
     Connection
-HRegionServer.createConnection(org.apache.hadoop.conf.Configuration conf)
+RegionCoprocessorHost.RegionEnvironment.createConnection(org.apache.hadoop.conf.Configuration conf)
     
     
     Connection
    @@ -2209,11 +2209,11 @@ Input/OutputFormats, a table indexing MapReduce job, 
    and utility methods.
     
     
     Connection
    -RegionCoprocessorHost.RegionEnvironment.getConnection()
    +HRegionServer.getConnection()
     
     
     Connection
    -HRegionServer.getConnection()
    +RegionCoprocessorHost.RegionEnvironment.getConnection()
     
     
     
    @@ -2247,11 +2247,11 @@ Input/OutputFormats, a table 


    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
    index e3d9f70..35f0e35 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
    @@ -208,9 +208,9 @@ service.
     
     
     
    -default ResultScanner
-AsyncTable.getScanner(byte[] family)
    -Gets a scanner on the current table for the given 
    family.
    +ResultScanner
+HTable.getScanner(byte[] family)
    +The underlying HTable must 
    not be closed.
     
     
     
    @@ -220,16 +220,16 @@ service.
     
     
     
    -ResultScanner
-HTable.getScanner(byte[] family)
    -The underlying HTable must 
    not be closed.
    +default ResultScanner
+AsyncTable.getScanner(byte[] family)
    +Gets a scanner on the current table for the given 
    family.
     
     
     
    -default ResultScanner
-AsyncTable.getScanner(byte[] family,
   byte[] qualifier)
    +HTable.getScanner(byte[]family,
       byte[]qualifier)
    -Gets a scanner on the current table for the given family 
    and qualifier.
    +The underlying HTable must 
    not be closed.
     
     
     
    @@ -240,37 +240,37 @@ service.
     
     
     
    -ResultScanner
-HTable.getScanner(byte[] family,
   byte[] qualifier)
    +AsyncTable.getScanner(byte[]family,
       byte[]qualifier)
    -The underlying HTable must 
    not be closed.
    +Gets a scanner on the current table for the given family 
    and qualifier.
     
     
     
     ResultScanner
    -AsyncTable.getScanner(Scanscan)
    -Returns a scanner on the current table as specified by the 
    Scan 
    object.
    -
    +RawAsyncTableImpl.getScanner(Scanscan)
     
     
     ResultScanner
    -Table.getScanner(Scanscan)
    -Returns a scanner on the current table as specified by the 
    Scan
    - object.
    +HTable.getScanner(Scanscan)
    +The underlying HTable must 
    not be closed.
     
     
     
     ResultScanner
    -AsyncTableImpl.getScanner(Scanscan)
    +Table.getScanner(Scanscan)
    +Returns a scanner on the current table as specified by the 
    Scan
    + object.
    +
     
     
     ResultScanner
    -RawAsyncTableImpl.getScanner(Scanscan)
    +AsyncTableImpl.getScanner(Scanscan)
     
     
     ResultScanner
    -HTable.getScanner(Scanscan)
    -The underlying HTable must 
    not be closed.
    +AsyncTable.getScanner(Scanscan)
    +Returns a scanner on the current table as specified by the 
    Scan 
    object.
     
     
     
    
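Whichever getScanner overload is chosen, the returned ResultScanner holds server-side resources until closed, so try-with-resources is the safe idiom. A sketch with a placeholder column family:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class FamilyScanExample {
      /** Count the rows that have at least one cell in family "cf". */
      static long countRows(Table table) throws IOException {
        long rows = 0;
        try (ResultScanner scanner = table.getScanner(Bytes.toBytes("cf"))) {
          for (Result r : scanner) {
            rows++;
          }
        }
        return rows;
      }
    }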
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
     
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
    index b1d1cef..d730879 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
    @@ -106,11 +106,11 @@
     
     
     RetriesExhaustedWithDetailsException
    -AsyncRequestFuture.getErrors()
    +AsyncRequestFutureImpl.getErrors()
     
     
     RetriesExhaustedWithDetailsException
    -AsyncRequestFutureImpl.getErrors()
    +AsyncRequestFuture.getErrors()
     
     
     (package private) RetriesExhaustedWithDetailsException
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
    index 0a290e1..9642faa 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
    @@ -234,28 +234,36 @@
     
     
     
    +T
+RpcRetryingCallerImpl.callWithoutRetries(RetryingCallable<T> callable,
+  int callTimeout)
    +
    +
     T
 RpcRetryingCaller.callWithoutRetries(RetryingCallable<T> callable,
   int callTimeout)
     Call the server once only.
     
     
    -
    +
     T
-RpcRetryingCallerImpl.callWithoutRetries(RetryingCallable<T> callable,
-  int callTimeout)
+RpcRetryingCallerImpl.callWithRetries(RetryingCallable<T> callable,
+   int callTimeout)
     
    -
    +
     T
 RpcRetryingCaller.callWithRetries(RetryingCallable<T> callable,
    int callTimeout)
     Retries if invocation fails.
     
     
    +
    +RetryingCallerInterceptorContext
+NoOpRetryingInterceptorContext.prepare(RetryingCallable<?> callable)
    +
     
    -T
-RpcRetryingCallerImpl.callWithRetries(RetryingCallable<T> callable,
-   int callTimeout)
    +FastFailInterceptorContext
+FastFailInterceptorContext.prepare(RetryingCallable<?> callable)
     
     
     abstract RetryingCallerInterceptorContext
    @@ -267,11 +275,13 @@
     
     
     
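RpcRetryingCaller.callWithRetries re-invokes a RetryingCallable until it succeeds or the retry budget is exhausted, while callWithoutRetries makes exactly one attempt; the interceptor contexts listed afterwards hook into each attempt. A deliberately simplified, illustrative version of the retry loop (not HBase's implementation, which also tracks operation timeouts, backoff tables and fast-fail state):

    import java.io.IOException;
    import java.util.concurrent.Callable;

    final class RetryingCallerSketch {
      /** Retry a callable with capped exponential backoff. */
      static <T> T callWithRetries(Callable<T> callable, int maxAttempts)
          throws IOException {
        if (maxAttempts < 1) {
          throw new IllegalArgumentException("maxAttempts must be >= 1");
        }
        IOException last = null;
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
          try {
            return callable.call();
          } catch (Exception e) {
            last = e instanceof IOException ? (IOException) e : new IOException(e);
            try {
              Thread.sleep(100L << Math.min(attempt, 6)); // capped backoff
            } catch (InterruptedException ie) {
              Thread.currentThread().interrupt();
              throw last;
            }
          }
        }
        throw last;
      }
    }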

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
    index ad601c4..53e455f 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
    @@ -1117,1183 +1117,1186 @@
     1109  @Nullable
     1110  public static TableState 
    getTableState(Connection conn, TableName tableName)
       throws IOException {
    -1112Table metaHTable = 
    getMetaHTable(conn);
    -1113Get get = new 
    Get(tableName.getName()).addColumn(getTableFamily(), getTableStateColumn());
    -1114long time = 
    EnvironmentEdgeManager.currentTime();
    -1115get.setTimeRange(0, time);
    -1116Result result =
    -1117metaHTable.get(get);
    -1118return getTableState(result);
    -1119  }
    -1120
    -1121  /**
    -1122   * Fetch table states from META 
    table
    -1123   * @param conn connection to use
-1124   * @return map {tableName -> state}
    -1125   * @throws IOException
    -1126   */
-1127  public static Map<TableName, TableState> getTableStates(Connection conn)
-1128  throws IOException {
-1129final Map<TableName, TableState> states = new LinkedHashMap<>();
    -1130Visitor collector = new Visitor() 
    {
    -1131  @Override
    -1132  public boolean visit(Result r) 
    throws IOException {
    -1133TableState state = 
    getTableState(r);
    -1134if (state != null)
    -1135  
    states.put(state.getTableName(), state);
    -1136return true;
    -1137  }
    -1138};
    -1139fullScanTables(conn, collector);
    -1140return states;
    -1141  }
    -1142
    -1143  /**
    -1144   * Updates state in META
    -1145   * @param conn connection to use
    -1146   * @param tableName table to look 
    for
    -1147   * @throws IOException
    -1148   */
    -1149  public static void 
    updateTableState(Connection conn, TableName tableName,
    -1150  TableState.State actual) throws 
    IOException {
    -1151updateTableState(conn, new 
    TableState(tableName, actual));
    -1152  }
    -1153
    -1154  /**
    -1155   * Decode table state from META 
    Result.
    -1156   * Should contain cell from 
    HConstants.TABLE_FAMILY
    -1157   * @param r result
    -1158   * @return null if not found
    -1159   * @throws IOException
    -1160   */
    -1161  @Nullable
    -1162  public static TableState 
    getTableState(Result r)
    -1163  throws IOException {
    -1164Cell cell = 
    r.getColumnLatestCell(getTableFamily(), getTableStateColumn());
    -1165if (cell == null) return null;
    -1166try {
    -1167  return 
    TableState.parseFrom(TableName.valueOf(r.getRow()),
    -1168  
    Arrays.copyOfRange(cell.getValueArray(),
    -1169  cell.getValueOffset(), 
    cell.getValueOffset() + cell.getValueLength()));
    -1170} catch (DeserializationException e) 
    {
    -1171  throw new IOException(e);
    -1172}
    -1173
    -1174  }
    -1175
    -1176  /**
    -1177   * Implementations 'visit' a catalog 
    table row.
    -1178   */
    -1179  public interface Visitor {
    -1180/**
    -1181 * Visit the catalog table row.
    -1182 * @param r A row from catalog 
    table
    -1183 * @return True if we are to proceed 
    scanning the table, else false if
    -1184 * we are to stop now.
    -1185 */
    -1186boolean visit(final Result r) throws 
    IOException;
    -1187  }
    -1188
    -1189  /**
    -1190   * Implementations 'visit' a catalog 
    table row but with close() at the end.
    -1191   */
    -1192  public interface CloseableVisitor 
    extends Visitor, Closeable {
    -1193  }
    -1194
    -1195  /**
    -1196   * A {@link Visitor} that collects 
    content out of passed {@link Result}.
    -1197   */
-1198  static abstract class CollectingVisitor<T> implements Visitor {
-1199final List<T> results = new ArrayList<>();
    -1200@Override
    -1201public boolean visit(Result r) 
    throws IOException {
    -1202  if (r ==  null || r.isEmpty()) 
    return true;
    -1203  add(r);
    -1204  return true;
    -1205}
    -1206
    -1207abstract void add(Result r);
    -1208
    -1209/**
    -1210 * @return Collected results; wait 
    till visits complete to collect all
    -1211 * possible results
    -1212 */
-1213List<T> getResults() {
    -1214  return this.results;
    -1215}
    -1216  }
    -1217
    -1218  /**
    -1219   * Collects all returned.
    -1220   */
-1221  static class CollectAllVisitor extends CollectingVisitor<Result> {
    -1222@Override
    -1223void add(Result r) {
    -1224  this.results.add(r);
    -1225}
    -1226  }
    -1227
    -1228  /**
    -1229   * A Visitor that skips offline 
    regions and split parents
    -1230   */
    -1231  public static abstract class 
    DefaultVisitorBase implements Visitor {
    -1232
    -1233public DefaultVisitorBase() {
    -1234  super();
    -1235}
    -1236
    -1237public abstract boolean 
    visitInternal(Result rowResult) throws IOException;
    -1238
    -1239@Override
    -1240public boolean visit(Result 
    rowResult) throws 

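The Visitor interface shown above is the extension point for full scans of hbase:meta: fullScanTables drives the scan and hands each catalog Result to visit, which returns false to stop early. A sketch that mirrors getTableStates, assuming an open Connection:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableState;

    final class TableStateCollector {
      /** Collect the decoded state of every table recorded in meta. */
      static List<TableState> collect(Connection conn) throws IOException {
        List<TableState> states = new ArrayList<>();
        MetaTableAccessor.Visitor collector = r -> {
          TableState state = MetaTableAccessor.getTableState(r);
          if (state != null) {
            states.add(state);
          }
          return true; // keep scanning
        };
        MetaTableAccessor.fullScanTables(conn, collector);
        return states;
      }
    }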
    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html 
    b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
    index 46cee7a..c1441ad 100644
    --- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
    +++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
    @@ -120,7 +120,7 @@ var activeTableTab = "activeTableTab";
     
     @InterfaceAudience.LimitedPrivate(value="Tools")
      @InterfaceStability.Evolving
    -public class HBaseFsck
    +public class HBaseFsck
     extends org.apache.hadoop.conf.Configured
     implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true;
     title="class or interface in java.io">Closeable
     HBaseFsck (hbck) is a tool for checking and repairing 
    region consistency and
    @@ -1376,7 +1376,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
     
     
     DEFAULT_TIME_LAG
-public static final long DEFAULT_TIME_LAG
+public static final long DEFAULT_TIME_LAG
     
     See Also:
     Constant
     Field Values
    @@ -1389,7 +1389,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
     
     
     DEFAULT_SLEEP_BEFORE_RERUN
-public static final long DEFAULT_SLEEP_BEFORE_RERUN
+public static final long DEFAULT_SLEEP_BEFORE_RERUN
     
     See Also:
     Constant
     Field Values
    @@ -1402,7 +1402,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
     
     
     MAX_NUM_THREADS
-private static final int MAX_NUM_THREADS
+private static final int MAX_NUM_THREADS
     
     See Also:
     Constant
     Field Values
    @@ -1415,7 +1415,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
     
     
     rsSupportsOffline
-private static boolean rsSupportsOffline
+private static boolean rsSupportsOffline
     
     
     
    @@ -1424,7 +1424,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
     
     
     DEFAULT_OVERLAPS_TO_SIDELINE
-private static final int DEFAULT_OVERLAPS_TO_SIDELINE
+private static final int DEFAULT_OVERLAPS_TO_SIDELINE
     
     See Also:
     Constant
     Field Values
    @@ -1437,7 +1437,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
     
     
     DEFAULT_MAX_MERGE
-private static final int DEFAULT_MAX_MERGE
+private static final int DEFAULT_MAX_MERGE
     
     See Also:
     Constant
     Field Values
    @@ -1450,7 +1450,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
     
     
     TO_BE_LOADED
    -private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String TO_BE_LOADED
    +private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String TO_BE_LOADED
     
     See Also:
     Constant
     Field Values
    @@ -1463,7 +1463,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
     
     
     HBCK_LOCK_FILE
    -private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String HBCK_LOCK_FILE
    +private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String HBCK_LOCK_FILE
     
     See Also:
     Constant
     Field Values
    @@ -1476,7 +1476,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
     
     
     DEFAULT_MAX_LOCK_FILE_ATTEMPTS
    -private static finalint DEFAULT_MAX_LOCK_FILE_ATTEMPTS
    +private static finalint DEFAULT_MAX_LOCK_FILE_ATTEMPTS
     
     See Also:
     Constant
     Field Values
    @@ -1489,7 +1489,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
     
     
     DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL
    -private static finalint DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL
    +private static finalint DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL
     
     See Also:
     Constant
     Field Values
    @@ -1502,7 +1502,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
     
     
     DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME
    -private static finalint DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME
    +private static finalint DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME
     
     See Also:
     Constant
     Field Values
    @@ -1515,7 +1515,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
     
     
     DEFAULT_WAIT_FOR_LOCK_TIMEOUT
    -private static finalint DEFAULT_WAIT_FOR_LOCK_TIMEOUT
    +private static finalint DEFAULT_WAIT_FOR_LOCK_TIMEOUT
     
     See Also:
     Constant
     Field Values
    @@ -1528,7 +1528,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
     
     
     DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS
    -private static finalint DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS
    +private static finalint DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS
     
     See Also:
     Constant
     Field Values
    @@ -1541,7 +1541,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
     
     
     DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL
    -private static finalint 
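
The lock-file constants above (max attempts, sleep interval, max sleep time) imply a bounded retry loop with a capped, growing sleep between attempts. HBaseFsck's real loop is not visible in this diff; the following is a hedged, generic sketch of that shape only.

import java.util.function.BooleanSupplier;

// Hedged sketch: try acquire() up to maxAttempts times, sleeping between
// tries with capped exponential backoff. All names here are illustrative.
final class BoundedRetry {
  static boolean tryAcquire(int maxAttempts, long sleepIntervalMs, long maxSleepMs,
      BooleanSupplier acquire) throws InterruptedException {
    long sleep = sleepIntervalMs;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      if (acquire.getAsBoolean()) {
        return true; // e.g. the hbck lock file was created
      }
      Thread.sleep(sleep);
      sleep = Math.min(maxSleepMs, sleep * 2); // capped exponential backoff
    }
    return false;
  }
}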

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.html 
    b/testdevapidocs/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.html
    index 004af5c..662ce9f 100644
    --- 
    a/testdevapidocs/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.html
    +++ 
    b/testdevapidocs/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";



-public class TestFullBackupWithFailures
+public class TestFullBackupWithFailures
extends TestBackupBase


@@ -150,6 +150,10 @@ extends TestBackupBase
Field and Description

+static HBaseClassTestRule
+CLASS_RULE
+
+
private static org.slf4j.Logger
LOG

@@ -229,13 +233,22 @@ extends TestBackupBase
+
+
+
+
+CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
+
+




LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG



@@ -252,7 +265,7 @@ extends TestBackupBase

TestFullBackupWithFailures
-public TestFullBackupWithFailures()
+public TestFullBackupWithFailures()



@@ -269,7 +282,7 @@ extends TestBackupBase

testFullBackupWithFailures
-public void testFullBackupWithFailures()
+public void testFullBackupWithFailures()
throws Exception

Throws:
@@ -283,7 +296,7 @@ extends TestBackupBase

runBackupAndFailAtStage
-public void runBackupAndFailAtStage(int stage)
+public void runBackupAndFailAtStage(int stage)
throws Exception

Throws:
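
The CLASS_RULE field this diff adds to each test class follows the usual HBase test pattern. A minimal sketch, assuming HBaseClassTestRule.forClass(...) as the factory (the factory call itself is not shown in this diff):

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.junit.ClassRule;

public class TestFullBackupWithFailures {
  // One class rule per test class; it enforces test categorization and
  // a class-level timeout. forClass(...) is assumed here.
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestFullBackupWithFailures.class);
}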
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/backup/TestFullRestore.html
    --
    diff --git a/testdevapidocs/org/apache/hadoop/hbase/backup/TestFullRestore.html 
    b/testdevapidocs/org/apache/hadoop/hbase/backup/TestFullRestore.html
    index 709e735..5a33182 100644
    --- a/testdevapidocs/org/apache/hadoop/hbase/backup/TestFullRestore.html
    +++ b/testdevapidocs/org/apache/hadoop/hbase/backup/TestFullRestore.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";



-public class TestFullRestore
+public class TestFullRestore
extends TestBackupBase


@@ -150,6 +150,10 @@ extends TestBackupBase
Field and Description

+static HBaseClassTestRule
+CLASS_RULE
+
+
private static org.slf4j.Logger
LOG

@@ -295,13 +299,22 @@ extends TestBackupBase
+
+
+
+
+CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
+
+




LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG



@@ -318,7 +331,7 @@ extends TestBackupBase

TestFullRestore
-public TestFullRestore()
+public TestFullRestore()



@@ -335,7 +348,7 @@ extends TestBackupBase

testFullRestoreSingle
-public void testFullRestoreSingle()
+public void testFullRestoreSingle()
throws Exception
Verify that a single table is restored to a new table.

@@ -350,7 +363,7 @@ extends TestBackupBase

testFullRestoreSingleCommand
-public void testFullRestoreSingleCommand()
+public void testFullRestoreSingleCommand()
throws Exception

Throws:
@@ -364,7 +377,7 @@ extends TestBackupBase

testFullRestoreCheckCommand
-public void testFullRestoreCheckCommand()
+public void testFullRestoreCheckCommand()
throws Exception

Throws:
@@ -378,7 +391,7 @@ extends TestBackupBase

testFullRestoreMultiple
-public void testFullRestoreMultiple()
+public void testFullRestoreMultiple()
throws Exception
Verify that multiple tables are restored to new tables.

@@ -393,7 +406,7 @@ extends TestBackupBase

testFullRestoreMultipleCommand
-public void testFullRestoreMultipleCommand()
+public void testFullRestoreMultipleCommand()
throws Exception
Verify that multiple tables are restored to new tables.

@@ -408,7 +421,7 @@ extends TestBackupBase

testFullRestoreSingleOverwrite
-public void testFullRestoreSingleOverwrite()
+public void testFullRestoreSingleOverwrite()
throws

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.HistoryCommand.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.HistoryCommand.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.HistoryCommand.html
    index eb9e252..667152a 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.HistoryCommand.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.HistoryCommand.html
@@ -28,22 +28,22 @@
020
021import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH;
022import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC;
-023import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
-024import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
-025import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
-026import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
-027import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
-028import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_BACKUP_DESC;
-029import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
-030import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
-031import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
-032import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_LIST_DESC;
-033import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
-034import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
-035import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME;
-036import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_DESC;
-037import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
-038import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
+023import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
+024import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
+025import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
+026import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
+027import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
+028import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
+029import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
+030import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_BACKUP_DESC;
+031import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
+032import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
+033import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
+034import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_LIST_DESC;
+035import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
+036import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
+037import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME;
+038import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_DESC;
039
040import java.io.IOException;
041import java.net.URI;
@@ -70,194 +70,194 @@
062import org.apache.hadoop.hbase.backup.util.BackupUtils;
063import org.apache.hadoop.hbase.client.Connection;
064import org.apache.hadoop.hbase.client.ConnectionFactory;
-065import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-066import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-067import org.apache.yetus.audience.InterfaceAudience;
-068
-069/**
-070 * General backup commands, options and usage messages
-071 */
-072
+065import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+066import org.apache.yetus.audience.InterfaceAudience;
+067
+068import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+069
+070/**
+071 * General backup commands, options and usage messages
+072 */
073@InterfaceAudience.Private
074public final class BackupCommands {
-075
-076  public final static String INCORRECT_USAGE = "Incorrect usage";
-077
-078  public final static String TOP_LEVEL_NOT_ALLOWED =
-079      "Top level (root) folder is not allowed to be a backup destination";
-080
-081  public static final String USAGE = "Usage:

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitScope.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitScope.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitScope.html
    index 914b1c6..03a0b2a 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitScope.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitScope.html
@@ -59,646 +59,711 @@
051@InterfaceStability.Evolving
052public class ScannerContext {
053
-054  /**
-055   * Two sets of the same fields. One for the limits, another for the progress towards those limits
-056   */
-057  LimitFields limits;
-058  LimitFields progress;
-059
-060  /**
-061   * The state of the scanner after the invocation of {@link InternalScanner#next(java.util.List)}
-062   * or {@link RegionScanner#next(java.util.List)}.
-063   */
-064  NextState scannerState;
-065  private static final NextState DEFAULT_STATE = NextState.MORE_VALUES;
-066
-067  /**
-068   * Used as an indication to invocations of {@link InternalScanner#next(java.util.List)} and
-069   * {@link RegionScanner#next(java.util.List)} that, if true, the progress tracked within this
-070   * {@link ScannerContext} instance should be considered while evaluating the limits. Useful for
-071   * enforcing a set of limits across multiple calls (i.e. the limit may not be reached in a single
-072   * invocation, but any progress made should be considered in future invocations)
-073   * <p>
-074   * Defaulting this value to false means that, by default, any tracked progress will be wiped clean
-075   * on invocations to {@link InternalScanner#next(java.util.List)} and
-076   * {@link RegionScanner#next(java.util.List)} and the call will be treated as though no progress
-077   * has been made towards the limits so far.
-078   * <p>
-079   * This is an important mechanism. Users of Internal/Region scanners expect that they can define
-080   * some limits and then repeatedly invoke {@link InternalScanner#next(List)} or
-081   * {@link RegionScanner#next(List)} where each invocation respects these limits separately.
-082   * <p>
-083   * For example: <pre> {@code
-084   * ScannerContext context = new ScannerContext.newBuilder().setBatchLimit(5).build();
-085   * RegionScanner scanner = ...
-086   * List<Cell> results = new ArrayList<Cell>();
-087   * while(scanner.next(results, context)) {
-088   *   // Do something with a batch of 5 cells
-089   * }
-090   * }</pre> However, in the case of RPCs, the server wants to be able to define a set of
-091   * limits for a particular RPC request and have those limits respected across multiple
-092   * invocations. This means that the progress made towards the limits in earlier calls will be
-093   * saved and considered in future invocations
-094   */
-095  boolean keepProgress;
-096  private static boolean DEFAULT_KEEP_PROGRESS = false;
-097
-098  private Cell lastPeekedCell = null;
+054  LimitFields limits;
+055  /**
+056   * A different set of progress fields. Only include batch, dataSize and heapSize. Compare to
+057   * LimitFields, ProgressFields doesn't contain time field. As we save a deadline in LimitFields,
+058   * so use {@link System#currentTimeMillis()} directly when check time limit.
+059   */
+060  ProgressFields progress;
+061
+062  /**
+063   * The state of the scanner after the invocation of {@link InternalScanner#next(java.util.List)}
+064   * or {@link RegionScanner#next(java.util.List)}.
+065   */
+066  NextState scannerState;
+067  private static final NextState DEFAULT_STATE = NextState.MORE_VALUES;
+068
+069  /**
+070   * Used as an indication to invocations of {@link InternalScanner#next(java.util.List)} and
+071   * {@link RegionScanner#next(java.util.List)} that, if true, the progress tracked within this
+072   * {@link ScannerContext} instance should be considered while evaluating the limits. Useful for
+073   * enforcing a set of limits across multiple calls (i.e. the limit may not be reached in a single
+074   * invocation, but any progress made should be considered in future invocations)
+075   * <p>
+076   * Defaulting this value to false means that, by default, any tracked progress will be wiped clean
+077   * on invocations to {@link InternalScanner#next(java.util.List)} and
+078   * {@link RegionScanner#next(java.util.List)} and the call will be treated as though no progress
+079   * has been made towards the limits so far.
+080   * <p>
+081   * This is an important mechanism. Users of Internal/Region scanners expect that they can define
+082   * some limits and then repeatedly invoke {@link InternalScanner#next(List)} or
+083   * {@link RegionScanner#next(List)} where each invocation respects these limits
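
To make the quoted javadoc concrete: a minimal sketch, assuming the ScannerContext builder API shown above, of draining a RegionScanner in fixed-size batches. Obtaining the scanner itself is out of scope here.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.ScannerContext;

// Hedged sketch: each next() call returns at most `batch` cells, because
// progress is reset between calls (keepProgress is false by default).
final class BatchedScan {
  static void drain(RegionScanner scanner, int batch) throws IOException {
    ScannerContext context = ScannerContext.newBuilder().setBatchLimit(batch).build();
    List<Cell> results = new ArrayList<>();
    boolean more;
    do {
      more = scanner.next(results, context); // respects the batch limit per call
      // ... consume up to `batch` cells in `results` ...
      results.clear();
    } while (more);
  }
}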

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.html 
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.html
    index 1566a58..93e57a9 100644
    --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.html
    +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.html
@@ -290,7 +290,7 @@ extends Object


add
-public void add(Map.Entry<BlockCacheKey,BucketCache.BucketEntry> entry)
+public void add(Map.Entry<BlockCacheKey,BucketCache.BucketEntry> entry)
Attempt to add the specified entry to this queue.

If the queue is smaller than the max size, or if the specified element is
@@ -308,7 +308,7 @@ extends Object


poll
-public Map.Entry<BlockCacheKey,BucketCache.BucketEntry> poll()
+public Map.Entry<BlockCacheKey,BucketCache.BucketEntry> poll()

Returns:
The next element in this queue, or null if the queue is
@@ -322,7 +322,7 @@ extends Object


pollLast
-public Map.Entry<BlockCacheKey,BucketCache.BucketEntry> pollLast()
+public Map.Entry<BlockCacheKey,BucketCache.BucketEntry> pollLast()

Returns:
The last element in this queue, or null if the queue is
@@ -336,7 +336,7 @@ extends Object


cacheSize
-public long cacheSize()
+public long cacheSize()
Total size of all elements in this queue.

Returns:
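
The add() contract above describes a bounded queue that retains only the largest elements. A generic, self-contained sketch of that idea follows; CachedEntryQueue's actual comparator and entry types differ, so this is illustrative only.

import java.util.PriorityQueue;

// Hedged sketch: keep at most maxSize elements, discarding the smallest
// when a larger one arrives. A min-heap makes the smallest element cheap
// to inspect and evict.
final class BoundedQueue<E extends Comparable<E>> {
  private final PriorityQueue<E> queue = new PriorityQueue<>(); // min-heap
  private final int maxSize;

  BoundedQueue(int maxSize) { this.maxSize = maxSize; }

  void add(E e) {
    if (queue.size() < maxSize) {
      queue.offer(e);
    } else if (queue.peek() != null && queue.peek().compareTo(e) < 0) {
      queue.poll();   // drop current smallest
      queue.offer(e); // keep the larger element
    }
  }

  E poll() { return queue.poll(); } // smallest remaining, or null
}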
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html 
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
    index 0d84b3f..5c55313 100644
    --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
    +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
@@ -273,12 +273,12 @@

java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)

-org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer.State
-org.apache.hadoop.hbase.io.hfile.CacheConfig.ExternalBlockCaches
-org.apache.hadoop.hbase.io.hfile.BlockPriority
org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory
+org.apache.hadoop.hbase.io.hfile.CacheConfig.ExternalBlockCaches
org.apache.hadoop.hbase.io.hfile.BlockType
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType
+org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer.State
+org.apache.hadoop.hbase.io.hfile.BlockPriority
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.FastPathHandler.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.FastPathHandler.html
     
    b/devapidocs/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.FastPathHandler.html
    index aad02e1..0e1958d 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.FastPathHandler.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.FastPathHandler.html
@@ -330,7 +330,7 @@ extends

getCallRunner
-protected CallRunner getCallRunner()
+protected CallRunner getCallRunner()
throws InterruptedException

Overrides:
@@ -348,7 +348,7 @@ extends


loadCallRunner

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.ImplData.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.ImplData.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.ImplData.html
    new file mode 100644
    index 000..a2a9cc9
    --- /dev/null
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.ImplData.html
@@ -0,0 +1,223 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+<head>
+<title>Source code</title>
+</head>
+<body>
+001// Autogenerated Jamon proxy
+002// /home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon
+003
+004package org.apache.hadoop.hbase.tmpl.master;
+005
+006// 27, 5
+007import java.util.Collections;
+008// 28, 5
+009import java.util.List;
+010// 29, 5
+011import java.util.Map;
+012// 30, 5
+013import java.util.Set;
+014// 31, 5
+015import java.util.stream.Collectors;
+016// 32, 5
+017import org.apache.hadoop.hbase.master.HMaster;
+018// 33, 5
+019import org.apache.hadoop.hbase.ServerLoad;
+020// 34, 5
+021import org.apache.hadoop.hbase.RSGroupTableAccessor;
+022// 35, 5
+023import org.apache.hadoop.hbase.master.ServerManager;
+024// 36, 5
+025import org.apache.hadoop.hbase.net.Address;
+026// 37, 5
+027import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
+028// 38, 5
+029import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
+030
+031@org.jamon.annotations.Template(
+032  signature = "7D9222DA869F721FABDA6206A97B5374",
+033  requiredArguments = {
+034    @org.jamon.annotations.Argument(name = "master", type = "HMaster"),
+035    @org.jamon.annotations.Argument(name = "serverManager", type = "ServerManager")})
+036public class RSGroupListTmpl
+037  extends org.jamon.AbstractTemplateProxy
+038{
+039
+040  public RSGroupListTmpl(org.jamon.TemplateManager p_manager)
+041  {
+042    super(p_manager);
+043  }
+044
+045  protected RSGroupListTmpl(String p_path)
+046  {
+047    super(p_path);
+048  }
+049
+050  public RSGroupListTmpl()
+051  {
+052    super("/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl");
+053  }
+054
+055  public interface Intf
+056    extends org.jamon.AbstractTemplateProxy.Intf
+057  {
+058
+059    void renderNoFlush(final java.io.Writer jamonWriter) throws java.io.IOException;
+060
+061  }
+062  public static class ImplData
+063    extends org.jamon.AbstractTemplateProxy.ImplData
+064  {
+065    // 22, 1
+066    public void setMaster(HMaster master)
+067    {
+068      // 22, 1
+069      m_master = master;
+070    }
+071    public HMaster getMaster()
+072    {
+073      return m_master;
+074    }
+075    private HMaster m_master;
+076    // 23, 1
+077    public void setServerManager(ServerManager serverManager)
+078    {
+079      // 23, 1
+080      m_serverManager = serverManager;
+081    }
+082    public ServerManager getServerManager()
+083    {
+084      return m_serverManager;
+085    }
+086    private ServerManager m_serverManager;
+087  }
+088  @Override
+089  protected org.jamon.AbstractTemplateProxy.ImplData makeImplData()
+090  {
+091    return new ImplData();
+092  }
+093  @Override public ImplData getImplData()
+094  {
+095    return (ImplData) super.getImplData();
+096  }
+097
+098
+099  @Override
+100  public org.jamon.AbstractTemplateImpl constructImpl(Class<? extends org.jamon.AbstractTemplateImpl> p_class){
+101    try
+102    {
+103      return p_class
+104        .getConstructor(new Class [] { org.jamon.TemplateManager.class, ImplData.class })
+105        .newInstance(new Object [] { getTemplateManager(), getImplData()});
+106    }
+107    catch (RuntimeException e)
+108    {
+109      throw e;
+110    }
+111    catch (Exception e)
+112    {
+113      throw new RuntimeException(e);
+114    }
+115  }
+116
+117  @Override
+118  protected org.jamon.AbstractTemplateImpl constructImpl(){
+119    return new RSGroupListTmplImpl(getTemplateManager(), getImplData());
+120  }
+121  public org.jamon.Renderer makeRenderer(final HMaster master, final ServerManager serverManager)
+122  {
+123    return new org.jamon.AbstractRenderer() {
+124      @Override
+125      public void renderTo(final java.io.Writer jamonWriter)
+126        throws java.io.IOException
+127      {
+128        render(jamonWriter, master, serverManager);
+129      }
+130    };
+131  }
+132
+133  public void render(final java.io.Writer jamonWriter, final HMaster master, final ServerManager serverManager)
+134    throws java.io.IOException
+135  {
+136    renderNoFlush(jamonWriter, master, serverManager);
+137    jamonWriter.flush();
+138  }
+139  public void renderNoFlush(final java.io.Writer jamonWriter, final HMaster master, final ServerManager serverManager)
+140    throws java.io.IOException
+141  {
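
A minimal sketch of how a caller renders this generated proxy, based only on the render()/makeRenderer() methods shown above; obtaining live HMaster and ServerManager instances is out of scope here.

import java.io.IOException;
import java.io.StringWriter;

import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.tmpl.master.RSGroupListTmpl;

// Hedged sketch: render() delegates to renderNoFlush() and then flushes the
// writer, exactly as in the generated code above.
final class RenderRsGroups {
  static String toHtml(HMaster master, ServerManager serverManager) throws IOException {
    StringWriter out = new StringWriter();
    new RSGroupListTmpl().render(out, master, serverManager);
    return out.toString();
  }
}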
    

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html
     
    b/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html
    index 2f116fc..ecf1a43 100644
    --- 
    a/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html
    +++ 
    b/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html
@@ -197,7 +197,7 @@ extends org.apache.hadoop.hbase.regionserver.HRegionServer


Fields inherited from class org.apache.hadoop.hbase.regionserver.HRegionServer
-cacheConfig, cacheFlusher, clusterConnection, clusterStatusTracker, compactSplitThread, conf, configurationManager, csm, executorService, fs, fsOk, fsUtilizationChore, hMemManager, infoServer, initLatch, leases, lock, MASTER_HOSTNAME_KEY, metaTableLocator, movedRegions, msgInterval, numRegionsToReport, onlineRegions, regionFavoredNodesMap, REGIONSERVER, regionsInTransitionInRS, replicationSinkHandler, replicationSourceHandler, rpcServices, secureBulkLoadManager, serverName, sleeper, startcode, tableDescriptors, TEST_SKIP_REPORTING_TRANSITION, threadWakeFrequency, useThisHostnameInstead, walFactory, walFs, walRoller, zooKeeper
+cacheConfig, cacheFlusher, clusterConnection, clusterStatusTracker, compactSplitThread, conf, configurationManager, csm, executorService, fs, fsOk, fsUtilizationChore, hMemManager, infoServer, leases, lock, MASTER_HOSTNAME_KEY, metaTableLocator, movedRegions, msgInterval, numRegionsToReport, onlineRegions, regionFavoredNodesMap, REGIONSERVER, regionsInTransitionInRS, replicationSinkHandler, replicationSourceHandler, rpcServices, secureBulkLoadManager, serverName, sleeper, startcode, tableDescriptors, TEST_SKIP_REPORTING_TRANSITION, threadWakeFrequency, useThisHostnameInstead, walFactory, walFs, walRoller, zooKeeper



@@ -262,7 +262,7 @@ extends org.apache.hadoop.hbase.regionserver.HRegionServer


Methods inherited from class org.apache.hadoop.hbase.regionserver.HRegionServer
-abort, addRegion, addToMovedRegions, canCreateBaseZNode, canUpdateTableDescriptor, checkFileSystem, cleanMovedRegions, clearRegionBlockCache, closeAllRegions, closeAndOfflineRegionForSplitOrMerge, closeRegion, configureInfoServer, constructRegionServer, convertThrowableToIOE, createClusterConnection, createConnection, createRegionLoad, createRegionServerStatusStub, createRegionServerStatusStub, createRpcServices, execRegionServerService, executeProcedure, getCacheConfig, getChoreService, getClusterConnection, getClusterId, getCompactionPressure, getCompactionRequestor, getCompactSplitThread, getConfiguration, getConfigurationManager, getConnection, getCoordinatedStateManager, getDumpServlet, getEventLoopGroupConfig, getExecutorService, getFavoredNodesForRegion, getFileSystem, getFlushPressure, getFlushRequester, getFlushThroughputController, getFsTableDescriptors, getHeapMemoryManager, getInfoServer, getLastSequenceId, getLeases, getMasterAddressTracker, getMetaTableLocator, getMetaTableObserver, getMetrics, getMostLoadedRegions, getNonceManager, getNumberOfOnlineRegions, getOnlineRegion, getOnlineRegionsLocalContext, getOnlineTables, getProcessName, getRegion, getRegion, getRegionBlockLocations, getRegionByEncodedName, getRegionByEncodedName, getRegions, getRegions, getRegionServerAccounting, getRegionServerCoprocessorHost, getRegionServerCoprocessors, getRegionServerMetrics, getRegionServerRpcQuotaManager, getRegionServerSpaceQuotaManager, getRegionsInTransitionInRS, getReplicationSourceService, getRootDir, getRpcServer, getRSRpcServices, getSecureBulkLoadManager, getServerName, getStartcode, getTableDescriptors, getThreadWakeFrequency, getWAL, getWALFileSystem, getWalRoller, getWALRootDir, getWALs, getZooKeeper, initializeMemStoreChunkCreator, isAborted, isOnline, isStopped, isStopping, login, main, movedRegionCleanerPeriod, onConfigurationChange, postOpenDeployTasks, regionLock, registerService, remoteProcedureComplete, removeRegion, reportRegionSizesForQuotas, reportRegionStateTransition, sendShutdownInterrupt, setInitLatch, setupClusterConnection, shouldUseThisHostnameInstead, stop, stop, stopServiceThreads, toString, tryRegionServerReport, unassign, updateConfiguration, updateRegionFavoredNodesMapping, waitForMasterActive, waitForServerOnline, walRollRequestFinished
+abort, addRegion, addToMovedRegions, canCreateBaseZNode, canUpdateTableDescriptor, checkFileSystem, cleanMovedRegions, clearRegionBlockCache, closeAllRegions, closeAndOfflineRegionForSplitOrMerge, closeRegion, configureInfoServer, constructRegionServer, convertThrowableToIOE, createClusterConnection, createConnection, createRegionLoad, createRegionServerStatusStub,

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html
    index 4fec5eb..63c9ca7 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html
@@ -35,254 +35,262 @@
027import java.util.List;
028import java.util.concurrent.ConcurrentHashMap;
029import java.util.concurrent.ConcurrentMap;
-030
+030import java.util.concurrent.locks.Lock;
031import org.apache.hadoop.conf.Configuration;
-032import org.apache.yetus.audience.InterfaceAudience;
-033import org.slf4j.Logger;
-034import org.slf4j.LoggerFactory;
-035// imports for classes still in regionserver.wal
-036import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-037import org.apache.hadoop.hbase.util.Bytes;
-038import org.apache.hadoop.hbase.util.IdLock;
-039
-040/**
-041 * A WAL Provider that returns a WAL per group of regions.
-042 *
-043 * This provider follows the decorator pattern and mainly holds the logic for WAL grouping.
-044 * WAL creation/roll/close is delegated to {@link #DELEGATE_PROVIDER}
-045 *
-046 * Region grouping is handled via {@link RegionGroupingStrategy} and can be configured via the
-047 * property "hbase.wal.regiongrouping.strategy". Current strategy choices are
-048 * <ul>
-049 *   <li><em>defaultStrategy</em> : Whatever strategy this version of HBase picks. currently
-050 *                                  "bounded".</li>
-051 *   <li><em>identity</em> : each region belongs to its own group.</li>
-052 *   <li><em>bounded</em> : bounded number of groups and region evenly assigned to each group.</li>
-053 * </ul>
-054 * Optionally, a FQCN to a custom implementation may be given.
-055 */
-056@InterfaceAudience.Private
-057public class RegionGroupingProvider implements WALProvider {
-058  private static final Logger LOG = LoggerFactory.getLogger(RegionGroupingProvider.class);
-059
-060  /**
-061   * Map identifiers to a group number.
-062   */
-063  public static interface RegionGroupingStrategy {
-064    String GROUP_NAME_DELIMITER = ".";
-065
-066    /**
-067     * Given an identifier and a namespace, pick a group.
-068     */
-069    String group(final byte[] identifier, byte[] namespace);
-070    void init(Configuration config, String providerId);
-071  }
-072
-073  /**
-074   * Maps between configuration names for strategies and implementation classes.
-075   */
-076  static enum Strategies {
-077    defaultStrategy(BoundedGroupingStrategy.class),
-078    identity(IdentityGroupingStrategy.class),
-079    bounded(BoundedGroupingStrategy.class),
-080    namespace(NamespaceGroupingStrategy.class);
-081
-082    final Class<? extends RegionGroupingStrategy> clazz;
-083    Strategies(Class<? extends RegionGroupingStrategy> clazz) {
-084      this.clazz = clazz;
-085    }
-086  }
-087
-088  /**
-089   * instantiate a strategy from a config property.
-090   * requires conf to have already been set (as well as anything the provider might need to read).
-091   */
-092  RegionGroupingStrategy getStrategy(final Configuration conf, final String key,
-093      final String defaultValue) throws IOException {
-094    Class<? extends RegionGroupingStrategy> clazz;
-095    try {
-096      clazz = Strategies.valueOf(conf.get(key, defaultValue)).clazz;
-097    } catch (IllegalArgumentException exception) {
-098      // Fall back to them specifying a class name
-099      // Note that the passed default class shouldn't actually be used, since the above only fails
-100      // when there is a config value present.
-101      clazz = conf.getClass(key, IdentityGroupingStrategy.class, RegionGroupingStrategy.class);
-102    }
-103    LOG.info("Instantiating RegionGroupingStrategy of type " + clazz);
-104    try {
-105      final RegionGroupingStrategy result = clazz.newInstance();
-106      result.init(conf, providerId);
-107      return result;
-108    } catch (InstantiationException exception) {
-109      LOG.error("couldn't set up region grouping strategy, check config key " +
-110          REGION_GROUPING_STRATEGY);
-111      LOG.debug("Exception details for failure to load region grouping strategy.", exception);
-112      throw new IOException("couldn't set up region grouping strategy", exception);
-113    } catch (IllegalAccessException exception) {
-114      LOG.error("couldn't set up region grouping strategy, check config key " +
-115          REGION_GROUPING_STRATEGY);
-116      LOG.debug("Exception details for failure to load region grouping strategy.", exception);
-117      throw new IOException("couldn't set up region grouping strategy", exception);
-118    }
-119  }
-120
-121
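
A minimal sketch of selecting a grouping strategy via the property named in the javadoc above. The property key and the strategy names ("identity", "bounded", "namespace", or a fully-qualified class name) are quoted from the source; everything else is illustrative.

import org.apache.hadoop.conf.Configuration;

// Hedged sketch: the enum lookup in getStrategy() resolves these short names;
// an unrecognized value falls through to the conf.getClass(...) FQCN branch.
final class WalGroupingConfig {
  static Configuration boundedGrouping() {
    Configuration conf = new Configuration();
    conf.set("hbase.wal.regiongrouping.strategy", "bounded");
    return conf;
  }
}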

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
     
    b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
    index f9f3d54..a28a337 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";



-private class RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
+private class RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
extends RawAsyncHBaseAdmin.TableProcedureBiConsumer


@@ -232,7 +232,7 @@ extends RawAsyncHBaseAdmin.TableProcedureBiConsumer

AddColumnFamilyProcedureBiConsumer
-AddColumnFamilyProcedureBiConsumer(TableName tableName)
+AddColumnFamilyProcedureBiConsumer(TableName tableName)



@@ -249,7 +249,7 @@ extends RawAsyncHBaseAdmin.TableProcedureBiConsumer

getOperationType
-String getOperationType()
+String getOperationType()

Specified by:
getOperationType in class RawAsyncHBaseAdmin.TableProcedureBiConsumer
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
     
    b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
    index 5479838..63ef321 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";


@FunctionalInterface
-private static interface RawAsyncHBaseAdmin.AdminRpcCall<RESP,REQ>
+private static interface RawAsyncHBaseAdmin.AdminRpcCall<RESP,REQ>



@@ -159,7 +159,7 @@ private static interface

call
-void call(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.Interface stub,
+void call(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.Interface stub,
          HBaseRpcController controller,
          REQ req,
          org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback<RESP> done)
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html 
    b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
    index cd4daf6..8b1303f 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";


@FunctionalInterface
-private static interface RawAsyncHBaseAdmin.Converter<D,S>
+private static interface RawAsyncHBaseAdmin.Converter<D,S>



@@ -156,7 +156,7 @@ private static interface

convert
-D convert(S src)
+D convert(S src)
throws IOException

Throws:
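
Converter is a plain single-method shape, so it can be written as a lambda or method reference. A self-contained sketch of the same pattern; the names below are illustrative, not HBase's.

import java.io.IOException;

// Hedged sketch of the Converter pattern above: a single-method interface
// that turns a source value into a destination value.
@FunctionalInterface
interface Converter<D, S> {
  D convert(S src) throws IOException;
}

class ConverterDemo {
  public static void main(String[] args) throws IOException {
    Converter<Integer, String> parse = Integer::parseInt; // S=String, D=Integer
    System.out.println(parse.convert("42")); // prints 42
  }
}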
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
     
    b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
    index 01f186e..00eacf7 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
    @@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -private class RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer
    +private class RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer
     

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
     
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
    index 2041055..b03d0d0 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
@@ -331,8 +331,8 @@


default void
-MasterObserver.postGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx,
-    ClusterStatus status)
+MasterObserver.postGetClusterMetrics(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    ClusterMetrics status)
Called after get cluster status.


@@ -811,7 +811,7 @@


default void
-MasterObserver.preGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx)
+MasterObserver.preGetClusterMetrics(ObserverContext<MasterCoprocessorEnvironment> ctx)
Called before get cluster status.


@@ -1483,128 +1483,124 @@


void
-AccessController.preGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx)
-
-
-void
AccessController.preGetLocks(ObserverContext<MasterCoprocessorEnvironment> ctx)

-
+
void
AccessController.preGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx,
    String namespace)

-
+
void
AccessController.preGetProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx)

-
+
void
AccessController.preGetReplicationPeerConfig(ObserverContext<MasterCoprocessorEnvironment> ctx,
    String peerId)

-
+
void
AccessController.preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
    List<TableName> tableNamesList,
    List<TableDescriptor> descriptors,
    String regex)

-
+
void
AccessController.preListDecommissionedRegionServers(ObserverContext<MasterCoprocessorEnvironment> ctx)

-
+
void
AccessController.preListReplicationPeers(ObserverContext<MasterCoprocessorEnvironment> ctx,
    String regex)

-
+
void
AccessController.preListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
    SnapshotDescription snapshot)

-
+
void
AccessController.preLockHeartbeat(ObserverContext<MasterCoprocessorEnvironment> ctx,
    TableName tableName,
    String description)

-
+
void
AccessController.preMergeRegions(ObserverContext<MasterCoprocessorEnvironment> ctx,
    RegionInfo[] regionsToMerge)

-
+
void
AccessController.preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
    NamespaceDescriptor ns)

-
+
void
AccessController.preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c,
    TableName tableName,
    TableDescriptor htd)

-
+
void
CoprocessorWhitelistMasterObserver.preModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
    TableName tableName,
    TableDescriptor htd)

-
+
void
AccessController.preMove(ObserverContext<MasterCoprocessorEnvironment> c,
    RegionInfo region,
    ServerName srcServer,
    ServerName destServer)

-
+
void
AccessController.preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
    Set<Address> servers,
    String targetGroup)

-
+
void
AccessController.preMoveServersAndTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
    Set<Address> servers,
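
A hedged sketch of a coprocessor using the renamed hook from the table above. The Optional-based MasterCoprocessor wiring is the usual HBase 2.x pattern, assumed here rather than shown in this diff; registration via hbase.coprocessor.master.classes is likewise assumed and not shown.

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hedged sketch: intercept cluster-metrics requests before the master
// gathers them, matching the preGetClusterMetrics signature above.
public class AuditObserver implements MasterCoprocessor, MasterObserver {
  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void preGetClusterMetrics(ObserverContext<MasterCoprocessorEnvironment> ctx)
      throws IOException {
    // e.g. log or reject the request here
  }
}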
     title="class or interface in java.util">SetAddressservers,
    

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
    index 5b3b750..a1f3f7e 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
@@ -97,3307 +97,3304 @@
089import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
090import org.apache.hbase.thirdparty.io.netty.util.Timeout;
091import org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfoBuilder.MutableRegionInfo.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfoBuilder.MutableRegionInfo.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfoBuilder.MutableRegionInfo.html
index 9dbe151..d63e84a 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfoBuilder.MutableRegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfoBuilder.MutableRegionInfo.html
    @@ -169,6 +169,6 @@
     
     
     
-Copyright © 2007–2017 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfoBuilder.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfoBuilder.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfoBuilder.html
    index 8552929..ee22333 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfoBuilder.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfoBuilder.html
    @@ -190,6 +190,6 @@
     
     
     
-Copyright © 2007–2017 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfoDisplay.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfoDisplay.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfoDisplay.html
    index 2f9ac10..3d71842 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfoDisplay.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfoDisplay.html
    @@ -120,6 +120,6 @@
     
     
     
-Copyright © 2007–2017 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLoadStats.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLoadStats.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLoadStats.html
    index 98d3d0b..3991d59 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLoadStats.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLoadStats.html
    @@ -250,6 +250,6 @@
     
     
     
-Copyright © 2007–2017 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
    index 6a62b40..2ac1b78 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
    @@ -270,6 +270,6 @@ the order they are declared.
     
     
     
-Copyright © 2007–2017 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
    index ac8bb88..fbe0658 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
    @@ -497,6 +497,6 @@ service.
     
     
     
-Copyright © 2007–2017 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
     
     
    
    

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
index 985778f..854ba52 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
    @@ -662,1932 +662,1924 @@
 654    completeCompaction(toBeRemovedStoreFiles);
 655  }
 656
-657  private HStoreFile createStoreFileAndReader(final Path p) throws IOException {
-658    StoreFileInfo info = new StoreFileInfo(conf, this.getFileSystem(), p);
-659    return createStoreFileAndReader(info);
-660  }
-661
-662  private HStoreFile createStoreFileAndReader(StoreFileInfo info) throws IOException {
-663    info.setRegionCoprocessorHost(this.region.getCoprocessorHost());
-664    HStoreFile storeFile = new HStoreFile(this.getFileSystem(), info, this.conf, this.cacheConf,
-665        this.family.getBloomFilterType(), isPrimaryReplicaStore());
-666    storeFile.initReader();
-667    return storeFile;
-668  }
-669
-670  /**
-671   * This message intends to inform the MemStore that next coming updates
-672   * are going to be part of the replaying edits from WAL
-673   */
-674  public void startReplayingFromWAL(){
-675    this.memstore.startReplayingFromWAL();
-676  }
-677
-678  /**
-679   * This message intends to inform the MemStore that the replaying edits from WAL
-680   * are done
-681   */
-682  public void stopReplayingFromWAL(){
-683    this.memstore.stopReplayingFromWAL();
-684  }
-685
-686  /**
-687   * Adds a value to the memstore
-688   */
-689  public void add(final Cell cell, MemStoreSizing memstoreSizing) {
-690    lock.readLock().lock();
-691    try {
-692      this.memstore.add(cell, memstoreSizing);
-693    } finally {
-694      lock.readLock().unlock();
-695    }
-696  }
-697
-698  /**
-699   * Adds the specified value to the memstore
-700   */
-701  public void add(final Iterable<Cell> cells, MemStoreSizing memstoreSizing) {
-702    lock.readLock().lock();
-703    try {
-704      memstore.add(cells, memstoreSizing);
-705    } finally {
-706      lock.readLock().unlock();
-707    }
-708  }
-709
-710  @Override
-711  public long timeOfOldestEdit() {
-712    return memstore.timeOfOldestEdit();
-713  }
-714
-715  /**
-716   * @return All store files.
-717   */
-718  @Override
-719  public Collection<HStoreFile> getStorefiles() {
-720    return this.storeEngine.getStoreFileManager().getStorefiles();
-721  }
-722
-723  @Override
-724  public Collection<HStoreFile> getCompactedFiles() {
-725    return this.storeEngine.getStoreFileManager().getCompactedfiles();
-726  }
-727
-728  /**
-729   * This throws a WrongRegionException if the HFile does not fit in this region, or an
-730   * InvalidHFileException if the HFile is not valid.
-731   */
-732  public void assertBulkLoadHFileOk(Path srcPath) throws IOException {
-733    HFile.Reader reader = null;
-734    try {
-735      LOG.info("Validating hfile at " + srcPath + " for inclusion in "
-736          + "store " + this + " region " + this.getRegionInfo().getRegionNameAsString());
-737      reader = HFile.createReader(srcPath.getFileSystem(conf), srcPath, cacheConf,
-738        isPrimaryReplicaStore(), conf);
-739      reader.loadFileInfo();
-740
-741      Optional<byte[]> firstKey = reader.getFirstRowKey();
-742      Preconditions.checkState(firstKey.isPresent(), "First key can not be null");
-743      Optional<Cell> lk = reader.getLastKey();
-744      Preconditions.checkState(lk.isPresent(), "Last key can not be null");
-745      byte[] lastKey = CellUtil.cloneRow(lk.get());
-746
-747      LOG.debug("HFile bounds: first=" + Bytes.toStringBinary(firstKey.get()) +
-748          " last=" + Bytes.toStringBinary(lastKey));
-749      LOG.debug("Region bounds: first=" +
-750          Bytes.toStringBinary(getRegionInfo().getStartKey()) +
-751          " last=" + Bytes.toStringBinary(getRegionInfo().getEndKey()));
-752
-753      if (!this.getRegionInfo().containsRange(firstKey.get(), lastKey)) {
-754        throw new WrongRegionException(
-755            "Bulk load file " + srcPath.toString() + " does not fit inside region "
-756            + this.getRegionInfo().getRegionNameAsString());
-757      }
-758
-759      if (reader.length() > conf.getLong(HConstants.HREGION_MAX_FILESIZE,
-760          HConstants.DEFAULT_MAX_FILE_SIZE)) {
-761        LOG.warn("Trying to bulk load hfile " + srcPath.toString() + " with size: " +
-762            reader.length() + " bytes can be problematic as it may lead to oversplitting.");
-763      }
-764
-765      if (verifyBulkLoads) {
-766        long verificationStartTime = 
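The bulk-load check above reduces to a lexicographic bounds test: the HFile's [firstKey, lastKey] range must fall inside the region's [startKey, endKey). A minimal standalone sketch of that test follows; the class and helper names are illustrative stand-ins for Bytes.compareTo and RegionInfo.containsRange, not HBase API.

// Illustrative only: mirrors the region-fit test assertBulkLoadHFileOk performs.
public class BoundsCheck {
  // A region end key of length zero means "unbounded", as in HBase.
  static boolean containsRange(byte[] startKey, byte[] endKey, byte[] first, byte[] last) {
    return compareBytes(first, startKey) >= 0
        && (endKey.length == 0 || compareBytes(last, endKey) < 0);
  }

  static int compareBytes(byte[] a, byte[] b) {
    int n = Math.min(a.length, b.length);
    for (int i = 0; i < n; i++) {
      int d = (a[i] & 0xff) - (b[i] & 0xff);
      if (d != 0) {
        return d;
      }
    }
    return a.length - b.length;
  }

  public static void main(String[] args) {
    byte[] start = "m".getBytes(), end = "t".getBytes();
    // fits: both HFile keys inside [m, t)
    System.out.println(containsRange(start, end, "ma".getBytes(), "mz".getBytes()));
    // does not fit: last key sorts past the end key, which HStore turns into WrongRegionException
    System.out.println(containsRange(start, end, "ma".getBytes(), "tz".getBytes()));
  }
}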
    

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
    index ef76b9b..c96a94b 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
    @@ -161,11 +161,11 @@
 org.apache.hadoop.hbase.client.BufferedMutatorImpl (implements org.apache.hadoop.hbase.client.BufferedMutator)
 org.apache.hadoop.hbase.client.BufferedMutatorImpl.QueueRowAccess (implements org.apache.hadoop.hbase.client.RowAccess<T>)
 org.apache.hadoop.hbase.client.BufferedMutatorParams (implements java.lang.Cloneable)
-org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerAdapter (implements org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler)
+org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerAdapter (implements org.apache.hbase.thirdparty.io.netty.channel.ChannelHandler)
 
-org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandlerAdapter (implements org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandler)
+org.apache.hbase.thirdparty.io.netty.channel.ChannelInboundHandlerAdapter (implements org.apache.hbase.thirdparty.io.netty.channel.ChannelInboundHandler)
 
-org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler<I>
+org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler<I>
 
 org.apache.hadoop.hbase.client.ClusterStatusListener.MulticastListener.ClusterStatusHandler
 
@@ -269,14 +269,14 @@
 org.apache.hadoop.hbase.client.Mutation (implements org.apache.hadoop.hbase.CellScannable, org.apache.hadoop.hbase.io.HeapSize, org.apache.hadoop.hbase.client.Row)
 
 org.apache.hadoop.hbase.client.Append
-org.apache.hadoop.hbase.client.Delete (implements java.lang.Comparable<T>)
-org.apache.hadoop.hbase.client.Increment (implements java.lang.Comparable<T>)
-org.apache.hadoop.hbase.client.Put (implements java.lang.Comparable<T>, org.apache.hadoop.hbase.io.HeapSize)
+org.apache.hadoop.hbase.client.Delete
+org.apache.hadoop.hbase.client.Increment
+org.apache.hadoop.hbase.client.Put (implements org.apache.hadoop.hbase.io.HeapSize)
 
 
 org.apache.hadoop.hbase.client.Query
 
-org.apache.hadoop.hbase.client.Get (implements java.lang.Comparable<T>, org.apache.hadoop.hbase.client.Row)
+org.apache.hadoop.hbase.client.Get (implements org.apache.hadoop.hbase.client.Row)
 org.apache.hadoop.hbase.client.Scan
 
 
@@ -545,24 +545,24 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
+org.apache.hadoop.hbase.client.AsyncRequestFutureImpl.Retry
+org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState
 org.apache.hadoop.hbase.client.Durability
+org.apache.hadoop.hbase.client.Scan.ReadType
+org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows
+org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState
+org.apache.hadoop.hbase.client.Consistency
 org.apache.hadoop.hbase.client.AbstractResponse.ResponseType
-org.apache.hadoop.hbase.client.TableState.State
-org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState
-org.apache.hadoop.hbase.client.MasterSwitchType
+org.apache.hadoop.hbase.client.IsolationLevel
 org.apache.hadoop.hbase.client.RequestController.ReturnCode
-org.apache.hadoop.hbase.client.Consistency
-org.apache.hadoop.hbase.client.SnapshotType
-org.apache.hadoop.hbase.client.CompactType
 org.apache.hadoop.hbase.client.MobCompactPartitionPolicy
+org.apache.hadoop.hbase.client.SnapshotType
 org.apache.hadoop.hbase.client.CompactionState
-org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState
-org.apache.hadoop.hbase.client.AsyncRequestFutureImpl.Retry
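The hunks above capture two API-visible changes: the bundled Netty moved from the in-tree relocation org.apache.hadoop.hbase.shaded.io.netty to the hbase-thirdparty relocation org.apache.hbase.thirdparty.io.netty, and Delete, Increment, Put and Get no longer implement Comparable. Code compiled against the old relocation mostly needs only its imports updated; a minimal sketch follows (the handler class itself is illustrative, not from this diff).

// Old relocation, removed by this change:
//   import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandlerAdapter;
// New relocation from hbase-thirdparty:
import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;
import org.apache.hbase.thirdparty.io.netty.channel.ChannelInboundHandlerAdapter;

public class PassThroughHandler extends ChannelInboundHandlerAdapter {
  @Override
  public void channelRead(ChannelHandlerContext ctx, Object msg) {
    ctx.fireChannelRead(msg); // forward the message to the next handler unchanged
  }
}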
    

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.html b/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.html
    index d68657d..92659c0 100644
    --- a/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.html
    +++ b/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.html
    @@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
     
     
 Prev Class
-Next Class
+Next Class
 
 
 Frames
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public final class PrivateCellUtil
+public final class PrivateCellUtil
 extends Object
 Utility methods helpful slinging Cell instances. It has more powerful and
 rich set of APIs than those in CellUtil for internal usage.
@@ -134,7 +134,7 @@ extends Object
 
 
 private static class
-PrivateCellUtil.EmptyByteBufferCell
+PrivateCellUtil.EmptyByteBufferExtendedCell
 These cells are used in reseeks/seeks to improve the read performance.
 
 
@@ -146,7 +146,7 @@ extends Object
 
 
 private static class
-PrivateCellUtil.FirstOnRowByteBufferCell
+PrivateCellUtil.FirstOnRowByteBufferExtendedCell
 
 
 private static class
@@ -154,7 +154,7 @@ extends Object
 
 
 private static class
-PrivateCellUtil.FirstOnRowColByteBufferCell
+PrivateCellUtil.FirstOnRowColByteBufferExtendedCell
 
 
 private static class
@@ -162,7 +162,7 @@ extends Object
 
 
 private static class
-PrivateCellUtil.FirstOnRowColTSByteBufferCell
+PrivateCellUtil.FirstOnRowColTSByteBufferExtendedCell
 
 
 private static class
@@ -174,7 +174,7 @@ extends Object
 
 
 private static class
-PrivateCellUtil.LastOnRowByteBufferCell
+PrivateCellUtil.LastOnRowByteBufferExtendedCell
 
 
 private static class
@@ -182,7 +182,7 @@ extends Object
 
 
 private static class
-PrivateCellUtil.LastOnRowColByteBufferCell
+PrivateCellUtil.LastOnRowColByteBufferExtendedCell
 
 
 private static class
@@ -190,7 +190,7 @@ extends Object
 
 
 (package private) static class
-PrivateCellUtil.TagRewriteByteBufferCell
+PrivateCellUtil.TagRewriteByteBufferExtendedCell
 
 
 (package private) static class
@@ -200,7 +200,7 @@ extends Object
 
 
 (package private) static class
-PrivateCellUtil.ValueAndTagRewriteByteBufferCell
+PrivateCellUtil.ValueAndTagRewriteByteBufferExtendedCell
 
 
 (package private) static class
@@ -735,12 +735,12 @@ extends Object
 
 
 
-static Cell.DataType
-toDataType(byte type)
+static Cell.Type
+toType(byte type)
 
 
 static KeyValue.Type
-toTypeByte(Cell.DataType type)
+toTypeByte(Cell.Type type)
 
 
 static boolean
@@ -881,7 +881,7 @@ extends Object
 
 
 PrivateCellUtil
-private PrivateCellUtil()
+private PrivateCellUtil()
 Private constructor to keep this class from being instantiated.
 
 
@@ -899,7 +899,7 @@ extends Object
 
 
 fillRowRange
-public static ByteRange fillRowRange(Cell cell, ByteRange range)
+public static ByteRange fillRowRange(Cell cell, ByteRange range)
 ByteRange
 
@@ -910,7 +910,7 @@ extends Object
 
 
 fillFamilyRange
-public static ByteRange fillFamilyRange(Cell cell, ByteRange range)
+public static ByteRange fillFamilyRange(Cell cell, ByteRange range)
 
 
@@ -920,7 +920,7 @@ extends Object
 
 
 fillQualifierRange
-public static ByteRange fillQualifierRange(Cell cell, ByteRange range)
+public static ByteRange fillQualifierRange(Cell cell, ByteRange range)
 
 
@@ -930,7 +930,7 @@ extends Object
 
 
 fillValueRange
-public static ByteRange fillValueRange(Cell cell, ByteRange range)
+public static ByteRange fillValueRange(Cell cell, ByteRange range)
 
 
@@ -940,7 +940,7 @@ extends Object
 
 
 fillTagRange
-public static ByteRange fillTagRange(Cell cell, ByteRange range)
+public static ByteRange fillTagRange(Cell cell, ByteRange range)
 
 
@@ -950,7 +950,7 @@ extends Object
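The renames recorded here (the ByteBufferCell inner classes becoming ByteBufferExtendedCell variants, Cell.DataType becoming Cell.Type, toDataType becoming toType) leave the byte-to-enum mapping itself intact. A round-trip sketch based only on the signatures shown in this diff; note PrivateCellUtil is @InterfaceAudience.Private, so this is illustration rather than supported application API.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;

public class TypeRoundTrip {
  public static void main(String[] args) {
    byte code = KeyValue.Type.Put.getCode();        // the on-disk type byte
    Cell.Type type = PrivateCellUtil.toType(code);  // renamed from toDataType in this diff
    KeyValue.Type back = PrivateCellUtil.toTypeByte(type);
    System.out.println(type + " -> " + back);       // Put -> Put
  }
}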
     
     
     

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.KeyValueStatsCollector.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.KeyValueStatsCollector.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.KeyValueStatsCollector.html
index cf9843c..d579a94 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.KeyValueStatsCollector.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.KeyValueStatsCollector.html
    @@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
     
     
     
-private static class HFilePrettyPrinter.KeyValueStatsCollector
+private static class HFilePrettyPrinter.KeyValueStatsCollector
 extends Object
 
 
@@ -263,7 +263,7 @@ extends Object
 
 
 metricsRegistry
-private final com.codahale.metrics.MetricRegistry metricsRegistry
+private final com.codahale.metrics.MetricRegistry metricsRegistry
 
 
 
@@ -272,7 +272,7 @@ extends Object
 
 
 metricsOutput
-private final java.io.ByteArrayOutputStream metricsOutput
+private final java.io.ByteArrayOutputStream metricsOutput
 
 
 
@@ -281,7 +281,7 @@ extends Object
 
 
 simpleReporter
-private final HFilePrettyPrinter.SimpleReporter simpleReporter
+private final HFilePrettyPrinter.SimpleReporter simpleReporter
 
 
 
@@ -290,7 +290,7 @@ extends Object
 
 
 keyLen
-com.codahale.metrics.Histogram keyLen
+com.codahale.metrics.Histogram keyLen
 
 
 
@@ -299,7 +299,7 @@ extends Object
 
 
 valLen
-com.codahale.metrics.Histogram valLen
+com.codahale.metrics.Histogram valLen
 
 
 
@@ -308,7 +308,7 @@ extends Object
 
 
 rowSizeBytes
-com.codahale.metrics.Histogram rowSizeBytes
+com.codahale.metrics.Histogram rowSizeBytes
 
 
 
@@ -317,7 +317,7 @@ extends Object
 
 
 rowSizeCols
-com.codahale.metrics.Histogram rowSizeCols
+com.codahale.metrics.Histogram rowSizeCols
 
 
 
@@ -326,7 +326,7 @@ extends Object
 
 
 curRowBytes
-long curRowBytes
+long curRowBytes
 
 
 
@@ -335,7 +335,7 @@ extends Object
 
 
 curRowCols
-long curRowCols
+long curRowCols
 
 
 
@@ -344,7 +344,7 @@ extends Object
 
 
 biggestRow
-byte[] biggestRow
+byte[] biggestRow
 
 
 
@@ -353,7 +353,7 @@ extends Object
 
 
 prevCell
-private Cell prevCell
+private Cell prevCell
 
 
 
@@ -362,7 +362,7 @@ extends Object
 
 
 maxRowBytes
-private long maxRowBytes
+private long maxRowBytes
 
 
 
@@ -371,7 +371,7 @@ extends Object
 
 
 curRowKeyLength
-private long curRowKeyLength
+private long curRowKeyLength
 
 
 
@@ -388,7 +388,7 @@ extends Object
 
 
 KeyValueStatsCollector
-private KeyValueStatsCollector()
+private KeyValueStatsCollector()
 
 
 
@@ -405,7 +405,7 @@ extends Object
 
 
 collect
-public void collect(Cell cell)
+public void collect(Cell cell)
 
 
 
@@ -414,7 +414,7 @@ extends Object
 
 
 collectRow
-private void collectRow()
+private void collectRow()
 
 
 
@@ -423,7 +423,7 @@ extends Object
 
 
 finish
-public void finish()
+public void finish()
 
 
 
@@ -432,7 +432,7 @@ extends Object
 
 
 toString
-public String toString()
+public String toString()
 
 Overrides:
 toString in 
    

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
    index 1cf45c7..487ea03 100644
    --- a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
    +++ b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
    @@ -332,11 +332,11 @@
     
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
     
    -org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus
    +org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl.FactoryStorage
     org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode
    -org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective
    +org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus
     org.apache.hadoop.hbase.master.RegionState.State
    -org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl.FactoryStorage
    +org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
    index d075171..20eb5a8 100644
    --- a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
    +++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
    @@ -208,8 +208,8 @@
     
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
     
    -org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType
     org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType
    +org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html b/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
    index d264c19..c43c370 100644
    --- a/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
    +++ b/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
    @@ -125,8 +125,8 @@
     
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
     
    -org.apache.hadoop.hbase.monitoring.MonitoredTask.State
     org.apache.hadoop.hbase.monitoring.TaskMonitor.TaskFilter.TaskType
    +org.apache.hadoop.hbase.monitoring.MonitoredTask.State
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/package-tree.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html b/devapidocs/org/apache/hadoop/hbase/package-tree.html
    index d791c07..c68325b 100644
    --- a/devapidocs/org/apache/hadoop/hbase/package-tree.html
    +++ b/devapidocs/org/apache/hadoop/hbase/package-tree.html
    @@ -443,20 +443,20 @@
     
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
     
    -org.apache.hadoop.hbase.CompareOperator
    -org.apache.hadoop.hbase.Cell.DataType
     

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
index 6fecbc9..2accda0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
    @@ -34,4140 +34,4141 @@
 026import java.nio.charset.StandardCharsets;
 027import java.util.ArrayList;
 028import java.util.Arrays;
-029import java.util.Collection;
-030import java.util.EnumSet;
-031import java.util.HashMap;
-032import java.util.Iterator;
-033import java.util.LinkedList;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Set;
-037import java.util.concurrent.Callable;
-038import java.util.concurrent.ExecutionException;
-039import java.util.concurrent.Future;
-040import java.util.concurrent.TimeUnit;
-041import java.util.concurrent.TimeoutException;
-042import java.util.concurrent.atomic.AtomicInteger;
-043import java.util.concurrent.atomic.AtomicReference;
-044import java.util.regex.Pattern;
-045import java.util.stream.Collectors;
-046import java.util.stream.Stream;
-047import org.apache.hadoop.conf.Configuration;
-048import org.apache.hadoop.hbase.Abortable;
-049import org.apache.hadoop.hbase.CacheEvictionStats;
-050import org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
-051import org.apache.hadoop.hbase.ClusterMetrics.Option;
-052import org.apache.hadoop.hbase.ClusterStatus;
-053import org.apache.hadoop.hbase.DoNotRetryIOException;
-054import org.apache.hadoop.hbase.HBaseConfiguration;
-055import org.apache.hadoop.hbase.HConstants;
-056import org.apache.hadoop.hbase.HRegionInfo;
-057import org.apache.hadoop.hbase.HRegionLocation;
-058import org.apache.hadoop.hbase.HTableDescriptor;
-059import org.apache.hadoop.hbase.MasterNotRunningException;
-060import org.apache.hadoop.hbase.MetaTableAccessor;
-061import org.apache.hadoop.hbase.NamespaceDescriptor;
-062import org.apache.hadoop.hbase.NamespaceNotFoundException;
-063import org.apache.hadoop.hbase.NotServingRegionException;
-064import org.apache.hadoop.hbase.RegionLoad;
-065import org.apache.hadoop.hbase.RegionLocations;
-066import org.apache.hadoop.hbase.ServerName;
-067import org.apache.hadoop.hbase.TableExistsException;
-068import org.apache.hadoop.hbase.TableName;
-069import org.apache.hadoop.hbase.TableNotDisabledException;
-070import org.apache.hadoop.hbase.TableNotFoundException;
-071import org.apache.hadoop.hbase.UnknownRegionException;
-072import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-073import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-074import org.apache.hadoop.hbase.client.replication.TableCFs;
-075import org.apache.hadoop.hbase.client.security.SecurityCapability;
-076import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-077import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-078import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-079import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-080import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-081import org.apache.hadoop.hbase.quotas.QuotaFilter;
-082import org.apache.hadoop.hbase.quotas.QuotaRetriever;
-083import org.apache.hadoop.hbase.quotas.QuotaSettings;
-084import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-085import org.apache.hadoop.hbase.replication.ReplicationException;
-086import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-087import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-088import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-089import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-090import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-091import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-092import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-093import org.apache.hadoop.hbase.util.Addressing;
-094import org.apache.hadoop.hbase.util.Bytes;
-095import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-096import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-097import org.apache.hadoop.hbase.util.Pair;
-098import org.apache.hadoop.ipc.RemoteException;
-099import org.apache.hadoop.util.StringUtils;
-100import org.apache.yetus.audience.InterfaceAudience;
-101import org.apache.yetus.audience.InterfaceStability;
-102import org.slf4j.Logger;
-103import org.slf4j.LoggerFactory;
-104
-105import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-106import 
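The import block above outlines HBaseAdmin's surface: table and namespace DDL, snapshots, quotas and replication. Callers do not construct HBaseAdmin directly; they obtain an Admin from a Connection. A minimal sketch, assuming an hbase-site.xml on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListTables {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) { // backed by HBaseAdmin
      for (TableName tn : admin.listTableNames()) {
        System.out.println(tn);
      }
    }
  }
}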
    

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/apidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
    --
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Delete.html b/apidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
    index e2e01c7..0c7ac41 100644
    --- a/apidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
    +++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
    @@ -143,220 +143,232 @@
 135  }
 136
 137  /**
-138   * @param d Delete to clone.
+138   * @param deleteToCopy delete to copy
 139   */
-140  public Delete(final Delete d) {
-141    this.row = d.getRow();
-142    this.ts = d.getTimeStamp();
-143    this.familyMap.putAll(d.getFamilyCellMap());
-144    this.durability = d.durability;
-145    for (Map.Entry<String, byte[]> entry : d.getAttributesMap().entrySet()) {
-146      this.setAttribute(entry.getKey(), entry.getValue());
-147    }
-148    super.setPriority(d.getPriority());
-149  }
-150
-151  /**
-152   * Advanced use only. Add an existing delete marker to this Delete object.
-153   * @param kv An existing KeyValue of type "delete".
-154   * @return this for invocation chaining
-155   * @throws IOException
-156   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use {@link #add(Cell)}
-157   * instead
-158   */
-159  @SuppressWarnings("unchecked")
-160  @Deprecated
-161  public Delete addDeleteMarker(Cell kv) throws IOException {
-162    return this.add(kv);
-163  }
-164
-165  /**
-166   * Add an existing delete marker to this Delete object.
-167   * @param kv An existing KeyValue of type "delete".
-168   * @return this for invocation chaining
-169   * @throws IOException
-170   */
-171  public Delete add(Cell kv) throws IOException {
-172    if (!CellUtil.isDelete(kv)) {
-173      throw new IOException("The recently added KeyValue is not of type "
-174          + "delete. Rowkey: " + Bytes.toStringBinary(this.row));
-175    }
-176    if (!CellUtil.matchingRows(kv, this.row)) {
-177      throw new WrongRowIOException("The row in " + kv.toString() +
-178        " doesn't match the original one " + Bytes.toStringBinary(this.row));
-179    }
-180    byte[] family = CellUtil.cloneFamily(kv);
-181    List<Cell> list = getCellList(family);
-182    list.add(kv);
-183    return this;
-184  }
-185
-186  /**
-187   * Delete all versions of all columns of the specified family.
-188   * <p>
-189   * Overrides previous calls to deleteColumn and deleteColumns for the
-190   * specified family.
-191   * @param family family name
-192   * @return this for invocation chaining
-193   */
-194  public Delete addFamily(final byte[] family) {
-195    this.addFamily(family, this.ts);
-196    return this;
-197  }
-198
-199  /**
-200   * Delete all columns of the specified family with a timestamp less than
-201   * or equal to the specified timestamp.
-202   * <p>
-203   * Overrides previous calls to deleteColumn and deleteColumns for the
-204   * specified family.
-205   * @param family family name
-206   * @param timestamp maximum version timestamp
-207   * @return this for invocation chaining
-208   */
-209  public Delete addFamily(final byte[] family, final long timestamp) {
-210    if (timestamp < 0) {
-211      throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp);
-212    }
-213    List<Cell> list = getCellList(family);
-214    if (!list.isEmpty()) {
-215      list.clear();
-216    }
-217    KeyValue kv = new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamily);
-218    list.add(kv);
-219    return this;
-220  }
-221
-222  /**
-223   * Delete all columns of the specified family with a timestamp equal to
-224   * the specified timestamp.
-225   * @param family family name
-226   * @param timestamp version timestamp
-227   * @return this for invocation chaining
-228   */
-229  public Delete addFamilyVersion(final byte[] family, final long timestamp) {
-230    List<Cell> list = getCellList(family);
-231    list.add(new KeyValue(row, family, null, timestamp,
-232      KeyValue.Type.DeleteFamilyVersion));
-233    return this;
-234  }
-235
-236  /**
-237   * Delete all versions of the specified column.
-238   * @param family family name
-239   * @param qualifier column qualifier
-240   * @return this for invocation chaining
-241   */
-242  public Delete addColumns(final byte[] family, final byte[] qualifier) {
-243    addColumns(family, qualifier, this.ts);
-244    return this;
-245  }
-246
-247  /**
-248   * Delete all versions of the specified column with a timestamp less than
-249   * or equal to the specified timestamp.
-250   * @param family family name
-251   * @param qualifier column qualifier
-252   * @param timestamp maximum version timestamp
-253   * @return this for invocation chaining
-254   */
-255  public Delete addColumns(final byte[] family, final byte[] 
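The Delete API in this hunk composes by invocation chaining: addFamily drops a whole family, addColumns drops all versions of a column, optionally bounded by a timestamp, and the copy constructor duplicates row, timestamp, family map, durability, attributes and priority. A usage sketch built only from the methods shown above (row and column names are illustrative):

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteSketch {
  public static void main(String[] args) {
    byte[] row = Bytes.toBytes("row-1");
    byte[] cf = Bytes.toBytes("cf");
    byte[] q = Bytes.toBytes("q");

    Delete d = new Delete(row)
        .addFamily(cf)                       // every cell in the family
        .addColumns(cf, q)                   // all versions of one column
        .addColumns(cf, q, 1500000000000L);  // versions at or before the timestamp

    Delete copy = new Delete(d);             // the copy constructor from this hunk
    System.out.println(copy);                // apply with Table#delete in real code
  }
}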

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/apidocs/src-html/org/apache/hadoop/hbase/util/Base64.html
    --
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/util/Base64.html b/apidocs/src-html/org/apache/hadoop/hbase/util/Base64.html
    index 70481ce..b9f6622 100644
    --- a/apidocs/src-html/org/apache/hadoop/hbase/util/Base64.html
    +++ b/apidocs/src-html/org/apache/hadoop/hbase/util/Base64.html
    @@ -47,1648 +47,1649 @@
 039import java.util.zip.GZIPInputStream;
 040import java.util.zip.GZIPOutputStream;
 041
-042import org.apache.commons.logging.Log;
-043import org.apache.commons.logging.LogFactory;
-044import org.apache.yetus.audience.InterfaceAudience;
-045
-046/**
-047 * Encodes and decodes to and from Base64 notation.
-048 *
-049 * <p>
-050 * Homepage: <a href="http://iharder.net/base64">http://iharder.net/base64</a>.
-051 * </p>
-052 *
-053 * <p>
-054 * Change Log:
-055 * </p>
-056 * <ul>
-057 *   <li>v2.2.1 - Fixed bug using URL_SAFE and ORDERED encodings. Fixed bug
-058 *     when using very small files (~&lt; 40 bytes).</li>
-059 *   <li>v2.2 - Added some helper methods for encoding/decoding directly from
-060 *     one file to the next. Also added a main() method to support command
-061 *     line encoding/decoding from one file to the next. Also added these
-062 *     Base64 dialects:
-063 *     <ol>
-064 *       <li>The default is RFC3548 format.</li>
-065 *       <li>Using Base64.URLSAFE generates URL and file name friendly format as
-066 *         described in Section 4 of RFC3548.
-067 *         http://www.faqs.org/rfcs/rfc3548.html</li>
-068 *       <li>Using Base64.ORDERED generates URL and file name friendly format
-069 *         that preserves lexical ordering as described in
-070 *         http://www.faqs.org/qa/rfcc-1940.html</li>
-071 *     </ol>
-072 *     <p>
-073 *     Special thanks to Jim Kellerman at <a href="http://www.powerset.com/">
-074 *     http://www.powerset.com/</a> for contributing the new Base64 dialects.
-075 *   </li>
-076 *
-077 *   <li>v2.1 - Cleaned up javadoc comments and unused variables and methods.
-078 *     Added some convenience methods for reading and writing to and from files.
-079 *   </li>
-080 *   <li>v2.0.2 - Now specifies UTF-8 encoding in places where the code fails on
-081 *     systems with other encodings (like EBCDIC).</li>
-082 *   <li>v2.0.1 - Fixed an error when decoding a single byte, that is, when the
-083 *     encoded data was a single byte.</li>
-084 *   <li>v2.0 - I got rid of methods that used booleans to set options. Now
-085 *     everything is more consolidated and cleaner. The code now detects when
-086 *     data that's being decoded is gzip-compressed and will decompress it
-087 *     automatically. Generally things are cleaner. You'll probably have to
-088 *     change some method calls that you were making to support the new options
-089 *     format (<tt>int</tt>s that you "OR" together).</li>
-090 *   <li>v1.5.1 - Fixed bug when decompressing and decoding to a byte[] using
-091 *     <tt>decode( String s, boolean gzipCompressed )</tt>. Added the ability to
-092 *     "suspend" encoding in the Output Stream so you can turn on and off the
-093 *     encoding if you need to embed base64 data in an otherwise "normal" stream
-094 *     (like an XML file).</li>
-095 *   <li>v1.5 - Output stream pases on flush() command but doesn't do anything
-096 *     itself. This helps when using GZIP streams. Added the ability to
-097 *     GZip-compress objects before encoding them.</li>
-098 *   <li>v1.4 - Added helper methods to read/write files.</li>
-099 *   <li>v1.3.6 - Fixed OutputStream.flush() so that 'position' is reset.</li>
-100 *   <li>v1.3.5 - Added flag to turn on and off line breaks. Fixed bug in input
-101 *     stream where last buffer being read, if not completely full, was not
-102 *     returned.</li>
-103 *   <li>v1.3.4 - Fixed when "improperly padded stream" error was thrown at the
-104 *     wrong time.</li>
-105 *   <li>v1.3.3 - Fixed I/O streams which were totally messed up.</li>
-106 * </ul>
-107 *
-108 * <p>
-109 * I am placing this code in the Public Domain. Do with it as you will. This
-110 * software comes with no guarantees or warranties but with plenty of
-111 * well-wishing instead!
-112 * <p>
-113 * Please visit <a href="http://iharder.net/base64">http://iharder.net/base64</a>
-114 * periodically to check for updates or to contribute improvements.
-115 * <p>
-116 * author: Robert Harder, r...@iharder.net
-117 * <br>
-118 * version: 2.2.1
-119 */
-120@InterfaceAudience.Public
-121public class Base64 {
-122
-123  /*  P U B L I C   F I E L D S  */
-124
-125  /** No options specified. Value is zero. */
-126  public final static int NO_OPTIONS = 0;
-127
-128  /** Specify encoding. */
-129  public final static int ENCODE = 1;
-130
-131  /** Specify decoding. */
-132  public final static int DECODE = 0;
-133
-134  /** Specify that data should be gzip-compressed. */
-135  
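This Base64 is the bundled iharder.net implementation rather than java.util.Base64. Assuming the usual encodeBytes/decode entry points are unchanged by this commit (they are not shown in the hunk above), a round trip looks like:

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.util.Base64;

public class Base64RoundTrip {
  public static void main(String[] args) throws Exception {
    byte[] raw = "hello hbase".getBytes(StandardCharsets.UTF_8);
    String encoded = Base64.encodeBytes(raw);  // RFC 3548 alphabet by default
    byte[] decoded = Base64.decode(encoded);   // per the javadoc, also gunzips compressed input
    System.out.println(encoded);
    System.out.println(new String(decoded, StandardCharsets.UTF_8));
  }
}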

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/filter/FuzzyRowFilter.html
    --
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/filter/FuzzyRowFilter.html b/apidocs/src-html/org/apache/hadoop/hbase/filter/FuzzyRowFilter.html
    index 30d80c0..cb70db3 100644
    --- a/apidocs/src-html/org/apache/hadoop/hbase/filter/FuzzyRowFilter.html
    +++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/FuzzyRowFilter.html
    @@ -271,378 +271,390 @@
 263  /**
 264   * @return The filter serialized using pb
 265   */
-266  public byte[] toByteArray() {
-267    FilterProtos.FuzzyRowFilter.Builder builder = FilterProtos.FuzzyRowFilter.newBuilder();
-268    for (Pair<byte[], byte[]> fuzzyData : fuzzyKeysData) {
-269      BytesBytesPair.Builder bbpBuilder = BytesBytesPair.newBuilder();
-270      bbpBuilder.setFirst(UnsafeByteOperations.unsafeWrap(fuzzyData.getFirst()));
-271      bbpBuilder.setSecond(UnsafeByteOperations.unsafeWrap(fuzzyData.getSecond()));
-272      builder.addFuzzyKeysData(bbpBuilder);
-273    }
-274    return builder.build().toByteArray();
-275  }
-276
-277  /**
-278   * @param pbBytes A pb serialized {@link FuzzyRowFilter} instance
-279   * @return An instance of {@link FuzzyRowFilter} made from <code>bytes</code>
-280   * @throws DeserializationException
-281   * @see #toByteArray
-282   */
-283  public static FuzzyRowFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
-284    FilterProtos.FuzzyRowFilter proto;
-285    try {
-286      proto = FilterProtos.FuzzyRowFilter.parseFrom(pbBytes);
-287    } catch (InvalidProtocolBufferException e) {
-288      throw new DeserializationException(e);
-289    }
-290    int count = proto.getFuzzyKeysDataCount();
-291    ArrayList<Pair<byte[], byte[]>> fuzzyKeysData = new ArrayList<>(count);
-292    for (int i = 0; i < count; ++i) {
-293      BytesBytesPair current = proto.getFuzzyKeysData(i);
-294      byte[] keyBytes = current.getFirst().toByteArray();
-295      byte[] keyMeta = current.getSecond().toByteArray();
-296      fuzzyKeysData.add(new Pair<>(keyBytes, keyMeta));
-297    }
-298    return new FuzzyRowFilter(fuzzyKeysData);
-299  }
-300
-301  @Override
-302  public String toString() {
-303    final StringBuilder sb = new StringBuilder();
-304    sb.append("FuzzyRowFilter");
-305    sb.append("{fuzzyKeysData=");
-306    for (Pair<byte[], byte[]> fuzzyData : fuzzyKeysData) {
-307      sb.append('{').append(Bytes.toStringBinary(fuzzyData.getFirst())).append(":");
-308      sb.append(Bytes.toStringBinary(fuzzyData.getSecond())).append('}');
-309    }
-310    sb.append("}, ");
-311    return sb.toString();
-312  }
-313
-314  // Utility methods
-315
-316  static enum SatisfiesCode {
-317    /** row satisfies fuzzy rule */
-318    YES,
-319    /** row doesn't satisfy fuzzy rule, but there's possible greater row that does */
-320    NEXT_EXISTS,
-321    /** row doesn't satisfy fuzzy rule and there's no greater row that does */
-322    NO_NEXT
-323  }
-324
-325  @VisibleForTesting
-326  static SatisfiesCode satisfies(byte[] row, byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) {
-327    return satisfies(false, row, 0, row.length, fuzzyKeyBytes, fuzzyKeyMeta);
-328  }
-329
-330  @VisibleForTesting
-331  static SatisfiesCode satisfies(boolean reverse, byte[] row, byte[] fuzzyKeyBytes,
-332      byte[] fuzzyKeyMeta) {
-333    return satisfies(reverse, row, 0, row.length, fuzzyKeyBytes, fuzzyKeyMeta);
-334  }
-335
-336  static SatisfiesCode satisfies(boolean reverse, byte[] row, int offset, int length,
-337      byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) {
-338
-339    if (!UNSAFE_UNALIGNED) {
-340      return satisfiesNoUnsafe(reverse, row, offset, length, fuzzyKeyBytes, fuzzyKeyMeta);
-341    }
-342
-343    if (row == null) {
-344      // do nothing, let scan to proceed
-345      return SatisfiesCode.YES;
-346    }
-347    length = Math.min(length, fuzzyKeyBytes.length);
-348    int numWords = length / Bytes.SIZEOF_LONG;
-349
-350    int j = numWords << 3; // numWords * SIZEOF_LONG
-351
-352    for (int i = 0; i < j; i += Bytes.SIZEOF_LONG) {
-353      long fuzzyBytes = UnsafeAccess.toLong(fuzzyKeyBytes, i);
-354      long fuzzyMeta = UnsafeAccess.toLong(fuzzyKeyMeta, i);
-355      long rowValue = UnsafeAccess.toLong(row, offset + i);
-356      if ((rowValue & fuzzyMeta) != (fuzzyBytes)) {
-357        // We always return NEXT_EXISTS
-358        return SatisfiesCode.NEXT_EXISTS;
-359      }
-360    }
-361
-362    int off = j;
-363
-364    if (length - off >= Bytes.SIZEOF_INT) {
-365      int fuzzyBytes = UnsafeAccess.toInt(fuzzyKeyBytes, off);
-366      int fuzzyMeta = UnsafeAccess.toInt(fuzzyKeyMeta, off);
-367      int rowValue = UnsafeAccess.toInt(row, offset + off);
-368      if ((rowValue & fuzzyMeta) != (fuzzyBytes)) {
-369        // We always return NEXT_EXISTS
-370        return 

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html
index f1a2443..a469e93 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html
    @@ -1350,415 +1350,415 @@
     1342return delete;
     1343  }
     1344
-1345  public static Put makeBarrierPut(byte[] encodedRegionName, long seq, byte[] tableName) {
-1346    byte[] seqBytes = Bytes.toBytes(seq);
-1347    return new Put(encodedRegionName)
-1348        .addImmutable(HConstants.REPLICATION_BARRIER_FAMILY, seqBytes, seqBytes)
-1349        .addImmutable(HConstants.REPLICATION_META_FAMILY, tableNameCq, tableName);
-1350  }
-1351
-1352
-1353  public static Put makeDaughterPut(byte[] encodedRegionName, byte[] value) {
-1354    return new Put(encodedRegionName).addImmutable(HConstants.REPLICATION_META_FAMILY,
-1355        daughterNameCq, value);
-1356  }
-1357
-1358  public static Put makeParentPut(byte[] encodedRegionName, byte[] value) {
-1359    return new Put(encodedRegionName).addImmutable(HConstants.REPLICATION_META_FAMILY,
-1360        parentNameCq, value);
-1361  }
-1362
-1363  /**
-1364   * Adds split daughters to the Put
-1365   */
-1366  public static Put addDaughtersToPut(Put put, RegionInfo splitA, RegionInfo splitB) {
-1367    if (splitA != null) {
-1368      put.addImmutable(
-1369        HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, RegionInfo.toByteArray(splitA));
-1370    }
-1371    if (splitB != null) {
-1372      put.addImmutable(
-1373        HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, RegionInfo.toByteArray(splitB));
-1374    }
-1375    return put;
-1376  }
-1377
-1378  /**
-1379   * Put the passed <code>puts</code> to the <code>hbase:meta</code> table.
-1380   * Non-atomic for multi puts.
-1381   * @param connection connection we're using
-1382   * @param puts Put to add to hbase:meta
-1383   * @throws IOException
-1384   */
-1385  public static void putToMetaTable(final Connection connection, final Put... puts)
-1386    throws IOException {
-1387    put(getMetaHTable(connection), Arrays.asList(puts));
-1388  }
-1389
-1390  /**
-1391   * @param t Table to use (will be closed when done).
-1392   * @param puts puts to make
-1393   * @throws IOException
-1394   */
-1395  private static void put(final Table t, final List<Put> puts) throws IOException {
-1396    try {
-1397      if (METALOG.isDebugEnabled()) {
-1398        METALOG.debug(mutationsToString(puts));
-1399      }
-1400      t.put(puts);
-1401    } finally {
-1402      t.close();
-1403    }
-1404  }
-1405
-1406  /**
-1407   * Put the passed <code>ps</code> to the <code>hbase:meta</code> table.
-1408   * @param connection connection we're using
-1409   * @param ps Put to add to hbase:meta
-1410   * @throws IOException
-1411   */
-1412  public static void putsToMetaTable(final Connection connection, final List<Put> ps)
-1413    throws IOException {
-1414    Table t = getMetaHTable(connection);
-1415    try {
-1416      if (METALOG.isDebugEnabled()) {
-1417        METALOG.debug(mutationsToString(ps));
-1418      }
-1419      t.put(ps);
-1420    } finally {
-1421      t.close();
-1422    }
-1423  }
-1424
-1425  /**
-1426   * Delete the passed <code>d</code> from the <code>hbase:meta</code> table.
-1427   * @param connection connection we're using
-1428   * @param d Delete to add to hbase:meta
-1429   * @throws IOException
-1430   */
-1431  static void deleteFromMetaTable(final Connection connection, final Delete d)
-1432    throws IOException {
-1433    List<Delete> dels = new ArrayList<>(1);
-1434    dels.add(d);
-1435    deleteFromMetaTable(connection, dels);
-1436  }
-1437
-1438  /**
-1439   * Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
-1440   * @param connection connection we're using
-1441   * @param deletes Deletes to add to hbase:meta  This list should support #remove.
-1442   * @throws IOException
-1443   */
-1444  public static void deleteFromMetaTable(final Connection connection, final List<Delete> deletes)
-1445    throws IOException {
-1446    Table t = getMetaHTable(connection);
-1447    try {
-1448      if (METALOG.isDebugEnabled()) {
-1449        METALOG.debug(mutationsToString(deletes));
-1450      }
-1451      t.delete(deletes);
-1452    } finally {
-1453      t.close();
-1454    }
-1455  }
-1456
-1457  /**
-1458   * Deletes some replica columns corresponding to replicas for the passed rows
-1459   * @param metaRows rows in hbase:meta
-1460   * @param replicaIndexToDeleteFrom the replica ID we would start deleting from
-1461   * @param 
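A minimal usage sketch of the put/delete helpers above, assuming a reachable cluster (the row key and column are placeholders, and these helpers are HBase-internal, so treat this as illustrative rather than a supported client pattern):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaMutationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
          byte[] row = Bytes.toBytes("placeholder-row");  // illustrative row key only
          Put put = new Put(row).addColumn(Bytes.toBytes("info"),
              Bytes.toBytes("q"), Bytes.toBytes("v"));
          // Non-atomic across multiple puts, per the javadoc above.
          MetaTableAccessor.putToMetaTable(connection, put);

          // The delete variant takes a mutable list (it should support #remove).
          List<Delete> dels = new ArrayList<>();
          dels.add(new Delete(row));
          MetaTableAccessor.deleteFromMetaTable(connection, dels);
        }
      }
    }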

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
    index 7c59e27..c904c56 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
    @@ -119,4048 +119,4054 @@
     111import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
     112import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
     113import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
    -114import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
    -115import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
    -116import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
    -117import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
    -118import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
    -119import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
    -120import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
    -121import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
    -122import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
    -123import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
    -124import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
    -125import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
    -126import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
    -127import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
    -128import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
    -129import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
    -130import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
    -131import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
    -132import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
    -133import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
    -134import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
    -135import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
    -136import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
    -137import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
    -138import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
    -139import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
    -140import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
    -141import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
    -142import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
    -143import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
    -144import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
    -145import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
    -146import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
    -147import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
    -148import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
    -149import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
    -150import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
    -151import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
    -152import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
    -153import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
    -154import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
    -155import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
    -156import 
    

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/org/apache/hadoop/hbase/client/AsyncTable.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncTable.html 
    b/devapidocs/org/apache/hadoop/hbase/client/AsyncTable.html
    index 8067f23..25588e6 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/AsyncTable.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncTable.html
@@ -713,7 +713,7 @@ public interface 
 
 mutateRow
-CompletableFuture<Void> mutateRow(RowMutations mutation)
+CompletableFuture<Void> mutateRow(RowMutations mutation)
 Performs multiple mutations atomically on a single row. Currently Put and
 Delete are supported.
 
@@ -732,7 +732,7 @@ public interface 
 
 scan
-void scan(Scan scan,
+void scan(Scan scan,
     C consumer)
 The scan API uses the observer pattern.
 
@@ -751,7 +751,7 @@ public interface 
 
 getScanner
-default ResultScanner getScanner(byte[] family)
+default ResultScanner getScanner(byte[] family)
 Gets a scanner on the current table for the given family.
 
 Parameters:
@@ -767,7 +767,7 @@ public interface 
 
 getScanner
-default ResultScanner getScanner(byte[] family,
+default ResultScanner getScanner(byte[] family,
     byte[] qualifier)
 Gets a scanner on the current table for the given family and qualifier.
 
@@ -785,7 +785,7 @@ public interface 
 
 getScanner
-ResultScanner getScanner(Scan scan)
+ResultScanner getScanner(Scan scan)
 Returns a scanner on the current table as specified by the Scan object.
 
 Parameters:
@@ -801,7 +801,7 @@ public interface 
 
 scanAll
-CompletableFuture<List<Result>> scanAll(Scan scan)
+CompletableFuture<List<Result>> scanAll(Scan scan)
 Return all the results that match the given scan object.
 
 Notice that usually you should use this method with a Scan object that has limit set.
@@ -847,7 +847,7 @@ public interface 
 
 exists
-default List<CompletableFuture<Boolean>> exists(List<Get> gets)
+default List<CompletableFuture<Boolean>> exists(List<Get> gets)
 Test for the existence of columns in the table, as specified by the Gets.
 
 This will return a list of booleans. Each value will be true if the related Get matches one or
@@ -868,7 +868,7 @@ public interface 
 
 existsAll
-default CompletableFuture<List<Boolean>> existsAll(List<Get> gets)
    

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowByteBufferCell.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowByteBufferCell.html
     
    b/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowByteBufferCell.html
    index d29eb4b..7c9ae60 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowByteBufferCell.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowByteBufferCell.html
    @@ -18,7 +18,7 @@
     catch(err) {
     }
     //-->
    -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10};
    +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10};
     var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
    Methods"],8:["t4","Concrete Methods"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
    @@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
     
     
     All Implemented Interfaces:
    -Cell, SettableSequenceId
+Cloneable, Cell, ExtendedCell, HeapSize, RawCell
     
     
     Direct Known Subclasses:
    @@ -131,7 +131,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -private static class PrivateCellUtil.FirstOnRowByteBufferCell
    +private static class PrivateCellUtil.FirstOnRowByteBufferCell
     extends PrivateCellUtil.EmptyByteBufferCell
     
     
@@ -152,18 +152,36 @@ extends 
 Field and Description
 
 
+private static int
+FIXED_OVERHEAD
+
+
 private short
 rlength
 
-
+
 private int
 roffset
 
-
+
 private ByteBuffer
 rowBuff
 
 
+
+
+
+
+Fields inherited from interface org.apache.hadoop.hbase.ExtendedCell
+CELL_NOT_BASED_ON_CHUNK
+
+
+
+
+
+Fields inherited from interface org.apache.hadoop.hbase.RawCell
+MAX_TAGS_LENGTH
    +
     
     
     
@@ -217,13 +235,17 @@ extends 
 byte
 getTypeByte()
     
    +
    +long
    +heapSize()
    +
     
     
     
     
     
 Methods inherited from class org.apache.hadoop.hbase.PrivateCellUtil.EmptyByteBufferCell
-getFamilyArray, getFamilyByteBuffer, getFamilyLength, getFamilyOffset, getFamilyPosition, getQualifierArray, getQualifierByteBuffer, getQualifierLength, getQualifierOffset, getQualifierPosition, getRowArray, getRowOffset, getSequenceId, getTagsArray, getTagsByteBuffer, getTagsLength, getTagsOffset, getTagsPosition, getValueArray, getValueByteBuffer, getValueLength, getValueOffset, getValuePosition, setSequenceId
+getFamilyArray, getFamilyByteBuffer, getFamilyLength, getFamilyOffset, getFamilyPosition, getQualifierArray, getQualifierByteBuffer, getQualifierLength, getQualifierOffset, getQualifierPosition, getRowArray, getRowOffset, getSequenceId, getTagsArray, getTagsByteBuffer, getTagsLength, getTagsOffset, getTagsPosition, getValueArray, getValueByteBuffer, getValueLength, getValueOffset, getValuePosition, setSequenceId, setTimestamp, setTimestamp
     
     
     
@@ -232,6 +254,20 @@ extends 
 Methods inherited from class java.lang.Object
 clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
     
    +
    +
    +
    +
+Methods inherited from interface org.apache.hadoop.hbase.ExtendedCell
+deepClone, getChunkId, getSerializedSize, write, write
    +
    +
    +
    +
    +
    +Methods inherited from 
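The new FIXED_OVERHEAD field and heapSize() method listed above follow a common HBase heap-accounting idiom; roughly (a sketch matching the fields listed, not the class's verbatim source):

    // Common HBase shape for heap accounting (constants from
    // org.apache.hadoop.hbase.util.ClassSize and Bytes; the field mix mirrors
    // rowBuff/roffset/rlength above, but the real class may differ).
    private static final int FIXED_OVERHEAD = ClassSize.OBJECT  // object header
        + ClassSize.REFERENCE                                   // ByteBuffer rowBuff
        + Bytes.SIZEOF_INT                                      // int roffset
        + Bytes.SIZEOF_SHORT;                                   // short rlength

    @Override
    public long heapSize() {
      // Align to the JVM's 8-byte allocation granularity.
      return ClassSize.align(FIXED_OVERHEAD);
    }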

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/RegionObserver.MutationType.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/RegionObserver.MutationType.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/RegionObserver.MutationType.html
    index 4c37cbe..cfb8ee4 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/RegionObserver.MutationType.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/RegionObserver.MutationType.html
    @@ -228,837 +228,853 @@
 220   * of candidates. If you remove all the candidates then the compaction will be canceled.
 221   * <p>Supports Coprocessor 'bypass' -- 'bypass' is how this method indicates that it changed
 222   * the passed in <code>candidates</code>.
-223   * @param c the environment provided by the region server
-224   * @param store the store where compaction is being requested
-225   * @param candidates the store files currently available for compaction
-226   * @param tracker tracker used to track the life cycle of a compaction
-227   */
-228  default void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
-229      List<? extends StoreFile> candidates, CompactionLifeCycleTracker tracker)
-230      throws IOException {}
-231
-232  /**
-233   * Called after the {@link StoreFile}s to compact have been selected from the available
-234   * candidates.
-235   * @param c the environment provided by the region server
-236   * @param store the store being compacted
-237   * @param selected the store files selected to compact
-238   * @param tracker tracker used to track the life cycle of a compaction
-239   * @param request the requested compaction
-240   */
-241  default void postCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
-242      List<? extends StoreFile> selected, CompactionLifeCycleTracker tracker,
-243      CompactionRequest request) {}
-244
-245  /**
-246   * Called before we open store scanner for compaction. You can use the {@code options} to change max
-247   * versions and TTL for the scanner being opened.
-248   * @param c the environment provided by the region server
-249   * @param store the store being compacted
-250   * @param scanType type of Scan
-251   * @param options used to change max versions and TTL for the scanner being opened
-252   * @param tracker tracker used to track the life cycle of a compaction
-253   * @param request the requested compaction
-254   */
-255  default void preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
-256      ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker,
-257      CompactionRequest request) throws IOException {}
-258
-259  /**
-260   * Called prior to writing the {@link StoreFile}s selected for compaction into a new
-261   * {@code StoreFile}.
-262   * <p>
-263   * To override or modify the compaction process, implementing classes can wrap the provided
-264   * {@link InternalScanner} with a custom implementation that is returned from this method. The
-265   * custom scanner can then inspect {@link org.apache.hadoop.hbase.Cell}s from the wrapped scanner,
-266   * applying its own policy to what gets written.
-267   * @param c the environment provided by the region server
-268   * @param store the store being compacted
-269   * @param scanner the scanner over existing data used in the store file rewriting
-270   * @param scanType type of Scan
-271   * @param tracker tracker used to track the life cycle of a compaction
-272   * @param request the requested compaction
-273   * @return the scanner to use during compaction. Should not be {@code null} unless the
-274   * implementation is writing new store files on its own.
-275   */
-276  default InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
-277      InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
-278      CompactionRequest request) throws IOException {
-279    return scanner;
-280  }
-281
-282  /**
-283   * Called after compaction has completed and the new store file has been moved in to place.
-284   * @param c the environment provided by the region server
-285   * @param store the store being compacted
-286   * @param resultFile the new store file written out during compaction
-287   * @param tracker used to track the life cycle of a compaction
-288   * @param request the requested compaction
-289   */
-290  default void postCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
-291      StoreFile resultFile, CompactionLifeCycleTracker tracker, CompactionRequest request)
-292      throws IOException {}
-293
-294  /**
-295   * Called before the region is reported as closed to the master.
-296   * @param 
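A minimal coprocessor sketch against the preCompact hook documented above, wrapping the supplied scanner (HBase 2.x coprocessor API; the pass-through policy is illustrative):

    import java.io.IOException;
    import java.util.List;
    import java.util.Optional;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.RegionObserver;
    import org.apache.hadoop.hbase.regionserver.InternalScanner;
    import org.apache.hadoop.hbase.regionserver.ScanType;
    import org.apache.hadoop.hbase.regionserver.ScannerContext;
    import org.apache.hadoop.hbase.regionserver.Store;
    import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
    import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;

    public class CompactionInspectingObserver implements RegionCoprocessor, RegionObserver {
      @Override
      public Optional<RegionObserver> getRegionObserver() {
        return Optional.of(this);
      }

      @Override
      public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c,
          Store store, InternalScanner scanner, ScanType scanType,
          CompactionLifeCycleTracker tracker, CompactionRequest request) throws IOException {
        // Wrap the provided scanner; a real policy would filter or rewrite Cells here.
        return new InternalScanner() {
          @Override
          public boolean next(List<Cell> result, ScannerContext scannerContext)
              throws IOException {
            boolean more = scanner.next(result, scannerContext);
            // Inspect 'result' here before it is written to the new store file.
            return more;
          }

          @Override
          public void close() throws IOException {
            scanner.close();
          }
        };
      }
    }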

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
    index 3edfbef..9707b2c 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
    @@ -2459,5936 +2459,5935 @@
     2451  }
     2452
 2453  for (HStore s : storesToFlush) {
-2454    MemStoreSize flushableSize = s.getFlushableSize();
-2455    totalSizeOfFlushableStores.incMemStoreSize(flushableSize);
-2456    storeFlushCtxs.put(s.getColumnFamilyDescriptor().getName(),
-2457      s.createFlushContext(flushOpSeqId, tracker));
-2458    // for writing stores to WAL
-2459    committedFiles.put(s.getColumnFamilyDescriptor().getName(), null);
-2460    storeFlushableSize.put(s.getColumnFamilyDescriptor().getName(), flushableSize);
-2461  }
-2462
-2463  // write the snapshot start to WAL
-2464  if (wal != null && !writestate.readOnly) {
-2465    FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH,
-2466        getRegionInfo(), flushOpSeqId, committedFiles);
-2467    // No sync. Sync is below where no updates lock and we do FlushAction.COMMIT_FLUSH
-2468    WALUtil.writeFlushMarker(wal, this.getReplicationScope(), getRegionInfo(), desc, false,
-2469        mvcc);
-2470  }
-2471
-2472  // Prepare flush (take a snapshot)
-2473  for (StoreFlushContext flush : storeFlushCtxs.values()) {
-2474    flush.prepare();
-2475  }
-2476} catch (IOException ex) {
-2477  doAbortFlushToWAL(wal, flushOpSeqId, committedFiles);
-2478  throw ex;
-2479} finally {
-2480  this.updatesLock.writeLock().unlock();
-2481}
-2482String s = "Finished memstore snapshotting " + this + ", syncing WAL and waiting on mvcc, " +
-2483    "flushsize=" + totalSizeOfFlushableStores;
-2484status.setStatus(s);
-2485doSyncOfUnflushedWALChanges(wal, getRegionInfo());
-2486return new PrepareFlushResult(storeFlushCtxs, committedFiles, storeFlushableSize, startTime,
-2487    flushOpSeqId, flushedSeqId, totalSizeOfFlushableStores);
-2488  }
-2489
-2490  /**
-2491   * Utility method broken out of internalPrepareFlushCache so that method is smaller.
-2492   */
-2493  private void logFatLineOnFlush(Collection<HStore> storesToFlush, long sequenceId) {
-2494    if (!LOG.isInfoEnabled()) {
-2495      return;
-2496    }
-2497    // Log a fat line detailing what is being flushed.
-2498    StringBuilder perCfExtras = null;
-2499    if (!isAllFamilies(storesToFlush)) {
-2500      perCfExtras = new StringBuilder();
-2501      for (HStore store: storesToFlush) {
-2502        perCfExtras.append("; ").append(store.getColumnFamilyName());
-2503        perCfExtras.append("=")
-2504            .append(StringUtils.byteDesc(store.getFlushableSize().getDataSize()));
-2505      }
-2506    }
-2507    LOG.info("Flushing " + + storesToFlush.size() + "/" + stores.size() +
-2508        " column families, memstore=" + StringUtils.byteDesc(this.memstoreDataSize.get()) +
-2509        ((perCfExtras != null && perCfExtras.length() > 0)? perCfExtras.toString(): "") +
-2510        ((wal != null) ? "" : "; WAL is null, using passed sequenceid=" + sequenceId));
-2511  }
-2512
-2513  private void doAbortFlushToWAL(final WAL wal, final long flushOpSeqId,
-2514      final Map<byte[], List<Path>> committedFiles) {
-2515    if (wal == null) return;
-2516    try {
-2517      FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.ABORT_FLUSH,
-2518          getRegionInfo(), flushOpSeqId, committedFiles);
-2519      WALUtil.writeFlushMarker(wal, this.getReplicationScope(), getRegionInfo(), desc, false,
-2520          mvcc);
-2521    } catch (Throwable t) {
-2522      LOG.warn("Received unexpected exception trying to write ABORT_FLUSH marker to WAL:" +
-2523          StringUtils.stringifyException(t));
-2524      // ignore this since we will be aborting the RS with DSE.
-2525    }
-2526    // we have called wal.startCacheFlush(), now we have to abort it
-2527    wal.abortCacheFlush(this.getRegionInfo().getEncodedNameAsBytes());
-2528  }
-2529
-2530  /**
-2531   * Sync unflushed WAL changes. See HBASE-8208 for details
-2532   */
-2533  private static void doSyncOfUnflushedWALChanges(final WAL wal, final RegionInfo hri)
-2534      throws IOException {
-2535    if (wal == null) {
-2536      return;
-2537    }
-2538    try {
-2539      wal.sync(); // ensure that flush marker is sync'ed
-2540    } catch (IOException ioe) {
-2541      
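Abstracting away the WAL/MVCC specifics, the prepare/abort/sync code above reduces to a lock-guarded protocol; a self-contained analogy in plain Java (names simplified, not the real API surface):

    import java.io.IOException;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class PrepareFlushShape {
      private final ReentrantReadWriteLock updatesLock = new ReentrantReadWriteLock();

      void prepareFlush() throws IOException {
        updatesLock.writeLock().lock();
        try {
          writeStartFlushMarker();   // ~ WALUtil.writeFlushMarker(START_FLUSH), not yet synced
          snapshotStores();          // ~ StoreFlushContext.prepare() per store
        } catch (IOException ex) {
          abortFlush();              // ~ doAbortFlushToWAL: ABORT_FLUSH marker + abortCacheFlush
          throw ex;
        } finally {
          updatesLock.writeLock().unlock();  // always release the updates lock
        }
        syncWal();                   // ~ doSyncOfUnflushedWALChanges: make markers durable
      }

      void writeStartFlushMarker() throws IOException {}
      void snapshotStores() throws IOException {}
      void abortFlush() {}
      void syncWal() throws IOException {}
    }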
    

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
    index 11bfb15..915e78a 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
    @@ -782,557 +782,562 @@
     774  /**
     775   * Default value of {@link 
    #HBASE_CLIENT_RETRIES_NUMBER}.
     776   */
    -777  public static final int 
    DEFAULT_HBASE_CLIENT_RETRIES_NUMBER = 35;
    +777  public static final int 
    DEFAULT_HBASE_CLIENT_RETRIES_NUMBER = 10;
     778
    -779  /**
    -780   * Parameter name to set the default 
    scanner caching for all clients.
    -781   */
    -782  public static final String 
    HBASE_CLIENT_SCANNER_CACHING = "hbase.client.scanner.caching";
    +779  public static final String 
    HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER =
    +780  
    "hbase.client.serverside.retries.multiplier";
    +781
    +782  public static final int 
    DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER = 3;
     783
     784  /**
    -785   * Default value for {@link 
    #HBASE_CLIENT_SCANNER_CACHING}
    +785   * Parameter name to set the default 
    scanner caching for all clients.
     786   */
    -787  public static final int 
    DEFAULT_HBASE_CLIENT_SCANNER_CACHING = Integer.MAX_VALUE;
    +787  public static final String 
    HBASE_CLIENT_SCANNER_CACHING = "hbase.client.scanner.caching";
     788
     789  /**
    -790   * Parameter name for number of rows 
    that will be fetched when calling next on
    -791   * a scanner if it is not served from 
    memory. Higher caching values will
    -792   * enable faster scanners but will eat 
    up more memory and some calls of next
    -793   * may take longer and longer times 
    when the cache is empty.
    -794   */
    -795  public static final String 
    HBASE_META_SCANNER_CACHING = "hbase.meta.scanner.caching";
    -796
    -797  /**
    -798   * Default value of {@link 
    #HBASE_META_SCANNER_CACHING}.
    +790   * Default value for {@link 
    #HBASE_CLIENT_SCANNER_CACHING}
    +791   */
    +792  public static final int 
    DEFAULT_HBASE_CLIENT_SCANNER_CACHING = Integer.MAX_VALUE;
    +793
    +794  /**
    +795   * Parameter name for number of rows 
    that will be fetched when calling next on
    +796   * a scanner if it is not served from 
    memory. Higher caching values will
    +797   * enable faster scanners but will eat 
    up more memory and some calls of next
    +798   * may take longer and longer times 
    when the cache is empty.
     799   */
    -800  public static final int 
    DEFAULT_HBASE_META_SCANNER_CACHING = 100;
    +800  public static final String 
    HBASE_META_SCANNER_CACHING = "hbase.meta.scanner.caching";
     801
     802  /**
    -803   * Parameter name for number of 
    versions, kept by meta table.
    +803   * Default value of {@link 
    #HBASE_META_SCANNER_CACHING}.
     804   */
    -805  public static final String 
    HBASE_META_VERSIONS = "hbase.meta.versions";
    +805  public static final int 
    DEFAULT_HBASE_META_SCANNER_CACHING = 100;
     806
     807  /**
    -808   * Default value of {@link 
    #HBASE_META_VERSIONS}.
    +808   * Parameter name for number of 
    versions, kept by meta table.
     809   */
    -810  public static final int 
    DEFAULT_HBASE_META_VERSIONS = 3;
    +810  public static final String 
    HBASE_META_VERSIONS = "hbase.meta.versions";
     811
     812  /**
    -813   * Parameter name for number of 
    versions, kept by meta table.
    +813   * Default value of {@link 
    #HBASE_META_VERSIONS}.
     814   */
    -815  public static final String 
    HBASE_META_BLOCK_SIZE = "hbase.meta.blocksize";
    +815  public static final int 
    DEFAULT_HBASE_META_VERSIONS = 3;
     816
     817  /**
    -818   * Default value of {@link 
    #HBASE_META_BLOCK_SIZE}.
    +818   * Parameter name for number of 
    versions, kept by meta table.
     819   */
    -820  public static final int 
    DEFAULT_HBASE_META_BLOCK_SIZE = 8 * 1024;
    +820  public static final String 
    HBASE_META_BLOCK_SIZE = "hbase.meta.blocksize";
     821
     822  /**
    -823   * Parameter name for unique identifier 
    for this {@link org.apache.hadoop.conf.Configuration}
    -824   * instance. If there are two or more 
    {@link org.apache.hadoop.conf.Configuration} instances that,
    -825   * for all intents and purposes, are 
    the same except for their instance ids, then they will not be
    -826   * able to share the same 
    org.apache.hadoop.hbase.client.HConnection instance. On the other hand,
    -827   * even if the instance ids are the 
    same, it could result in non-shared
    -828   * 
    org.apache.hadoop.hbase.client.HConnection instances if some of the other 
    connection parameters
    -829   * differ.
    -830   */
    -831  public static final String 
    HBASE_CLIENT_INSTANCE_ID = "hbase.client.instance.id";
    -832
    -833  /**
    -834   * The client scanner timeout period in 
    milliseconds.
    +823   * Default value of {@link 
    #HBASE_META_BLOCK_SIZE}.
    +824   */
    +825  public static final int 
    DEFAULT_HBASE_META_BLOCK_SIZE = 8 * 1024;
    +826
    +827  /**
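The retry-related constants changed above, shown in use (client-side override plus the new server-side multiplier; the values here are examples):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    public class RetryConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // The commit lowers the default from 35 to 10; override per client if needed.
        conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5);
        int base = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
            HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
        // Server-side callers derive their retry count by multiplying the base:
        int multiplier = conf.getInt(HConstants.HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER,
            HConstants.DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER);
        System.out.println("client retries=" + base + ", server-side=" + base * multiplier);
      }
    }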
    

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html 
    b/devapidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
    index b963bf7..c00ba22 100644
    --- a/devapidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
    +++ b/devapidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
    @@ -18,7 +18,7 @@
     catch(err) {
     }
     //-->
    -var methods = 
    {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":42,"i32":10,"i33":42,"i34":10,"i35":10,"i36":42,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":42,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":42,"i55":10,"i56":9};
    +var methods = 
    {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":42,"i11":10,"i12":42,"i13":10,"i14":10,"i15":42,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":42,"i32":10,"i33":42,"i34":10,"i35":10,"i36":42,"i37":10,"i38":10,"i39":10,"i40":10,"i41":42,"i42":10,"i43":42,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":42,"i53":42,"i54":42,"i55":42,"i56":9};
     var tabs = {65535:["t0","All Methods"],1:["t1","Static 
    Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
    Methods"],32:["t6","Deprecated Methods"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
@@ -314,8 +314,7 @@ implements 
 CompareFilter.CompareOp compareOp,
   byte[] value,
   Delete delete)
-Atomically checks if a row/family/qualifier value matches the expected
- value.
+Deprecated.
 
 
 
@@ -338,7 +337,7 @@ implements 
 CompareFilter.CompareOp compareOp,
   byte[] value,
   RowMutations rm)
-Atomically checks if a row/family/qualifier value matches the expected value.
+Deprecated.
 
 
 
@@ -371,8 +370,7 @@ implements 
 CompareFilter.CompareOp compareOp,
  byte[] value,
  Put put)
-Atomically checks if a row/family/qualifier value matches the expected
- value.
+Deprecated.
     
     
     
@@ -546,7 +544,7 @@ implements 
 HTableDescriptor
 getTableDescriptor()
-Gets the table descriptor for this table.
+Deprecated.
 
 
 
@@ -615,15 +613,13 @@ implements 
 void
 setOperationTimeout(int operationTimeout)
-Set timeout (millisecond) of each operation in this Table instance, will override the value
- of hbase.client.operation.timeout in configuration.
+Deprecated.
 
 
 
 void
 setReadRpcTimeout(int readRpcTimeout)
-Set timeout (millisecond) of each rpc read request in operations of this Table instance, will
- override the value of hbase.rpc.read.timeout in configuration.
+Deprecated.
 
 
 
@@ -635,8 +631,7 @@ implements 
 void
 setWriteRpcTimeout(int writeRpcTimeout)
-Set timeout (millisecond) of each rpc write request in operations of this Table instance, will
- override the value of hbase.rpc.write.timeout in configuration.
+Deprecated.
     
     
     
@@ -866,8 +861,10 @@ implements 
 
 getTableDescriptor
-public HTableDescriptor getTableDescriptor()
-                                    throws IOException
+@Deprecated
+public HTableDescriptor getTableDescriptor()
+                                    throws IOException
+Deprecated.
 Description copied from interface: Table
 Gets the table descriptor for this table.
 
@@ -884,7 +881,7 @@ implements 
 
 close
-public void close()
+public void close()
            throws IOException
 Description copied from interface: Table
 Releases any resources held or pending changes in internal buffers.
@@ -906,7 +903,7 @@ implements 
 
 get
-public Result get(Get get)
+public Result get(Get get)
           throws IOException
 Description copied from interface: Table
 Extracts certain cells from a given row.
@@ -930,7 +927,7 @@ implements 
 
 get
-public Result[] get(List<Get> gets)
    

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
    index 40cd159..2da0903 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
    @@ -260,7 +260,7 @@
     252
     253// Update meta events (for testing)
     254if (hasProcExecutor) {
    -255  
    getProcedureScheduler().suspendEvent(metaLoadEvent);
    +255  metaLoadEvent.suspend();
     256  setFailoverCleanupDone(false);
     257  for (RegionInfo hri: 
    getMetaRegionSet()) {
     258setMetaInitialized(hri, false);
    @@ -421,1455 +421,1454 @@
 413  }
 414
 415  public boolean waitMetaInitialized(final Procedure proc, final RegionInfo regionInfo) {
-416    return getProcedureScheduler().waitEvent(
-417      getMetaInitializedEvent(getMetaForRegion(regionInfo)), proc);
-418  }
-419
-420  private void setMetaInitialized(final RegionInfo metaRegionInfo, final boolean isInitialized) {
-421    assert isMetaRegion(metaRegionInfo) : "unexpected non-meta region " + metaRegionInfo;
-422    final ProcedureEvent metaInitEvent = getMetaInitializedEvent(metaRegionInfo);
-423    if (isInitialized) {
-424      getProcedureScheduler().wakeEvent(metaInitEvent);
-425    } else {
-426      getProcedureScheduler().suspendEvent(metaInitEvent);
-427    }
-428  }
-429
-430  private ProcedureEvent getMetaInitializedEvent(final RegionInfo metaRegionInfo) {
-431    assert isMetaRegion(metaRegionInfo) : "unexpected non-meta region " + metaRegionInfo;
-432    // TODO: handle multiple meta.
-433    return metaInitializedEvent;
-434  }
-435
-436  public boolean waitMetaLoaded(final Procedure proc) {
-437    return getProcedureScheduler().waitEvent(metaLoadEvent, proc);
-438  }
-439
-440  protected void wakeMetaLoadedEvent() {
-441    getProcedureScheduler().wakeEvent(metaLoadEvent);
-442    assert isMetaLoaded() : "expected meta to be loaded";
-443  }
-444
-445  public boolean isMetaLoaded() {
-446    return metaLoadEvent.isReady();
-447  }
-448
-449  // ============================================================================================
-450  //  TODO: Sync helpers
-451  // ============================================================================================
-452  public void assignMeta(final RegionInfo metaRegionInfo) throws IOException {
-453    assignMeta(metaRegionInfo, null);
-454  }
-455
-456  public void assignMeta(final RegionInfo metaRegionInfo, final ServerName serverName)
-457      throws IOException {
-458    assert isMetaRegion(metaRegionInfo) : "unexpected non-meta region " + metaRegionInfo;
-459    AssignProcedure proc;
-460    if (serverName != null) {
-461      LOG.debug("Try assigning Meta " + metaRegionInfo + " to " + serverName);
-462      proc = createAssignProcedure(metaRegionInfo, serverName);
-463    } else {
-464      LOG.debug("Assigning " + metaRegionInfo.getRegionNameAsString());
-465      proc = createAssignProcedure(metaRegionInfo, false);
-466    }
-467    ProcedureSyncWait.submitAndWaitProcedure(master.getMasterProcedureExecutor(), proc);
-468  }
-469
-470  /**
-471   * Start a new thread to check if there are region servers whose versions are higher than others.
-472   * If so, move all system table regions to RS with the highest version to keep compatibility.
-473   * The reason is, RS in new version may not be able to access RS in old version when there are
-474   * some incompatible changes.
-475   */
-476  public void checkIfShouldMoveSystemRegionAsync() {
-477    new Thread(() -> {
-478      try {
-479        synchronized (checkIfShouldMoveSystemRegionLock) {
-480          List<RegionPlan> plans = new ArrayList<>();
-481          for (ServerName server : getExcludedServersForSystemTable()) {
-482            if (master.getServerManager().isServerDead(server)) {
-483              // TODO: See HBASE-18494 and HBASE-18495. Though getExcludedServersForSystemTable()
-484              // considers only online servers, the server could be queued for dead server
-485              // processing. As region assignments for crashed server is handled by
-486              // ServerCrashProcedure, do NOT handle them here. The goal is to handle this through
-487              // regular flow of LoadBalancer as a favored node and not to have this special
-488              // handling.
-489              continue;
-490            }
-491            List<RegionInfo> regionsShouldMove = getCarryingSystemTables(server);
-492 
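The metaLoadEvent.suspend() change in the first hunk above moves suspension onto the event itself; the gate it implements behaves roughly like this plain-Java analogy (a CountDownLatch stands in for the procedure-parking machinery, so this parks threads, not procedures):

    import java.util.concurrent.CountDownLatch;

    class MetaLoadGateSketch {
      private volatile CountDownLatch gate = new CountDownLatch(1);  // starts suspended

      void suspend()    { gate = new CountDownLatch(1); }  // ~ metaLoadEvent.suspend()
      void wake()       { gate.countDown(); }              // ~ wakeMetaLoadedEvent()
      boolean isReady() { return gate.getCount() == 0; }   // ~ metaLoadEvent.isReady()

      void awaitMetaLoaded() throws InterruptedException {
        gate.await();  // ~ waitMetaLoaded(proc)
      }
    }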

    [24/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.html
     
    b/devapidocs/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.html
    index ef2120e..a88ecab 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.html
@@ -115,7 +115,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class PartitionedMobCompactor
+public class PartitionedMobCompactor
 extends MobCompactor
 An implementation of MobCompactor that compacts the mob files in partitions.
 
@@ -398,7 +398,7 @@ extends 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
@@ -407,7 +407,7 @@ extends 
 
 mergeableSize
-protected long mergeableSize
+protected long mergeableSize
 
 
@@ -416,7 +416,7 @@ extends 
 
 delFileMaxCount
-protected int delFileMaxCount
+protected int delFileMaxCount
 
 
@@ -425,7 +425,7 @@ extends 
 
 compactionBatchSize
-protected int compactionBatchSize
+protected int compactionBatchSize
 The number of files compacted in a batch
 
 
@@ -435,7 +435,7 @@ extends 
 
 compactionKVMax
-protected int compactionKVMax
+protected int compactionKVMax
 
 
@@ -444,7 +444,7 @@ extends 
 
 tempPath
-private final org.apache.hadoop.fs.Path tempPath
+private final org.apache.hadoop.fs.Path tempPath
 
 
@@ -453,7 +453,7 @@ extends 
 
 bulkloadPath
-private final org.apache.hadoop.fs.Path bulkloadPath
+private final org.apache.hadoop.fs.Path bulkloadPath
 
 
@@ -462,7 +462,7 @@ extends 
 
 compactionCacheConfig
-private final CacheConfig compactionCacheConfig
+private final CacheConfig compactionCacheConfig
 
 
@@ -471,7 +471,7 @@ extends 
 
 refCellTags
-private final byte[] refCellTags
+private final byte[] refCellTags
 
 
@@ -480,7 +480,7 @@ extends 
 
 cryptoContext
-private Encryption.Context cryptoContext
+private Encryption.Context cryptoContext
 
 
@@ -497,7 +497,7 @@ extends 
 
 PartitionedMobCompactor
-public PartitionedMobCompactor(org.apache.hadoop.conf.Configuration conf,
+public PartitionedMobCompactor(org.apache.hadoop.conf.Configuration conf,
     org.apache.hadoop.fs.FileSystem fs,
     TableName tableName,
     ColumnFamilyDescriptor column,
@@ -523,7 +523,7 @@ extends 
 
 compact
-public List<org.apache.hadoop.fs.Path> compact(List<org.apache.hadoop.fs.FileStatus> files,
+public List<org.apache.hadoop.fs.Path> compact(List<org.apache.hadoop.fs.FileStatus> files,
     boolean allFiles)
     throws IOException
 Description copied from class: MobCompactor
@@ -547,7 +547,7 @@ extends 
 
 select
-protected PartitionedMobCompactionRequest select(List<org.apache.hadoop.fs.FileStatus> candidates,
+protected PartitionedMobCompactionRequest select(List<org.apache.hadoop.fs.FileStatus> candidates,
     boolean allFiles)
     throws IOException
 Selects the compacted mob/del files.
@@ -569,7 +569,7 @@ extends 
 
 performCompaction
-protected List<org.apache.hadoop.fs.Path> performCompaction(PartitionedMobCompactionRequest request)
+protected List<org.apache.hadoop.fs.Path> performCompaction(PartitionedMobCompactionRequest request)
     throws IOException
