[22/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
index 68302bf..a5a8905 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
@@ -2197,1768 +2197,1775 @@
 2189        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
 2190    }
 2191
-2192    for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
-2193      if (hcd.getTimeToLive() <= 0) {
-2194        String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
-2195        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
-2196      }
-2197
-2198      // check blockSize
-2199      if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
-2200        String message = "Block size for column family " + hcd.getNameAsString()
-2201            + "  must be between 1K and 16MB.";
+2192    // check that we have minimum 1 region replicas
+2193    int regionReplicas = htd.getRegionReplication();
+2194    if (regionReplicas < 1) {
+2195      String message = "Table region replication should be at least one.";
+2196      warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
+2197    }
+2198
+2199    for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
+2200      if (hcd.getTimeToLive() <= 0) {
+2201        String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
 2202        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
 2203      }
 2204
-2205      // check versions
-2206      if (hcd.getMinVersions() < 0) {
-2207        String message = "Min versions for column family " + hcd.getNameAsString()
-2208          + "  must be positive.";
+2205      // check blockSize
+2206      if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
+2207        String message = "Block size for column family " + hcd.getNameAsString()
+2208            + "  must be between 1K and 16MB.";
 2209        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
 2210      }
-2211      // max versions already being checked
-2212
-2213      // HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor
-2214      //  does not throw IllegalArgumentException
-2215      // check minVersions <= maxVersions
-2216      if (hcd.getMinVersions() > hcd.getMaxVersions()) {
-2217        String message = "Min versions for column family " + hcd.getNameAsString()
-2218            + " must be less than the Max versions.";
-2219        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
-2220      }
-2221
-2222      // check replication scope
-2223      checkReplicationScope(hcd);
-2224      // check bloom filter type
-2225      checkBloomFilterType(hcd);
-2226
-2227      // check data replication factor, it can be 0(default value) when user has not explicitly
-2228      // set the value, in this case we use default replication factor set in the file system.
-2229      if (hcd.getDFSReplication() < 0) {
-2230        String message = "HFile Replication for column family " + hcd.getNameAsString()
-2231            + "  must be greater than zero.";
-2232        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
-2233      }
-2234
-2235      // TODO: should we check coprocessors and encryption ?
-2236    }
-2237  }
-2238
-2239  private void checkReplicationScope(ColumnFamilyDescriptor hcd) throws IOException {
-2240    // check replication scope
-2241    WALProtos.ScopeType scop = WALProtos.ScopeType.valueOf(hcd.getScope());
-2242    if (scop == null) {
-2243      String message = "Replication scope for column family "
-2244          + hcd.getNameAsString() + " is " + hcd.getScope() + " which is invalid.";
+2211
+2212      // check versions
+2213      if (hcd.getMinVersions() < 0) {
+2214        String message = "Min versions for column family " + hcd.getNameAsString()
+2215          + "  must be positive.";
+2216        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
+2217      }
+2218      // max versions already being checked
+2219
+2220      // HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor
+2221      //  does not throw IllegalArgumentException
+2222      // check minVersions <= maxVersions
+2223      if (hcd.getMinVersions() > hcd.getMaxVersions()) {
+2224        String message = "Min versions for column family " + hcd.getNameAsString()
+2225            + " must be less than the Max versions.";
+2226        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
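
The hunk above moves a new region-replication sanity check ahead of the per-column-family checks. A minimal standalone sketch of the same warn-or-throw validation pattern (class and helper names here are hypothetical stand-ins; the real method, HMaster's warnOrThrowExceptionForFailure, consults a conf key to decide whether to log or throw):

    import java.io.IOException;

    // Hypothetical sketch of the validation pattern above, not HBase code.
    class DescriptorValidationSketch {
      private final boolean logWarnOnly; // mirrors the logWarn flag above

      DescriptorValidationSketch(boolean logWarnOnly) {
        this.logWarnOnly = logWarnOnly;
      }

      void checkRegionReplication(int regionReplicas) throws IOException {
        if (regionReplicas < 1) {
          warnOrThrow("Table region replication should be at least one.");
        }
      }

      void checkBlockSize(int blocksize) throws IOException {
        if (blocksize < 1024 || blocksize > 16 * 1024 * 1024) {
          warnOrThrow("Block size must be between 1K and 16MB.");
        }
      }

      private void warnOrThrow(String message) throws IOException {
        if (logWarnOnly) {
          System.err.println("Warning: " + message); // the real code logs via slf4j
        } else {
          throw new IOException(message);
        }
      }
    }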

[22/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
index 18a233a..97b6786 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i
 
109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i
 
109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -421,34 +421,38 @@ extends
 void
-postListDecommissionedRegionServers()
+postIsRpcThrottleEnabled(boolean enabled)


 void
-postListNamespaceDescriptors(List<NamespaceDescriptor> descriptors)
+postListDecommissionedRegionServers()


 void
-postListReplicationPeers(String regex)
+postListNamespaceDescriptors(List<NamespaceDescriptor> descriptors)


 void
-postListRSGroups()
+postListReplicationPeers(String regex)


 void
-postListSnapshot(SnapshotDescription snapshot)
+postListRSGroups()


 void
+postListSnapshot(SnapshotDescription snapshot)
+
+
+void
 postLockHeartbeat(LockProcedure proc,
   boolean keepAlive)

-
+
 void
 postMergeRegions(RegionInfo[] regionsToMerge)

-
+
 void
 postMergeRegionsCommit(RegionInfo[] regionsToMerge,
   RegionInfo mergedRegion,
@@ -456,61 +460,61 @@ extends Invoked after merge
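
The first changed row above is the new postIsRpcThrottleEnabled hook on MasterCoprocessorHost. As a sketch of how a coprocessor might observe it (the ObserverContext parameter and its name are assumed from the MasterObserver interface of the same release, not verified here):

    import java.io.IOException;
    import java.util.Optional;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.MasterObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;

    // Sketch only: the hook signature is an assumption based on the row above.
    public class ThrottleAuditObserver implements MasterCoprocessor, MasterObserver {
      @Override
      public Optional<MasterObserver> getMasterObserver() {
        return Optional.of(this);
      }

      @Override
      public void postIsRpcThrottleEnabled(ObserverContext<MasterCoprocessorEnvironment> ctx,
          boolean enabled) throws IOException {
        // Invoked after a client asks whether RPC throttling is enabled.
        System.out.println("isRpcThrottleEnabled answered: " + enabled);
      }
    }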

[22/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRegionLocator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRegionLocator.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRegionLocator.html
index dc4399b..a628974 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRegionLocator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRegionLocator.html
@@ -26,148 +26,147 @@
 018package org.apache.hadoop.hbase.client;
 019
 020import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-021import static org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil.findException;
-022import static org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil.isMetaClearingException;
-023
-024import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
-025import org.apache.hbase.thirdparty.io.netty.util.Timeout;
-026
-027import java.util.concurrent.CompletableFuture;
-028import java.util.concurrent.TimeUnit;
-029import java.util.function.Consumer;
-030import java.util.function.Function;
-031import java.util.function.Supplier;
-032
-033import org.apache.hadoop.hbase.HRegionLocation;
-034import org.apache.hadoop.hbase.TableName;
-035import org.apache.yetus.audience.InterfaceAudience;
-036import org.slf4j.Logger;
-037import org.slf4j.LoggerFactory;
-038import org.apache.hadoop.hbase.exceptions.RegionMovedException;
-039import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-040import org.apache.hadoop.hbase.util.Bytes;
-041
-042/**
-043 * The asynchronous region locator.
-044 */
-045@InterfaceAudience.Private
-046class AsyncRegionLocator {
+021import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
+022
+023import java.util.concurrent.CompletableFuture;
+024import java.util.concurrent.TimeUnit;
+025import java.util.function.Supplier;
+026import org.apache.hadoop.hbase.HBaseIOException;
+027import org.apache.hadoop.hbase.HRegionLocation;
+028import org.apache.hadoop.hbase.RegionException;
+029import org.apache.hadoop.hbase.RegionLocations;
+030import org.apache.hadoop.hbase.TableName;
+031import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
+032import org.apache.hadoop.hbase.util.Bytes;
+033import org.apache.yetus.audience.InterfaceAudience;
+034import org.slf4j.Logger;
+035import org.slf4j.LoggerFactory;
+036
+037import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
+038import org.apache.hbase.thirdparty.io.netty.util.Timeout;
+039
+040/**
+041 * The asynchronous region locator.
+042 */
+043@InterfaceAudience.Private
+044class AsyncRegionLocator {
+045
+046  private static final Logger LOG = LoggerFactory.getLogger(AsyncRegionLocator.class);
 047
-048  private static final Logger LOG = LoggerFactory.getLogger(AsyncRegionLocator.class);
+048  private final HashedWheelTimer retryTimer;
 049
-050  private final HashedWheelTimer retryTimer;
+050  private final AsyncMetaRegionLocator metaRegionLocator;
 051
-052  private final AsyncMetaRegionLocator metaRegionLocator;
+052  private final AsyncNonMetaRegionLocator nonMetaRegionLocator;
 053
-054  private final AsyncNonMetaRegionLocator nonMetaRegionLocator;
-055
-056  AsyncRegionLocator(AsyncConnectionImpl conn, HashedWheelTimer retryTimer) {
-057    this.metaRegionLocator = new AsyncMetaRegionLocator(conn.registry);
-058    this.nonMetaRegionLocator = new AsyncNonMetaRegionLocator(conn);
-059    this.retryTimer = retryTimer;
-060  }
-061
-062  private CompletableFuture<HRegionLocation> withTimeout(CompletableFuture<HRegionLocation> future,
-063      long timeoutNs, Supplier<String> timeoutMsg) {
-064    if (future.isDone() || timeoutNs <= 0) {
-065      return future;
-066    }
-067    Timeout timeoutTask = retryTimer.newTimeout(t -> {
-068      if (future.isDone()) {
-069        return;
-070      }
-071      future.completeExceptionally(new TimeoutIOException(timeoutMsg.get()));
-072    }, timeoutNs, TimeUnit.NANOSECONDS);
-073    return future.whenComplete((loc, error) -> {
-074      if (error != null && error.getClass() != TimeoutIOException.class) {
-075        // cancel timeout task if we are not completed by it.
-076        timeoutTask.cancel();
-077      }
-078    });
-079  }
-080
-081  CompletableFuture<HRegionLocation> getRegionLocation(TableName tableName, byte[] row,
-082      RegionLocateType type, boolean reload, long timeoutNs) {
-083    // meta region can not be split right now so we always call the same method.
-084    // Change it later if the meta table can have more than one regions.
-085    CompletableFuture<HRegionLocation> future =
-086        tableName.equals(META_TABLE_NAME) ? metaRegionLocator.getRegionLocation(reload)
-087            : nonMetaRegionLocator.getRegionLocation(tableName, row, type, reload);
-088    return withTimeout(future, timeoutNs,
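
withTimeout() above arms a wheel-timer task that fails the location future with TimeoutIOException unless the future completes first, and cancels the timer task when the future completes by any other means. The same pattern, sketched with a plain ScheduledExecutorService in place of Netty's HashedWheelTimer (names here are illustrative, not HBase API):

    import java.util.concurrent.*;
    import java.util.function.Supplier;

    final class TimeoutFutures {
      private static final ScheduledExecutorService TIMER =
          Executors.newSingleThreadScheduledExecutor();

      static <T> CompletableFuture<T> withTimeout(CompletableFuture<T> future,
          long timeoutNs, Supplier<String> timeoutMsg) {
        if (future.isDone() || timeoutNs <= 0) {
          return future;
        }
        // Arm a timer that fails the future unless it completes first.
        ScheduledFuture<?> task = TIMER.schedule(() -> {
          if (!future.isDone()) {
            future.completeExceptionally(new TimeoutException(timeoutMsg.get()));
          }
        }, timeoutNs, TimeUnit.NANOSECONDS);
        // Cancel the timer if the future completes for any other reason.
        return future.whenComplete((v, err) -> {
          if (!(err instanceof TimeoutException)) {
            task.cancel(false);
          }
        });
      }
    }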

[22/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/devapidocs/src-html/org/apache/hadoop/hbase/thrift/IncrementCoalescer.FullyQualifiedRow.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/IncrementCoalescer.FullyQualifiedRow.html b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/IncrementCoalescer.FullyQualifiedRow.html
index d7e790d..1648cbc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/IncrementCoalescer.FullyQualifiedRow.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/IncrementCoalescer.FullyQualifiedRow.html
@@ -41,203 +41,203 @@
 033import java.util.concurrent.atomic.LongAdder;
 034import org.apache.hadoop.hbase.CellUtil;
 035import org.apache.hadoop.hbase.client.Table;
-036import org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler;
-037import org.apache.hadoop.hbase.thrift.generated.TIncrement;
-038import org.apache.hadoop.hbase.util.Bytes;
-039import org.apache.hadoop.hbase.util.Threads;
-040import org.apache.hadoop.metrics2.util.MBeans;
-041import org.apache.thrift.TException;
-042import org.apache.yetus.audience.InterfaceAudience;
-043import org.slf4j.Logger;
-044import org.slf4j.LoggerFactory;
-045
-046/**
-047 * This class will coalesce increments from a thrift server if
-048 * hbase.regionserver.thrift.coalesceIncrement is set to true. Turning this
-049 * config to true will cause the thrift server to queue increments into an
-050 * instance of this class. The thread pool associated with this class will drain
-051 * the coalesced increments as the thread is able. This can cause data loss if the
-052 * thrift server dies or is shut down before everything in the queue is drained.
-053 *
-054 */
-055@InterfaceAudience.Private
-056public class IncrementCoalescer implements IncrementCoalescerMBean {
-057
-058  /**
-059   * Used to identify a cell that will be incremented.
-060   *
-061   */
-062  static class FullyQualifiedRow {
-063    private byte[] table;
-064    private byte[] rowKey;
-065    private byte[] family;
-066    private byte[] qualifier;
-067
-068    public FullyQualifiedRow(byte[] table, byte[] rowKey, byte[] fam, byte[] qual) {
-069      super();
-070      this.table = table;
-071      this.rowKey = rowKey;
-072      this.family = fam;
-073      this.qualifier = qual;
-074    }
-075
-076    public byte[] getTable() {
-077      return table;
-078    }
-079
-080    public void setTable(byte[] table) {
-081      this.table = table;
-082    }
-083
-084    public byte[] getRowKey() {
-085      return rowKey;
-086    }
-087
-088    public void setRowKey(byte[] rowKey) {
-089      this.rowKey = rowKey;
-090    }
-091
-092    public byte[] getFamily() {
-093      return family;
-094    }
-095
-096    public void setFamily(byte[] fam) {
-097      this.family = fam;
-098    }
-099
-100    public byte[] getQualifier() {
-101      return qualifier;
-102    }
-103
-104    public void setQualifier(byte[] qual) {
-105      this.qualifier = qual;
-106    }
-107
-108    @Override
-109    public int hashCode() {
-110      final int prime = 31;
-111      int result = 1;
-112      result = prime * result + Arrays.hashCode(family);
-113      result = prime * result + Arrays.hashCode(qualifier);
-114      result = prime * result + Arrays.hashCode(rowKey);
-115      result = prime * result + Arrays.hashCode(table);
-116      return result;
-117    }
-118
-119    @Override
-120    public boolean equals(Object obj) {
-121      if (this == obj) {
-122        return true;
-123      }
-124      if (obj == null) {
-125        return false;
-126      }
-127      if (getClass() != obj.getClass()) {
-128        return false;
-129      }
-130
-131      FullyQualifiedRow other = (FullyQualifiedRow) obj;
-132
-133      if (!Arrays.equals(family, other.family)) {
-134        return false;
-135      }
-136      if (!Arrays.equals(qualifier, other.qualifier)) {
-137        return false;
-138      }
-139      if (!Arrays.equals(rowKey, other.rowKey)) {
-140        return false;
-141      }
-142      if (!Arrays.equals(table, other.table)) {
-143        return false;
-144      }
-145      return true;
-146    }
-147
-148  }
-149
-150  static class DaemonThreadFactory implements ThreadFactory {
-151    static final AtomicInteger poolNumber = new AtomicInteger(1);
-152    final ThreadGroup group;
-153    final AtomicInteger threadNumber = new AtomicInteger(1);
-154    final String namePrefix;
-155
-156    DaemonThreadFactory() {
-157      SecurityManager s = System.getSecurityManager();
-158      group = (s != null) ? s.getThreadGroup() : Thread.currentThread().getThreadGroup();
-159      namePrefix = "ICV-" + poolNumber.getAndIncrement() + "-thread-";
-160    }
-161
-162    @Override
-163    public Thread newThread(Runnable r) {
-164      Thread t = new Thread(group, r, namePrefix + threadNumber.getAndIncrement(), 0);
-165
-166
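
FullyQualifiedRow exists so increments aimed at the same cell hash to the same key and can be merged before being applied; its equals()/hashCode() over the four byte[] components is what makes the coalescing map work. A compact sketch of that idea (illustrative, Java 16+ record syntax, not the HBase implementation):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    final class CoalescerSketch {
      // Stand-in for FullyQualifiedRow; a record gives equals/hashCode for free.
      record CellKey(String table, String row, String family, String qualifier) {}

      private final Map<CellKey, Long> pending = new ConcurrentHashMap<>();

      void queueIncrement(CellKey cell, long amount) {
        // Many increments to one cell collapse into a single pending sum.
        pending.merge(cell, amount, Long::sum);
      }

      void drain() {
        // A worker pool would apply each summed increment as one RPC. If the
        // process dies before drain(), pending sums are lost -- the data-loss
        // caveat called out in the class comment above.
        pending.forEach((cell, sum) ->
            System.out.println("increment " + cell + " by " + sum));
        pending.clear();
      }
    }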

[22/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.SecureWriter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.SecureWriter.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.SecureWriter.html
index 77ae857..561a89b 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.SecureWriter.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.SecureWriter.html
@@ -372,6 +372,6 @@ implements Closeable


-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.html
index 96634ca..f118147 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.html
@@ -767,6 +767,6 @@ implements
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/coprocessor/HasMasterServices.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/HasMasterServices.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/HasMasterServices.html
index 653de2e..70ed022 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/HasMasterServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/HasMasterServices.html
@@ -240,6 +240,6 @@ public interface
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.html
index d0c73f2..833534a 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.html
@@ -240,6 +240,6 @@ public interface
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.html
index 9314910..3f33174 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.html
@@ -265,6 +265,6 @@ extends


-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.html
index ef75d67..50686f2 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.html
@@ -343,6 +343,6 @@ extends
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html

[22/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.html b/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.html
index fbdee47..2aec855 100644
--- a/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.html
+++ b/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":9,"i26":9,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":9,"i49":9,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class ThriftHBaseServiceHandler
+public class ThriftHBaseServiceHandler
 extends Object
 implements org.apache.hadoop.hbase.thrift2.generated.THBaseService.Iface
 This class is a glue object that connects Thrift RPC calls to the HBase client API primarily
@@ -226,17 +226,22 @@ implements org.apache.hadoop.hbase.thrift2.generated.THBaseService.Iface
 Method and Description


+void
+addColumnFamily(org.apache.hadoop.hbase.thrift2.generated.TTableName tableName,
+    org.apache.hadoop.hbase.thrift2.generated.TColumnFamilyDescriptor column)
+
+
 private int
 addScanner(ResultScanner scanner)
 Assigns a unique ID to the scanner and adds the mapping to an internal HashMap.

-
+
 org.apache.hadoop.hbase.thrift2.generated.TResult
 append(ByteBuffer table,
   org.apache.hadoop.hbase.thrift2.generated.TAppend append)

-
+
 boolean
 checkAndDelete(ByteBuffer table,
   ByteBuffer row,
@@ -248,7 +253,7 @@ implements org.apache.hadoop.hbase.thrift2.generated.THBaseService.Iface
  value.

-
+
 boolean
 checkAndMutate(ByteBuffer table,
   ByteBuffer row,
@@ -261,7 +266,7 @@ implements org.apache.hadoop.hbase.thrift2.generated.THBaseService.Iface
  value.

-
+
 boolean
 checkAndPut(ByteBuffer table,
   ByteBuffer row,
@@ -273,73 +278,107 @@ implements org.apache.hadoop.hbase.thrift2.generated.THBaseService.Iface
  value.

-
+
 private void
 checkReadOnlyMode()

-
+
 void
 closeScanner(int scannerId)
 Closes the scanner.

-
+
 private void
 closeTable(Table table)

-
+
+void
+createNamespace(org.apache.hadoop.hbase.thrift2.generated.TNamespaceDescriptor namespaceDesc)
+
+
+void
+createTable(org.apache.hadoop.hbase.thrift2.generated.TTableDescriptor desc,
+    List<ByteBuffer> splitKeys)
+
+
+void
+deleteColumnFamily(org.apache.hadoop.hbase.thrift2.generated.TTableName tableName,
+    ByteBuffer column)
+
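
The added rows show table/namespace DDL (createTable, createNamespace, addColumnFamily, deleteColumnFamily) joining the thrift2 handler. A client-side sketch, assuming the generated THBaseService client and the createTable signature shown in the table above (host, port, and the empty split-key list are placeholders):

    import java.nio.ByteBuffer;
    import java.util.Collections;
    import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
    import org.apache.hadoop.hbase.thrift2.generated.TTableDescriptor;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;

    // Sketch only: assumes the generated thrift2 client matching this release.
    public class ThriftDdlSketch {
      public static void main(String[] args) throws Exception {
        TSocket socket = new TSocket("localhost", 9090); // placeholder endpoint
        socket.open();
        try {
          THBaseService.Client client =
              new THBaseService.Client(new TBinaryProtocol(socket));
          TTableDescriptor desc = new TTableDescriptor(); // populate name/families
          client.createTable(desc, Collections.<ByteBuffer>emptyList());
        } finally {
          socket.close();
        }
      }
    }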
 

[22/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
index f12e2ad..075304a 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";


 @InterfaceAudience.Private
-public class RSRpcServices
+public class RSRpcServices
 extends Object
 implements HBaseRPCErrorHandler, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface,
 org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface, PriorityFunction, ConfigurationObserver
 Implements the regionserver RPC services.
@@ -1016,7 +1016,7 @@ implements

 LOG
-protected static final org.slf4j.Logger LOG
+protected static final org.slf4j.Logger LOG


@@ -1025,7 +1025,7 @@ implements

 REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS
-public static final String REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS
+public static final String REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS
 RPC scheduler to use for the region server.

 See Also:
@@ -1039,7 +1039,7 @@ implements

 MASTER_RPC_SCHEDULER_FACTORY_CLASS
-public static final String MASTER_RPC_SCHEDULER_FACTORY_CLASS
+public static final String MASTER_RPC_SCHEDULER_FACTORY_CLASS
 RPC scheduler to use for the master.

 See Also:
@@ -1053,7 +1053,7 @@ implements

 REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
-private static final String REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
+private static final String REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
 Minimum allowable time limit delta (in milliseconds) that can be enforced during scans. This
 configuration exists to prevent the scenario where a time limit is specified to be so
 restrictive that the time limit is reached immediately (before any cells are scanned).
@@ -1069,7 +1069,7 @@ implements

 DEFAULT_REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
-private static final long DEFAULT_REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
+private static final long DEFAULT_REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
 Default value of REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA

 See Also:
@@ -1083,7 +1083,7 @@ implements

 BATCH_ROWS_THRESHOLD_NAME
-static final String BATCH_ROWS_THRESHOLD_NAME
+static final String BATCH_ROWS_THRESHOLD_NAME
 Number of rows in a batch operation above which a warning will be logged.

 See Also:
@@ -1097,7 +1097,7 @@ implements

 BATCH_ROWS_THRESHOLD_DEFAULT
-static final int BATCH_ROWS_THRESHOLD_DEFAULT
+static final int BATCH_ROWS_THRESHOLD_DEFAULT
 Default value of BATCH_ROWS_THRESHOLD_NAME

 See Also:
@@ -1111,7 +1111,7 @@ implements

 RESERVOIR_ENABLED_KEY
-protected static final String RESERVOIR_ENABLED_KEY
+protected static final String RESERVOIR_ENABLED_KEY

 See Also:
 Constant Field Values
@@ -1124,7 +1124,7 @@ implements

 requestCount
-final LongAdder requestCount
+final LongAdder requestCount


@@ -1133,7 +1133,7 @@ implements
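
Several of the constants above are configuration keys with documented defaults. A sketch of reading one of them; the literal key ("hbase.rpc.rows.warning.threshold") and the default 5000 behind BATCH_ROWS_THRESHOLD_NAME / BATCH_ROWS_THRESHOLD_DEFAULT are stated from memory and worth verifying against the release in question:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BatchThresholdSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Key name and default are assumptions, not verified here.
        int threshold = conf.getInt("hbase.rpc.rows.warning.threshold", 5000);
        System.out.println("warn when a multi() touches more than " + threshold + " rows");
      }
    }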
 
 

[22/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.DummyServer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.DummyServer.html b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.DummyServer.html
index 6f82cee..3bf3150 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.DummyServer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.DummyServer.html
@@ -40,162 +40,156 @@
 032import org.apache.hadoop.hbase.client.ClusterConnection;
 033import org.apache.hadoop.hbase.client.Connection;
 034import org.apache.hadoop.hbase.util.FSUtils;
-035import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-036import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-037import org.apache.hadoop.util.Tool;
-038import org.apache.hadoop.util.ToolRunner;
-039import org.apache.yetus.audience.InterfaceAudience;
-040
-041/**
-042 * In a scenario of Replication based Disaster/Recovery, when hbase Master-Cluster crashes, this
-043 * tool is used to sync-up the delta from Master to Slave using the info from ZooKeeper. The tool
-044 * will run on Master-Cluster, and assume ZK, Filesystem and Network still available after hbase
-045 * crashes
-046 *
-047 * <pre>
-048 * hbase org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp
-049 * </pre>
-050 */
-051@InterfaceAudience.Private
-052public class ReplicationSyncUp extends Configured implements Tool {
-053
-054  private static final long SLEEP_TIME = 10000;
-055
-056  /**
-057   * Main program
-058   */
-059  public static void main(String[] args) throws Exception {
-060    int ret = ToolRunner.run(HBaseConfiguration.create(), new ReplicationSyncUp(), args);
-061    System.exit(ret);
-062  }
-063
-064  @Override
-065  public int run(String[] args) throws Exception {
-066    Abortable abortable = new Abortable() {
-067      @Override
-068      public void abort(String why, Throwable e) {
-069      }
-070
-071      @Override
-072      public boolean isAborted() {
-073        return false;
-074      }
-075    };
-076    Configuration conf = getConf();
-077    try (ZKWatcher zkw =
-078      new ZKWatcher(conf, "syncupReplication" + System.currentTimeMillis(), abortable, true)) {
-079      Path walRootDir = FSUtils.getWALRootDir(conf);
-080      FileSystem fs = FSUtils.getWALFileSystem(conf);
-081      Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
-082      Path logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME);
-083
-084      System.out.println("Start Replication Server start");
-085      Replication replication = new Replication();
-086      replication.initialize(new DummyServer(zkw), fs, logDir, oldLogDir, null);
-087      ReplicationSourceManager manager = replication.getReplicationManager();
-088      manager.init().get();
-089      while (manager.activeFailoverTaskCount() > 0) {
-090        Thread.sleep(SLEEP_TIME);
-091      }
-092      while (manager.getOldSources().size() > 0) {
-093        Thread.sleep(SLEEP_TIME);
-094      }
-095      manager.join();
-096    } catch (InterruptedException e) {
-097      System.err.println("didn't wait long enough:" + e);
-098      return -1;
-099    }
-100    return 0;
-101  }
-102
-103  class DummyServer implements Server {
-104    String hostname;
-105    ZKWatcher zkw;
-106
-107    DummyServer(ZKWatcher zkw) {
-108      // a unique name in case the first run fails
-109      hostname = System.currentTimeMillis() + ".SyncUpTool.replication.org";
-110      this.zkw = zkw;
-111    }
-112
-113    DummyServer(String hostname) {
-114      this.hostname = hostname;
-115    }
-116
-117    @Override
-118    public Configuration getConfiguration() {
-119      return getConf();
-120    }
-121
-122    @Override
-123    public ZKWatcher getZooKeeper() {
-124      return zkw;
-125    }
-126
-127    @Override
-128    public CoordinatedStateManager getCoordinatedStateManager() {
-129      return null;
-130    }
-131
-132    @Override
-133    public MetaTableLocator getMetaTableLocator() {
-134      return null;
-135    }
-136
-137    @Override
-138    public ServerName getServerName() {
-139      return ServerName.valueOf(hostname, 1234, 1L);
-140    }
-141
-142    @Override
-143    public void abort(String why, Throwable e) {
-144    }
-145
-146    @Override
-147    public boolean isAborted() {
-148      return false;
-149    }
-150
-151    @Override
-152    public void stop(String why) {
-153    }
-154
-155    @Override
-156    public boolean isStopped() {
-157      return false;
-158    }
-159
-160    @Override
-161    public ClusterConnection getConnection() {
-162      return null;
-163    }
-164
-165
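
ReplicationSyncUp is a classic Hadoop Configured/Tool/ToolRunner command. A minimal skeleton of that pattern (class name and body are illustrative, not HBase code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.Configured;
    import org.apache.hadoop.util.Tool;
    import org.apache.hadoop.util.ToolRunner;

    public class SyncUpSkeleton extends Configured implements Tool {
      @Override
      public int run(String[] args) throws Exception {
        Configuration conf = getConf(); // populated by ToolRunner (-D overrides, etc.)
        System.out.println("zookeeper quorum = " + conf.get("hbase.zookeeper.quorum"));
        return 0; // non-zero signals failure to the shell
      }

      public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new Configuration(), new SyncUpSkeleton(), args));
      }
    }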

[22/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.html
index 416b70e..68e6797 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.html
@@ -75,13 +75,13 @@ var activeTableTab = "activeTableTab";
 
 Summary:
 Nested|
-Field|
+Field|
 Constr|
 Method
 
 
 Detail:
-Field|
+Field|
 Constr|
 Method
 
@@ -134,6 +134,28 @@ public interface 
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+static TableName
+DUMMY_NAMESPACE_TABLE_NAME
+Used for acquire/release lock for namespace related operations, just a placeholder as we do
+ not have a namespace table any more.
+
+
+
+
+
 
 
 
@@ -165,6 +187,25 @@ public interface 
 
 
+
+
+
+
+
+Field Detail
+
+
+
+
+
+DUMMY_NAMESPACE_TABLE_NAME
+static final TableName DUMMY_NAMESPACE_TABLE_NAME
+Used for acquire/release lock for namespace related operations, just a placeholder as we do
+ not have a namespace table any more.
+
+
+
+
 
 
 
@@ -177,7 +218,7 @@ public interface 
 
 getTableName
-TableName getTableName()
+TableName getTableName()
 
 Returns:
 the name of the table the procedure is operating on
@@ -190,7 +231,7 @@ public interface 
 
 getTableOperationType
-TableProcedureInterface.TableOperationType getTableOperationType()
+TableProcedureInterface.TableOperationType getTableOperationType()
 Given an operation type we can take decisions about what to 
do with pending operations.
  e.g. if we get a delete and we have some table operation pending (e.g. add 
column)
  we can abort those operations.
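
A sketch of a procedure implementing this interface, to make the contract concrete (the class and the EDIT choice are illustrative; a real procedure would also extend Procedure<MasterProcedureEnv>):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface;

    public class EditTableProcedureSketch implements TableProcedureInterface {
      private final TableName tableName;

      public EditTableProcedureSketch(TableName tableName) {
        this.tableName = tableName;
      }

      @Override
      public TableName getTableName() {
        return tableName;
      }

      @Override
      public TableOperationType getTableOperationType() {
        // EDIT tells the scheduler this may conflict with, e.g., a pending DELETE.
        return TableOperationType.EDIT;
      }
    }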
@@ -254,13 +295,13 @@ public interface 
 Summary:
 Nested|
-Field|
+Field|
 Constr|
 Method
 
 
 Detail:
-Field|
+Field|
 Constr|
 Method
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.html
index 4aa9829..e0d8887 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.html
@@ -213,6 +213,13 @@ extends Procedure
 NO_PROC_ID,
 NO_TIMEOUT
 
+
+
+
+
+Fields inherited from interface org.apache.hadoop.hbase.master.procedure.TableProcedureInterface
+DUMMY_NAMESPACE_TABLE_NAME
+
 
 
 



[22/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AccessController.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AccessController.html b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AccessController.html
index 25b7848..5c428b5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AccessController.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AccessController.html
@@ -245,20 +245,20 @@
 237    return regionEnv != null ? regionEnv.getRegion() : null;
 238  }
 239
-240  public TableAuthManager getAuthManager() {
+240  public AuthManager getAuthManager() {
 241    return accessChecker.getAuthManager();
 242  }
 243
 244  private void initialize(RegionCoprocessorEnvironment e) throws IOException {
 245    final Region region = e.getRegion();
 246    Configuration conf = e.getConfiguration();
-247    Map<byte[], ListMultimap<String, TablePermission>> tables = AccessControlLists.loadAll(region);
+247    Map<byte[], ListMultimap<String, UserPermission>> tables = AccessControlLists.loadAll(region);
 248    // For each table, write out the table's permissions to the respective
 249    // znode for that table.
-250    for (Map.Entry<byte[], ListMultimap<String, TablePermission>> t:
+250    for (Map.Entry<byte[], ListMultimap<String, UserPermission>> t:
 251      tables.entrySet()) {
 252      byte[] entry = t.getKey();
-253      ListMultimap<String, TablePermission> perms = t.getValue();
+253      ListMultimap<String, UserPermission> perms = t.getValue();
 254      byte[] serialized = AccessControlLists.writePermissionsAsBytes(perms, conf);
 255      getAuthManager().getZKPermissionWatcher().writeToZookeeper(entry, serialized);
 256    }
@@ -294,7 +294,7 @@
 286    try (Table t = e.getConnection().getTable(AccessControlLists.ACL_TABLE_NAME)) {
 287      for (byte[] entry : entries) {
 288        currentEntry = entry;
-289        ListMultimap<String, TablePermission> perms =
+289        ListMultimap<String, UserPermission> perms =
 290            AccessControlLists.getPermissions(conf, entry, t, null, null, null, false);
 291        byte[] serialized = AccessControlLists.writePermissionsAsBytes(perms, conf);
 292        zkw.writeToZookeeper(entry, serialized);
@@ -338,7 +338,7 @@
 330    }
 331
 332    // 2. check for the table-level, if successful we can short-circuit
-333    if (getAuthManager().authorize(user, tableName, (byte[])null, permRequest)) {
+333    if (getAuthManager().authorizeUserTable(user, tableName, permRequest)) {
 334      return AuthResult.allow(request, "Table permission granted", user,
 335        permRequest, tableName, families);
 336    }
@@ -348,7 +348,7 @@
 340      // all families must pass
 341      for (Map.Entry<byte [], ? extends Collection<?>> family : families.entrySet()) {
 342        // a) check for family level access
-343        if (getAuthManager().authorize(user, tableName, family.getKey(),
+343        if (getAuthManager().authorizeUserTable(user, tableName, family.getKey(),
 344            permRequest)) {
 345          continue;  // family-level permission overrides per-qualifier
 346        }
@@ -359,17 +359,17 @@
 351        // for each qualifier of the family
 352        Set<byte[]> familySet = (Set<byte[]>)family.getValue();
 353        for (byte[] qualifier : familySet) {
-354          if (!getAuthManager().authorize(user, tableName, family.getKey(),
-355              qualifier, permRequest)) {
+354          if (!getAuthManager().authorizeUserTable(user, tableName,
+355            family.getKey(), qualifier, permRequest)) {
 356            return AuthResult.deny(request, "Failed qualifier check", user,
-357                permRequest, tableName, makeFamilyMap(family.getKey(), qualifier));
+357              permRequest, tableName, makeFamilyMap(family.getKey(), qualifier));
 358          }
 359        }
 360      } else if (family.getValue() instanceof List) { // List<Cell>
 361        List<Cell> cellList = (List<Cell>)family.getValue();
 362        for (Cell cell : cellList) {
-363          if (!getAuthManager().authorize(user, tableName, family.getKey(),
-364              CellUtil.cloneQualifier(cell), permRequest)) {
+363          if (!getAuthManager().authorizeUserTable(user, tableName, family.getKey(),
+364              CellUtil.cloneQualifier(cell), permRequest)) {
 365            return AuthResult.deny(request, "Failed qualifier check", user, permRequest,
 366              tableName, makeFamilyMap(family.getKey(), CellUtil.cloneQualifier(cell)));
 367          }
@@ -378,7 +378,7 @@
 370    } else {
 371      // no qualifiers and family-level check already failed
 372      return
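
The hunks above preserve a layered short-circuit: a table-level grant allows everything, a family-level grant skips per-qualifier checks, and a single failed qualifier denies the request. The control flow in isolation (illustrative sketch, not HBase code):

    import java.util.List;
    import java.util.Map;

    final class LayeredAuthSketch {
      interface Authorizer {
        boolean allowTable(String user, String table);
        boolean allowFamily(String user, String table, String family);
        boolean allowQualifier(String user, String table, String family, String qualifier);
      }

      static boolean permitted(Authorizer auth, String user, String table,
          Map<String, List<String>> families) {
        if (auth.allowTable(user, table)) {
          return true; // table-level grant short-circuits everything below
        }
        for (Map.Entry<String, List<String>> fam : families.entrySet()) {
          if (auth.allowFamily(user, table, fam.getKey())) {
            continue; // family-level grant overrides per-qualifier checks
          }
          for (String qual : fam.getValue()) {
            if (!auth.allowQualifier(user, table, fam.getKey(), qual)) {
              return false; // one failed qualifier denies the request
            }
          }
        }
        return true;
      }
    }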

[22/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.

2018-10-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
index b2a9771..bf81ebb 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
@@ -46,3768 +46,3806 @@
 038import java.util.Objects;
 039import java.util.Set;
 040import java.util.SortedMap;
-041import java.util.TreeMap;
-042import java.util.TreeSet;
-043import java.util.concurrent.ConcurrentHashMap;
-044import java.util.concurrent.ConcurrentMap;
-045import java.util.concurrent.ConcurrentSkipListMap;
-046import java.util.concurrent.atomic.AtomicBoolean;
-047import java.util.concurrent.locks.ReentrantReadWriteLock;
-048import java.util.function.Function;
-049import javax.management.MalformedObjectNameException;
-050import javax.servlet.http.HttpServlet;
-051import org.apache.commons.lang3.RandomUtils;
-052import org.apache.commons.lang3.StringUtils;
-053import org.apache.commons.lang3.SystemUtils;
-054import org.apache.hadoop.conf.Configuration;
-055import org.apache.hadoop.fs.FileSystem;
-056import org.apache.hadoop.fs.Path;
-057import org.apache.hadoop.hbase.Abortable;
-058import org.apache.hadoop.hbase.CacheEvictionStats;
-059import org.apache.hadoop.hbase.ChoreService;
-060import org.apache.hadoop.hbase.ClockOutOfSyncException;
-061import org.apache.hadoop.hbase.CoordinatedStateManager;
-062import org.apache.hadoop.hbase.DoNotRetryIOException;
-063import org.apache.hadoop.hbase.HBaseConfiguration;
-064import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-065import org.apache.hadoop.hbase.HConstants;
-066import org.apache.hadoop.hbase.HealthCheckChore;
-067import org.apache.hadoop.hbase.MetaTableAccessor;
-068import org.apache.hadoop.hbase.NotServingRegionException;
-069import org.apache.hadoop.hbase.PleaseHoldException;
-070import org.apache.hadoop.hbase.ScheduledChore;
-071import org.apache.hadoop.hbase.ServerName;
-072import org.apache.hadoop.hbase.Stoppable;
-073import org.apache.hadoop.hbase.TableDescriptors;
-074import org.apache.hadoop.hbase.TableName;
-075import org.apache.hadoop.hbase.YouAreDeadException;
-076import org.apache.hadoop.hbase.ZNodeClearer;
-077import org.apache.hadoop.hbase.client.ClusterConnection;
-078import org.apache.hadoop.hbase.client.Connection;
-079import org.apache.hadoop.hbase.client.ConnectionUtils;
-080import org.apache.hadoop.hbase.client.RegionInfo;
-081import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-082import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
-083import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-084import org.apache.hadoop.hbase.client.locking.EntityLock;
-085import org.apache.hadoop.hbase.client.locking.LockServiceClient;
-086import org.apache.hadoop.hbase.conf.ConfigurationManager;
-087import org.apache.hadoop.hbase.conf.ConfigurationObserver;
-088import org.apache.hadoop.hbase.coordination.SplitLogWorkerCoordination;
-089import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
-090import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-091import org.apache.hadoop.hbase.exceptions.RegionMovedException;
-092import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
-093import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-094import org.apache.hadoop.hbase.executor.ExecutorService;
-095import org.apache.hadoop.hbase.executor.ExecutorType;
-096import org.apache.hadoop.hbase.fs.HFileSystem;
-097import org.apache.hadoop.hbase.http.InfoServer;
-098import org.apache.hadoop.hbase.io.hfile.BlockCache;
-099import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-100import org.apache.hadoop.hbase.io.hfile.HFile;
-101import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
-102import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-103import org.apache.hadoop.hbase.ipc.NettyRpcClientConfigHelper;
-104import org.apache.hadoop.hbase.ipc.RpcClient;
-105import org.apache.hadoop.hbase.ipc.RpcClientFactory;
-106import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-107import org.apache.hadoop.hbase.ipc.RpcServer;
-108import org.apache.hadoop.hbase.ipc.RpcServerInterface;
-109import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-110import org.apache.hadoop.hbase.ipc.ServerRpcController;
-111import org.apache.hadoop.hbase.log.HBaseMarkers;
-112import org.apache.hadoop.hbase.master.HMaster;
-113import org.apache.hadoop.hbase.master.LoadBalancer;
-114import org.apache.hadoop.hbase.master.RegionState.State;
-115import

[22/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
index c7d99b2..9d1542c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
@@ -382,1357 +382,1365 @@
 374    for (int i = 0; i < this.curFunctionCosts.length; i++) {
 375      curFunctionCosts[i] = tempFunctionCosts[i];
 376    }
-377    LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost + ", functionCost="
-378        + functionCost());
+377    double initCost = currentCost;
+378    double newCost = currentCost;
 379
-380    double initCost = currentCost;
-381    double newCost = currentCost;
-382
-383    long computedMaxSteps;
-384    if (runMaxSteps) {
-385      computedMaxSteps = Math.max(this.maxSteps,
-386          ((long)cluster.numRegions * (long)this.stepsPerRegion * (long)cluster.numServers));
-387    } else {
-388      computedMaxSteps = Math.min(this.maxSteps,
-389          ((long)cluster.numRegions * (long)this.stepsPerRegion * (long)cluster.numServers));
-390    }
-391    // Perform a stochastic walk to see if we can get a good fit.
-392    long step;
-393
-394    for (step = 0; step < computedMaxSteps; step++) {
-395      Cluster.Action action = nextAction(cluster);
-396
-397      if (action.type == Type.NULL) {
-398        continue;
-399      }
-400
-401      cluster.doAction(action);
-402      updateCostsWithAction(cluster, action);
-403
-404      newCost = computeCost(cluster, currentCost);
-405
-406      // Should this be kept?
-407      if (newCost < currentCost) {
-408        currentCost = newCost;
-409
-410        // save for JMX
-411        curOverallCost = currentCost;
-412        for (int i = 0; i < this.curFunctionCosts.length; i++) {
-413          curFunctionCosts[i] = tempFunctionCosts[i];
-414        }
-415      } else {
-416        // Put things back the way they were before.
-417        // TODO: undo by remembering old values
-418        Action undoAction = action.undoAction();
-419        cluster.doAction(undoAction);
-420        updateCostsWithAction(cluster, undoAction);
-421      }
-422
-423      if (EnvironmentEdgeManager.currentTime() - startTime >
-424          maxRunningTime) {
-425        break;
-426      }
-427    }
-428    long endTime = EnvironmentEdgeManager.currentTime();
-429
-430    metricsBalancer.balanceCluster(endTime - startTime);
-431
-432    // update costs metrics
-433    updateStochasticCosts(tableName, curOverallCost, curFunctionCosts);
-434    if (initCost > currentCost) {
-435      plans = createRegionPlans(cluster);
-436      LOG.info("Finished computing new load balance plan. Computation took {}" +
-437        " to try {} different iterations.  Found a solution that moves " +
-438        "{} regions; Going from a computed cost of {}" +
-439        " to a new cost of {}", java.time.Duration.ofMillis(endTime - startTime),
-440        step, plans.size(), initCost, currentCost);
-441      return plans;
-442    }
-443    LOG.info("Could not find a better load balance plan.  Tried {} different configurations in " +
-444      "{}, and did not find anything with a computed cost less than {}", step,
-445      java.time.Duration.ofMillis(endTime - startTime), initCost);
-446    return null;
-447  }
-448
-449  /**
-450   * update costs to JMX
-451   */
-452  private void updateStochasticCosts(TableName tableName, Double overall, Double[] subCosts) {
-453    if (tableName == null) return;
-454
-455    // check if the metricsBalancer is MetricsStochasticBalancer before casting
-456    if (metricsBalancer instanceof MetricsStochasticBalancer) {
-457      MetricsStochasticBalancer balancer = (MetricsStochasticBalancer) metricsBalancer;
-458      // overall cost
-459      balancer.updateStochasticCost(tableName.getNameAsString(),
-460        "Overall", "Overall cost", overall);
-461
-462      // each cost function
-463      for (int i = 0; i < costFunctions.length; i++) {
-464        CostFunction costFunction = costFunctions[i];
-465        String costFunctionName = costFunction.getClass().getSimpleName();
-466        Double costPercent = (overall == 0) ? 0 : (subCosts[i] / overall);
-467        // TODO: cost function may need a specific description
-468        balancer.updateStochasticCost(tableName.getNameAsString(), costFunctionName,
-469          "The percent of " + costFunctionName, costPercent);
-470  }
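
The loop above is a plain accept-or-undo stochastic walk: propose a random action, keep it only if the computed cost drops, otherwise undo it, and stop after a step or time budget. The same idea reduced to its core (illustrative sketch, not HBase code):

    import java.util.function.ToDoubleFunction;
    import java.util.function.UnaryOperator;

    final class GreedyWalkSketch {
      static <S> S minimize(S state, UnaryOperator<S> randomMove,
          ToDoubleFunction<S> cost, long maxSteps) {
        double best = cost.applyAsDouble(state);
        for (long step = 0; step < maxSteps; step++) {
          S candidate = randomMove.apply(state);
          double c = cost.applyAsDouble(candidate);
          if (c < best) { // keep improving moves...
            state = candidate;
            best = c;
          }               // ...and discard the rest (the undoAction() branch above)
        }
        return state;
      }
    }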

[22/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/allclasses-frame.html
--
diff --git a/testdevapidocs/allclasses-frame.html b/testdevapidocs/allclasses-frame.html
index 1e6ee81..054bf8a 100644
--- a/testdevapidocs/allclasses-frame.html
+++ b/testdevapidocs/allclasses-frame.html
@@ -2417,6 +2417,8 @@
 TestWALProcedureStore
 TestWALProcedureStore.TestSequentialProcedure
 TestWALProcedureStoreOnHDFS
+TestWALProcedureTree
+TestWALProcedureTree.TestProcedure
 TestWALReaderOnSecureWAL
 TestWALRecordReader
 TestWALReplay

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/allclasses-noframe.html
--
diff --git a/testdevapidocs/allclasses-noframe.html b/testdevapidocs/allclasses-noframe.html
index 131a5cd..94cee3ff 100644
--- a/testdevapidocs/allclasses-noframe.html
+++ b/testdevapidocs/allclasses-noframe.html
@@ -2417,6 +2417,8 @@
 TestWALProcedureStore
 TestWALProcedureStore.TestSequentialProcedure
 TestWALProcedureStoreOnHDFS
+TestWALProcedureTree
+TestWALProcedureTree.TestProcedure
 TestWALReaderOnSecureWAL
 TestWALRecordReader
 TestWALReplay

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/index-all.html
--
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index c78f132..8909b71 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -140,6 +140,8 @@
 
 abort(Void)
 - Method in class org.apache.hadoop.hbase.procedure2.store.wal.TestWALProcedureStore.TestSequentialProcedure
 
+abort(Void)
 - Method in class org.apache.hadoop.hbase.procedure2.store.wal.TestWALProcedureTree.TestProcedure
+
 abort(TestChildProcedures.TestProcEnv)
 - Method in class org.apache.hadoop.hbase.procedure2.TestChildProcedures.TestChildProcedure
 
 abort(TestChildProcedures.TestProcEnv)
 - Method in class org.apache.hadoop.hbase.procedure2.TestChildProcedures.TestRootProcedure
@@ -843,6 +845,8 @@
 
 addStackId(int)
 - Method in class org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure
 
+addStackIndex(int)
 - Method in class org.apache.hadoop.hbase.procedure2.store.wal.TestWALProcedureTree.TestProcedure
+
 addStartEndKeysForTest(TreeMap<byte[], Integer>, byte[], byte[]) - Method in class org.apache.hadoop.hbase.tool.TestLoadIncrementalHFiles
 
 addStateAndBarrier(RegionInfo,
 RegionState.State, long...) - Method in class 
org.apache.hadoop.hbase.replication.regionserver.TestSerialReplicationChecker
@@ -1566,7 +1570,7 @@
 
 assertCounterLt(String,
 long, BaseSource) - Method in class org.apache.hadoop.hbase.test.MetricsAssertHelperImpl
 
-assertDeleted(ProcedureStoreTracker, Procedure[], int[], int[]) - Static method in class org.apache.hadoop.hbase.procedure2.store.wal.TestWALProcedureStore
+assertDeleted(ProcedureStoreTracker, Procedure<?>[], int[], int[]) - Static method in class org.apache.hadoop.hbase.procedure2.store.wal.TestWALProcedureStore
 
 assertDisallow(Table,
 TestSyncReplicationStandBy.TableAction) - Method in class 
org.apache.hadoop.hbase.replication.TestSyncReplicationStandBy
 
@@ -1576,8 +1580,6 @@
 
 Validate that result contains two specified keys, 
exactly.
 
-assertEmptyLogDir()
 - Method in class org.apache.hadoop.hbase.procedure2.store.wal.TestWALProcedureStore
-
 assertEmptyMetaLocation(Table,
 byte[], int) - Static method in class org.apache.hadoop.hbase.TestMetaTableAccessor
 
 assertEmptyResult(Result)
 - Method in class org.apache.hadoop.hbase.client.TestFromClientSide
@@ -1973,7 +1975,7 @@
 
 Assert that the result of 
HttpServer.toString() contains the specific text
 
-assertUpdated(ProcedureStoreTracker, Procedure[], int[], int[]) - Static method in class org.apache.hadoop.hbase.procedure2.store.wal.TestWALProcedureStore
+assertUpdated(ProcedureStoreTracker, Procedure<?>[], int[], int[]) - Static method in class org.apache.hadoop.hbase.procedure2.store.wal.TestWALProcedureStore
 
 assertUsageContent(String)
 - Method in class org.apache.hadoop.hbase.mapreduce.TestRowCounter
 
@@ -5493,6 +5495,8 @@
 
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.procedure2.store.wal.TestWALProcedureStore
 
+CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.procedure2.store.wal.TestWALProcedureTree
+
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.procedure2.TestChildProcedures
 
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.procedure2.TestProcedureBypass
@@ -10164,6 +10168,8 @@
 
 createPreSplitTable(HBaseTestingUtility,
 TableName, int, byte[]...) - Static method in class 
org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils
 
+createProc(long,
 long) - Method in class 
org.apache.hadoop.hbase.procedure2.store.wal.TestWALProcedureTree
+
 createPut(int,
 boolean) - Method in class 

[22/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.html
index 531216d..293a46d 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -245,62 +245,67 @@ extends Method and Description
 
 
+private boolean
+canSchedule(MasterProcedureEnv env, HRegionLocation loc)
+
+
 protected void
 deserializeStateData(ProcedureStateSerializer serializer)
 Called on store load to allow the user to decode the 
previously serialized
  state.
 
 
-
+
 protected StateMachineProcedure.Flow
 executeFromState(MasterProcedureEnv env, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ReopenTableRegionsState state)
 called to perform a single step of the specified 'state' of 
the procedure
 
 
-
+
 protected 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ReopenTableRegionsState
 getInitialState()
 Return the initial state object that will be used for the 
first call to executeFromState().
 
 
-
+
 protected 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ReopenTableRegionsState
 getState(int stateId)
 Convert an ordinal (or state id) to an Enum (or more 
descriptive) state object.
 
 
-
+
 protected int
 getStateId(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ReopenTableRegionsState state)
 Convert the Enum (or more descriptive) state object to an 
ordinal (or state id).
 
 
-
+
 TableName
 getTableName()
 
-
+
 TableProcedureInterface.TableOperationType
 getTableOperationType()
 Given an operation type we can take decisions about what to 
do with pending operations.
 
 
-
+
 protected void
 rollbackState(MasterProcedureEnv env, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ReopenTableRegionsState state)
 called to perform the rollback of the specified state
 
 
-
+
 protected void
 serializeStateData(ProcedureStateSerializer serializer)
 The user-level code of the procedure may have some state to
  persist (e.g.
 
 
-
+
 protected boolean
 setTimeoutFailure(MasterProcedureEnv env)
 At end of timeout, wake ourselves up so we run again.
@@ -326,7 +331,7 @@ extends Procedure
-addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, doExecute, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, holdLock, incChildrenLatch, isBypass, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, shouldWaitClientAck, skipPersistence, toString, toStringClass, toStringDetails, toStringSimpleSB, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted
+addStackIndex, afterReplay, beforeReplay, bypass, compareTo, completionCleanup, doExecute, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, holdLock, incChildrenLatch, isBypass, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult,
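
The method summary above is the StateMachineProcedure contract in miniature: getInitialState() seeds the loop, executeFromState() performs a single step of the given state, and rollbackState() undoes a step on failure. A self-contained sketch with illustrative types (not HBase's real API):

public class StateMachineSketch {
  enum State { GET_REGIONS, REOPEN_REGIONS } // illustrative state names
  enum Flow { HAS_MORE_STATE, NO_MORE_STATE }

  State getInitialState() {
    return State.GET_REGIONS;
  }

  // Perform one step of the given state, reporting whether more follow.
  Flow executeFromState(State state) {
    switch (state) {
      case GET_REGIONS:
        // collect the regions to act on, then continue
        return Flow.HAS_MORE_STATE;
      case REOPEN_REGIONS:
        // reopen them; nothing left afterwards
        return Flow.NO_MORE_STATE;
      default:
        throw new IllegalStateException("unhandled state " + state);
    }
  }

  public static void main(String[] args) {
    StateMachineSketch proc = new StateMachineSketch();
    State state = proc.getInitialState();
    while (proc.executeFromState(state) == Flow.HAS_MORE_STATE) {
      state = State.REOPEN_REGIONS; // the real code maps state ids via getState()/getStateId()
    }
  }
}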

[22/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientSimple.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientSimple.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientSimple.html
new file mode 100644
index 000..dd12f43
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClientSimple.html
@@ -0,0 +1,423 @@
+TestMobRestoreSnapshotFromClientSimple (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+org.apache.hadoop.hbase.client
+Class 
TestMobRestoreSnapshotFromClientSimple
+
+
+
+java.lang.Object
+  org.apache.hadoop.hbase.client.RestoreSnapshotFromClientTestBase
+    org.apache.hadoop.hbase.client.RestoreSnapshotFromClientSimpleTestBase
+      org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientSimple
+
+
+
+
+
+
+
+
+
+
+
+public class TestMobRestoreSnapshotFromClientSimple
+extends RestoreSnapshotFromClientSimpleTestBase
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+static HBaseClassTestRule
+CLASS_RULE
+
+
+
+
+
+
+Fields inherited from classorg.apache.hadoop.hbase.client.RestoreSnapshotFromClientTestBase
+admin,
 emptySnapshot,
 FAMILY,
 name,
 snapshot0Rows,
 snapshot1Rows,
 snapshotName0,
 snapshotName1,
 snapshotName2,
 tableName,
 TEST_FAMILY2,
 TEST_UTIL
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+TestMobRestoreSnapshotFromClientSimple()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsStatic MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+protected int
+countRows(org.apache.hadoop.hbase.client.Table table, byte[]... families)
+
+
+protected void
+createTable()
+
+
+static void
+setupCluster()
+
+
+protected static void
+setupConf(org.apache.hadoop.conf.Configuration conf)
+
+
+protected void
+verifyRowCount(HBaseTestingUtility util, org.apache.hadoop.hbase.TableName tableName, long expectedRows)
+
+
+
+
+
+
+Methods inherited from classorg.apache.hadoop.hbase.client.RestoreSnapshotFromClientSimpleTestBase
+testCorruptedSnapshot,
 testRestoreSnapshot
+
+
+
+
+
+Methods inherited from classorg.apache.hadoop.hbase.client.RestoreSnapshotFromClientTestBase
+getNumReplicas,
 getValidMethodName,
 setup,
 splitRegion,
 tearDown,
 tearDownAfterClass
+
+
+
+
+
+Methods inherited from class java.lang.Object
+clone, equals, finalize, getClass, hashCode, notify, notifyAll

[22/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/BitSetNode.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/BitSetNode.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/BitSetNode.html
index 8dc2d73..ecd1970 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/BitSetNode.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/BitSetNode.html
@@ -25,384 +25,407 @@
 017 */
 018package org.apache.hadoop.hbase.procedure2.store;
 019
-020import java.util.Arrays;
-021import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker.DeleteState;
-022import org.apache.yetus.audience.InterfaceAudience;
-023
-024import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+020import java.util.ArrayList;
+021import java.util.Arrays;
+022import java.util.List;
+023import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker.DeleteState;
+024import org.apache.yetus.audience.InterfaceAudience;
 025
-026/**
-027 * A bitmap which can grow/merge with other {@link BitSetNode} (if certain conditions are met).
-028 * Boundaries of bitmap are aligned to multiples of {@link BitSetNode#BITS_PER_WORD}. So the range
-029 * of a {@link BitSetNode} is from [x * K, y * K) where x and y are integers, y > x and K is
-030 * BITS_PER_WORD.
-031 * <p/>
-032 * We have two main bit sets to describe the state of procedures, the meanings are:
-033 *
-034 * <pre>
-035 *  ----------------------------------------------------------------------------
-036 * | modified | deleted |  meaning
-037 * |    0     |    0    |  proc exists, but hasn't been updated since last resetUpdates().
-038 * |    1     |    0    |  proc was updated (but not deleted).
-039 * |    1     |    1    |  proc was deleted.
-040 * |    0     |    1    |  proc doesn't exist (maybe never created, maybe deleted in past).
-041 * ----------------------------------------------------------------------------
-042 * </pre>
-043 *
-044 * The meaning of modified is that, we have modified the state of the procedure, no matter insert,
-045 * update, or delete. And if it is an insert or update, we will set the deleted to 0, if not we will
-046 * set the delete to 1.
-047 * <p/>
-048 * For a non-partial BitSetNode, the initial modified value is 0 and deleted value is 1. For the
-049 * partial one, the initial modified value is 0 and the initial deleted value is also 0. In
-050 * {@link #unsetPartialFlag()} we will reset the deleted to 1 if it is not modified.
-051 */
-052@InterfaceAudience.Private
-053class BitSetNode {
-054  private static final long WORD_MASK = 0xffffffffffffffffL;
-055  private static final int ADDRESS_BITS_PER_WORD = 6;
-056  private static final int BITS_PER_WORD = 1 << ADDRESS_BITS_PER_WORD;
-057  private static final int MAX_NODE_SIZE = 1 << ADDRESS_BITS_PER_WORD;
-058
-059  /**
-060   * Mimics {@link ProcedureStoreTracker#partial}. It will effect how we fill the new deleted bits
-061   * when growing.
-062   */
-063  private boolean partial;
-064
-065  /**
-066   * Set of procedures which have been modified since last {@link #resetModified()}. Useful to track
-067   * procedures which have been modified since last WAL write.
-068   */
-069  private long[] modified;
-070
-071  /**
-072   * Keeps track of procedure ids which belong to this bitmap's range and have been deleted. This
-073   * represents global state since it's not reset on WAL rolls.
-074   */
-075  private long[] deleted;
-076  /**
-077   * Offset of bitmap i.e. procedure id corresponding to first bit.
-078   */
-079  private long start;
-080
-081  public void dump() {
-082    System.out.printf("%06d:%06d min=%d max=%d%n", getStart(), getEnd(), getActiveMinProcId(),
-083      getActiveMaxProcId());
-084    System.out.println("Modified:");
-085    for (int i = 0; i < modified.length; ++i) {
-086      for (int j = 0; j < BITS_PER_WORD; ++j) {
-087        System.out.print((modified[i] & (1L << j)) != 0 ? "1" : "0");
-088      }
-089      System.out.println(" " + i);
-090    }
-091    System.out.println();
-092    System.out.println("Delete:");
-093    for (int i = 0; i < deleted.length; ++i) {
-094      for (int j = 0; j < BITS_PER_WORD; ++j) {
-095        System.out.print((deleted[i] & (1L << j)) != 0 ? "1" : "0");
-096      }
-097      System.out.println(" " + i);
-098    }
-099    System.out.println();
-100  }
-101
-102  public BitSetNode(long procId, boolean partial) {
-103    start = alignDown(procId);
-104
-105    int count = 1;
-106    modified = new long[count];
-107    deleted = new long[count];
-108    if (!partial) {
-109      Arrays.fill(deleted, WORD_MASK);
-110    }
-111
-112    this.partial = partial;
-113    updateState(procId, false);
-114  }
-115
-116  public BitSetNode(ProcedureProtos.ProcedureStoreTracker.TrackerNode data) {
-117    start = data.getStartId();
-118    int size = data.getUpdatedCount();
-119    assert
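
A small, self-contained sketch of the bit addressing implied above, assuming 64-bit words (ADDRESS_BITS_PER_WORD = 6); alignDown() and the word/bit split are modeled on the constants in the class, not copied from it:

public class BitAddressSketch {
  static final int ADDRESS_BITS_PER_WORD = 6;
  static final int BITS_PER_WORD = 1 << ADDRESS_BITS_PER_WORD; // 64 bits per long

  // Round a procedure id down to a multiple of BITS_PER_WORD.
  static long alignDown(long procId) {
    return procId & -BITS_PER_WORD;
  }

  public static void main(String[] args) {
    long procId = 130;
    long start = alignDown(procId);                       // 128: first id covered by the node
    int offset = (int) (procId - start);                  // 2: bit position within the node
    int word = offset >> ADDRESS_BITS_PER_WORD;           // 0: which long holds the bit
    long mask = 1L << (offset & (BITS_PER_WORD - 1));     // bit mask inside that word
    System.out.println(start + " " + word + " " + Long.toBinaryString(mask));
  }
}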

[22/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
index 43c66a8..061ce80 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
@@ -23,2136 +23,2142 @@
 015 * See the License for the specific 
language governing permissions and
 016 * limitations under the License.
 017 */
-018
-019package 
org.apache.hadoop.hbase.procedure2;
-020
-021import java.io.IOException;
-022import java.util.ArrayDeque;
-023import java.util.ArrayList;
-024import java.util.Arrays;
-025import java.util.Collection;
-026import java.util.Deque;
-027import java.util.HashSet;
-028import java.util.Iterator;
-029import java.util.List;
-030import java.util.Map;
-031import java.util.Objects;
-032import java.util.Set;
-033import 
java.util.concurrent.ConcurrentHashMap;
-034import 
java.util.concurrent.CopyOnWriteArrayList;
-035import java.util.concurrent.TimeUnit;
-036import 
java.util.concurrent.atomic.AtomicBoolean;
-037import 
java.util.concurrent.atomic.AtomicInteger;
-038import 
java.util.concurrent.atomic.AtomicLong;
-039import java.util.stream.Collectors;
-040import java.util.stream.Stream;
-041
-042import 
org.apache.hadoop.conf.Configuration;
-043import 
org.apache.hadoop.hbase.HConstants;
-044import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
-045import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-046import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
-047import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-048import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-049import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-050import 
org.apache.hadoop.hbase.security.User;
-051import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-052import 
org.apache.hadoop.hbase.util.IdLock;
-053import 
org.apache.hadoop.hbase.util.NonceKey;
-054import 
org.apache.hadoop.hbase.util.Threads;
-055import 
org.apache.yetus.audience.InterfaceAudience;
-056import org.slf4j.Logger;
-057import org.slf4j.LoggerFactory;
-058
-059import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-060import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+018package 
org.apache.hadoop.hbase.procedure2;
+019
+020import java.io.IOException;
+021import java.util.ArrayDeque;
+022import java.util.ArrayList;
+023import java.util.Arrays;
+024import java.util.Collection;
+025import java.util.Deque;
+026import java.util.HashSet;
+027import java.util.Iterator;
+028import java.util.List;
+029import java.util.Map;
+030import java.util.Objects;
+031import java.util.Set;
+032import 
java.util.concurrent.ConcurrentHashMap;
+033import 
java.util.concurrent.CopyOnWriteArrayList;
+034import java.util.concurrent.TimeUnit;
+035import 
java.util.concurrent.atomic.AtomicBoolean;
+036import 
java.util.concurrent.atomic.AtomicInteger;
+037import 
java.util.concurrent.atomic.AtomicLong;
+038import java.util.stream.Collectors;
+039import java.util.stream.Stream;
+040import 
org.apache.hadoop.conf.Configuration;
+041import 
org.apache.hadoop.hbase.HConstants;
+042import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
+043import 
org.apache.hadoop.hbase.log.HBaseMarkers;
+044import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
+045import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+046import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
+047import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
+048import 
org.apache.hadoop.hbase.security.User;
+049import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+050import 
org.apache.hadoop.hbase.util.IdLock;
+051import 
org.apache.hadoop.hbase.util.NonceKey;
+052import 
org.apache.hadoop.hbase.util.Threads;
+053import 
org.apache.yetus.audience.InterfaceAudience;
+054import org.slf4j.Logger;
+055import org.slf4j.LoggerFactory;
+056
+057import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+058import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+059
+060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 061
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-063
-064/**
-065 * Thread Pool that executes the submitted procedures.
-066 * The executor has a ProcedureStore associated.
-067 * Each operation is logged and on restart the
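
The class comment breaks off here, but the pattern it names is clear: operations are persisted to the associated store before execution so they can be replayed after a restart. A hedged, self-contained sketch with its own toy types (not the real ProcedureExecutor API):

import java.util.ArrayDeque;
import java.util.Deque;

public class LoggedExecutorSketch {
  // Stand-in for a ProcedureStore: operations are "logged" here first.
  private final Deque<Runnable> store = new ArrayDeque<>();

  void submit(Runnable op) {
    store.addLast(op); // record before executing
    runNext();
  }

  private void runNext() {
    Runnable op = store.pollFirst();
    if (op != null) {
      op.run();
    }
  }

  // On restart, anything still recorded is replayed.
  void replayOnRestart() {
    while (!store.isEmpty()) {
      runNext();
    }
  }

  public static void main(String[] args) {
    LoggedExecutorSketch executor = new LoggedExecutorSketch();
    executor.submit(() -> System.out.println("step 1"));
    executor.replayOnRestart();
  }
}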

[22/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
index dd498f4..d854518 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
@@ -1745,1060 +1745,1066 @@
 1737
 1738  /**
 1739   * Abort a procedure.
-1740   * @param procId ID of the procedure to abort
-1741   * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
-1742   * @return <code>true</code> if aborted, <code>false</code> if procedure already completed or does not exist
-1743   * @throws IOException
-1744   */
-1745  boolean abortProcedure(
-1746      long procId,
-1747      boolean mayInterruptIfRunning) throws IOException;
-1748
-1749  /**
-1750   * Abort a procedure but does not block and wait for completion.
-1751   * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
-1752   * It may throw ExecutionException if there was an error while executing the operation
-1753   * or TimeoutException in case the wait timeout was not long enough to allow the
-1754   * operation to complete.
-1755   *
-1756   * @param procId ID of the procedure to abort
-1757   * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
-1758   * @return <code>true</code> if aborted, <code>false</code> if procedure already completed or does not exist
-1759   * @throws IOException
-1760   */
-1761  Future<Boolean> abortProcedureAsync(
-1762      long procId,
-1763      boolean mayInterruptIfRunning) throws IOException;
-1764
-1765  /**
-1766   * Get procedures.
-1767   * @return procedure list in JSON
-1768   * @throws IOException
-1769   */
-1770  String getProcedures() throws 
IOException;
-1771
-1772  /**
-1773   * Get locks.
-1774   * @return lock list in JSON
-1775   * @throws IOException if a remote or 
network exception occurs
-1776   */
-1777  String getLocks() throws 
IOException;
-1778
-1779  /**
-1780   * Roll the log writer. I.e. for 
filesystem based write ahead logs, start writing to a new file.
-1781   *
-1782   * Note that the actual rolling of the 
log writer is asynchronous and may not be complete when
-1783   * this method returns. As a side 
effect of this call, the named region server may schedule
-1784   * store flushes at the request of the 
wal.
-1785   *
-1786   * @param serverName The servername of 
the regionserver.
-1787   * @throws IOException if a remote or 
network exception occurs
-1788   * @throws 
org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException
-1789   */
-1790  void rollWALWriter(ServerName 
serverName) throws IOException, FailedLogCloseException;
-1791
-1792  /**
-1793   * Helper that delegates to 
getClusterMetrics().getMasterCoprocessorNames().
-1794   * @return an array of master 
coprocessors
-1795   * @see 
org.apache.hadoop.hbase.ClusterMetrics#getMasterCoprocessorNames()
-1796   * @deprecated since 2.0 version and 
will be removed in 3.0 version.
-1797   * use {@link 
#getMasterCoprocessorNames()}
-1798   */
-1799  @Deprecated
-1800  default String[] getMasterCoprocessors() throws IOException {
-1801    return getMasterCoprocessorNames().stream().toArray(size -> new String[size]);
-1802  }
-1803
-1804  /**
-1805   * Helper that delegates to 
getClusterMetrics().getMasterCoprocessorNames().
-1806   * @return an array of master 
coprocessors
-1807   * @see 
org.apache.hadoop.hbase.ClusterMetrics#getMasterCoprocessorNames()
-1808   */
-1809  default List<String> getMasterCoprocessorNames() throws IOException {
-1810    return getClusterMetrics(EnumSet.of(Option.MASTER_COPROCESSORS))
-1811      .getMasterCoprocessorNames();
-1812  }
-1813
-1814  /**
-1815   * Get the current compaction state of 
a table. It could be in a major compaction, a minor
-1816   * compaction, both, or none.
-1817   *
-1818   * @param tableName table to examine
-1819   * @return the current compaction 
state
-1820   * @throws IOException if a remote or 
network exception occurs
-1821   */
-1822  CompactionState 
getCompactionState(TableName tableName) throws IOException;
-1823
-1824  /**
-1825   * Get the current compaction state of 
a table. It could be in a compaction, or none.
-1826   *
-1827   * @param tableName table to examine
-1828   * @param compactType {@link 
org.apache.hadoop.hbase.client.CompactType}
-1829   * @return the current compaction 
state
-1830   * @throws IOException if a remote or 
network exception occurs
-1831   */
-1832  CompactionState 
getCompactionState(TableName tableName,
-1833CompactType compactType) throws 
IOException;
-1834
-1835  /**
-1836   * Get the current compaction state of 
region. It could be in a major compaction, a minor
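
The asynchronous abort described above can be driven with Future.get(long, TimeUnit), per its javadoc. A hedged usage sketch follows; the connection setup is standard HBase client API, but the procedure id is a placeholder.

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AbortProcedureExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      long procId = 42L; // placeholder procedure id
      Future<Boolean> future = admin.abortProcedureAsync(procId, true);
      // May throw ExecutionException or TimeoutException, as documented above.
      boolean aborted = future.get(30, TimeUnit.SECONDS);
      System.out.println("aborted=" + aborted);
    }
  }
}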

[22/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index 0cf012a..976894f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -63,3884 +63,3883 @@
 055import javax.servlet.http.HttpServlet;
 056import 
javax.servlet.http.HttpServletRequest;
 057import 
javax.servlet.http.HttpServletResponse;
-058
-059import 
org.apache.commons.lang3.StringUtils;
-060import 
org.apache.hadoop.conf.Configuration;
-061import org.apache.hadoop.fs.Path;
-062import 
org.apache.hadoop.hbase.ChoreService;
-063import 
org.apache.hadoop.hbase.ClusterId;
-064import 
org.apache.hadoop.hbase.ClusterMetrics;
-065import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-066import 
org.apache.hadoop.hbase.ClusterMetricsBuilder;
-067import 
org.apache.hadoop.hbase.CompoundConfiguration;
-068import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-069import 
org.apache.hadoop.hbase.HBaseIOException;
-070import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-071import 
org.apache.hadoop.hbase.HConstants;
-072import 
org.apache.hadoop.hbase.InvalidFamilyOperationException;
-073import 
org.apache.hadoop.hbase.MasterNotRunningException;
-074import 
org.apache.hadoop.hbase.MetaTableAccessor;
-075import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-076import 
org.apache.hadoop.hbase.PleaseHoldException;
-077import 
org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
-078import 
org.apache.hadoop.hbase.ServerName;
-079import 
org.apache.hadoop.hbase.TableDescriptors;
-080import 
org.apache.hadoop.hbase.TableName;
-081import 
org.apache.hadoop.hbase.TableNotDisabledException;
-082import 
org.apache.hadoop.hbase.TableNotFoundException;
-083import 
org.apache.hadoop.hbase.UnknownRegionException;
-084import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-085import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-086import 
org.apache.hadoop.hbase.client.MasterSwitchType;
-087import 
org.apache.hadoop.hbase.client.RegionInfo;
-088import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-089import 
org.apache.hadoop.hbase.client.Result;
-090import 
org.apache.hadoop.hbase.client.TableDescriptor;
-091import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-092import 
org.apache.hadoop.hbase.client.TableState;
-093import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-094import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-095import 
org.apache.hadoop.hbase.exceptions.MergeRegionException;
-096import 
org.apache.hadoop.hbase.executor.ExecutorType;
-097import 
org.apache.hadoop.hbase.favored.FavoredNodesManager;
-098import 
org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-099import 
org.apache.hadoop.hbase.http.InfoServer;
-100import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-101import 
org.apache.hadoop.hbase.ipc.RpcServer;
-102import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-103import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-104import 
org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
-105import 
org.apache.hadoop.hbase.master.assignment.AssignProcedure;
-106import 
org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-107import 
org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
-108import 
org.apache.hadoop.hbase.master.assignment.MoveRegionProcedure;
-109import 
org.apache.hadoop.hbase.master.assignment.RegionStateNode;
-110import 
org.apache.hadoop.hbase.master.assignment.RegionStates;
-111import 
org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
-112import 
org.apache.hadoop.hbase.master.assignment.UnassignProcedure;
-113import 
org.apache.hadoop.hbase.master.balancer.BalancerChore;
-114import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
-115import 
org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
-116import 
org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
-117import 
org.apache.hadoop.hbase.master.cleaner.CleanerChore;
-118import 
org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-119import 
org.apache.hadoop.hbase.master.cleaner.LogCleaner;
-120import 
org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
-121import 
org.apache.hadoop.hbase.master.locking.LockManager;
-122import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-123import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
-124import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-125import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;

[22/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
index a5789e0..93a57cb 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
@@ -238,4120 +238,4119 @@
 230 * @see Admin
 231 */
 232@InterfaceAudience.Private
-233@InterfaceStability.Evolving
-234public class HBaseAdmin implements Admin 
{
-235  private static final Logger LOG = 
LoggerFactory.getLogger(HBaseAdmin.class);
-236
-237  private ClusterConnection connection;
-238
-239  private final Configuration conf;
-240  private final long pause;
-241  private final int numRetries;
-242  private final int syncWaitTimeout;
-243  private boolean aborted;
-244  private int operationTimeout;
-245  private int rpcTimeout;
-246
-247  private RpcRetryingCallerFactory 
rpcCallerFactory;
-248  private RpcControllerFactory 
rpcControllerFactory;
-249
-250  private NonceGenerator ng;
-251
-252  @Override
-253  public int getOperationTimeout() {
-254return operationTimeout;
-255  }
-256
-257  HBaseAdmin(ClusterConnection 
connection) throws IOException {
-258this.conf = 
connection.getConfiguration();
-259this.connection = connection;
-260
-261// TODO: receive 
ConnectionConfiguration here rather than re-parsing these configs every time.
-262this.pause = 
this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
-263
HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
-264this.numRetries = 
this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-265
HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-266this.operationTimeout = 
this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
-267
HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-268this.rpcTimeout = 
this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-269
HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
-270this.syncWaitTimeout = this.conf.getInt(
-271  "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
-272
-273this.rpcCallerFactory = 
connection.getRpcRetryingCallerFactory();
-274this.rpcControllerFactory = 
connection.getRpcControllerFactory();
-275
-276this.ng = 
this.connection.getNonceGenerator();
-277  }
-278
-279  @Override
-280  public void abort(String why, Throwable 
e) {
-281// Currently does nothing but throw 
the passed message and exception
-282this.aborted = true;
-283throw new RuntimeException(why, e);
-284  }
-285
-286  @Override
-287  public boolean isAborted() {
-288return this.aborted;
-289  }
-290
-291  @Override
-292  public boolean abortProcedure(final 
long procId, final boolean mayInterruptIfRunning)
-293  throws IOException {
-294return 
get(abortProcedureAsync(procId, mayInterruptIfRunning), this.syncWaitTimeout,
-295  TimeUnit.MILLISECONDS);
-296  }
-297
-298  @Override
-299  public Future<Boolean> abortProcedureAsync(final long procId, final boolean mayInterruptIfRunning)
-300  throws IOException {
-301    Boolean abortProcResponse =
-302        executeCallable(new MasterCallable<AbortProcedureResponse>(getConnection(),
-303            getRpcControllerFactory()) {
-304      @Override
-305      protected AbortProcedureResponse rpcCall() throws Exception {
-306        AbortProcedureRequest abortProcRequest =
-307            AbortProcedureRequest.newBuilder().setProcId(procId).build();
-308        return master.abortProcedure(getRpcController(), abortProcRequest);
-309      }
-310    }).getIsProcedureAborted();
-311    return new AbortProcedureFuture(this, procId, abortProcResponse);
-312  }
-313
-314  @Override
-315  public List<TableDescriptor> listTableDescriptors() throws IOException {
-316    return listTableDescriptors((Pattern) null, false);
-317  }
-318
-319  @Override
-320  public List<TableDescriptor> listTableDescriptors(Pattern pattern) throws IOException {
-321    return listTableDescriptors(pattern, false);
-322  }
-323
-324  @Override
-325  public List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables)
-326  throws IOException {
-327    return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
-328        getRpcControllerFactory()) {
-329      @Override
-330      protected List<TableDescriptor> rpcCall() throws Exception {
-331        GetTableDescriptorsRequest req =
-332            RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
-333        return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
-334            req));
-335      }
-336    });
-337  }
-338
-339  @Override
-340  public 

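A hedged sketch of the configuration lookups in the constructor above; the keys match the ones shown, but the default values here are illustrative assumptions rather than HBase's authoritative defaults.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ClientConfSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Keys as used by HBaseAdmin above; the fallback values are assumptions.
    long pause = conf.getLong("hbase.client.pause", 100L);
    int numRetries = conf.getInt("hbase.client.retries.number", 15);
    int syncWaitTimeout = conf.getInt("hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
    System.out.println(pause + " " + numRetries + " " + syncWaitTimeout);
  }
}
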
[22/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/filter/QualifierFilter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/QualifierFilter.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/QualifierFilter.html
index 3f90c7c..7d25b42 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/filter/QualifierFilter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/filter/QualifierFilter.html
@@ -29,144 +29,139 @@
 021
 022import java.io.IOException;
 023import java.util.ArrayList;
-024import java.util.Objects;
-025
-026import org.apache.hadoop.hbase.Cell;
-027import 
org.apache.hadoop.hbase.CompareOperator;
-028import 
org.apache.yetus.audience.InterfaceAudience;
-029import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-030import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-031import 
org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
-032import 
org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-033
-034/**
-035 * This filter is used to filter based on the column qualifier. It takes an
-036 * operator (equal, greater, not equal, etc) and a byte [] comparator for the
-037 * column qualifier portion of a key.
-038 * <p>
-039 * This filter can be wrapped with {@link WhileMatchFilter} and {@link SkipFilter}
-040 * to add more control.
-041 * <p>
-042 * Multiple filters can be combined using {@link FilterList}.
-043 * <p>
-044 * If an already known column qualifier is looked for,
-045 * use {@link org.apache.hadoop.hbase.client.Get#addColumn}
-046 * directly rather than a filter.
-047 */
-048@InterfaceAudience.Public
-049public class QualifierFilter extends 
CompareFilter {
-050
-051  /**
-052   * Constructor.
-053   * @param op the compare op for column 
qualifier matching
-054   * @param qualifierComparator the 
comparator for column qualifier matching
-055   * @deprecated Since 2.0.0. Will be 
removed in 3.0.0.
-056   * Use {@link 
#QualifierFilter(CompareOperator, ByteArrayComparable)} instead.
-057   */
-058  @Deprecated
-059  public QualifierFilter(final CompareOp 
op,
-060  final ByteArrayComparable 
qualifierComparator) {
-061super(op, qualifierComparator);
-062  }
-063
-064  /**
-065   * Constructor.
-066   * @param op the compare op for column 
qualifier matching
-067   * @param qualifierComparator the 
comparator for column qualifier matching
-068   */
-069  public QualifierFilter(final 
CompareOperator op,
-070 final 
ByteArrayComparable qualifierComparator) {
-071super(op, qualifierComparator);
-072  }
-073
-074  @Deprecated
-075  @Override
-076  public ReturnCode filterKeyValue(final 
Cell c) {
-077return filterCell(c);
-078  }
-079
-080  @Override
-081  public ReturnCode filterCell(final Cell c) {
-082    int qualifierLength = c.getQualifierLength();
-083    if (qualifierLength > 0) {
-084      if (compareQualifier(getCompareOperator(), this.comparator, c)) {
-085        return ReturnCode.SKIP;
-086      }
-087    }
-088    return ReturnCode.INCLUDE;
-089  }
-090
-091  public static Filter createFilterFromArguments(ArrayList<byte []> filterArguments) {
-092    ArrayList<?> arguments = CompareFilter.extractArguments(filterArguments);
-093    CompareOperator compareOp = (CompareOperator) arguments.get(0);
-094    ByteArrayComparable comparator = (ByteArrayComparable) arguments.get(1);
-095    return new QualifierFilter(compareOp, comparator);
-096  }
-097
-098  /**
-099   * @return The filter serialized using 
pb
-100   */
-101  @Override
-102  public byte [] toByteArray() {
-103FilterProtos.QualifierFilter.Builder 
builder =
-104  
FilterProtos.QualifierFilter.newBuilder();
-105
builder.setCompareFilter(super.convert());
-106return 
builder.build().toByteArray();
-107  }
-108
-109  /**
-110   * @param pbBytes A pb serialized {@link QualifierFilter} instance
-111   * @return An instance of {@link QualifierFilter} made from <code>bytes</code>
-112   * @throws org.apache.hadoop.hbase.exceptions.DeserializationException
-113   * @see #toByteArray
-114   */
-115  public static QualifierFilter 
parseFrom(final byte [] pbBytes)
-116  throws DeserializationException {
-117FilterProtos.QualifierFilter proto;
-118try {
-119  proto = 
FilterProtos.QualifierFilter.parseFrom(pbBytes);
-120} catch 
(InvalidProtocolBufferException e) {
-121  throw new 
DeserializationException(e);
-122}
-123final CompareOperator valueCompareOp 
=
-124  
CompareOperator.valueOf(proto.getCompareFilter().getCompareOp().name());
-125ByteArrayComparable valueComparator = 
null;
-126try {
-127  if 
(proto.getCompareFilter().hasComparator()) {
-128valueComparator = 
ProtobufUtil.toComparator(proto.getCompareFilter().getComparator());
-129  }
-130} catch (IOException ioe) {
-131  throw new 
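
A hedged usage sketch of the filter documented above: scan a table and keep only cells whose qualifier compares equal to a given byte pattern. The table and qualifier names are placeholders.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class QualifierFilterExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("t1"))) { // placeholder table
      Scan scan = new Scan().setFilter(
          new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("q1"))));
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          System.out.println(result); // rows restricted to the matching qualifier
        }
      }
    }
  }
}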

[22/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.html
index 1a6e0ab..e8c5f26 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -109,12 +109,12 @@ var activeTableTab = "activeTableTab";
 
 
 All Implemented Interfaces:
-https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableMetricsTableSource, MetricsTableSource
+https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true;
 title="class or interface in java.io">Closeable, https://docs.oracle.com/javase/8/docs/api/java/lang/AutoCloseable.html?is-external=true;
 title="class or interface in java.lang">AutoCloseable, https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableMetricsTableSource, MetricsTableSource
 
 
 
 @InterfaceAudience.Private
-public class MetricsTableSourceImpl
+public class MetricsTableSourceImpl
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements MetricsTableSource
 
@@ -144,6 +144,54 @@ implements closed
 
 
+private MutableFastCounter
+compactedInputBytes
+
+
+private MutableFastCounter
+compactedOutputBytes
+
+
+private MetricHistogram
+compactionInputFileCountHisto
+
+
+private MetricHistogram
+compactionInputSizeHisto
+
+
+private MetricHistogram
+compactionOutputFileCountHisto
+
+
+private MetricHistogram
+compactionOutputSizeHisto
+
+
+private MetricHistogram
+compactionTimeHisto
+
+
+private MutableFastCounter
+flushedMemstoreBytes
+
+
+private MutableFastCounter
+flushedOutputBytes
+
+
+private MetricHistogram
+flushMemstoreSizeHisto
+
+
+private MetricHistogram
+flushOutputSizeHisto
+
+
+private MetricHistogram
+flushTimeHisto
+
+
 private int
 hashCode
 
@@ -152,9 +200,49 @@ implements LOG
 
 
+private MutableFastCounter
+majorCompactedInputBytes
+
+
+private MutableFastCounter
+majorCompactedOutputBytes
+
+
+private MetricHistogram
+majorCompactionInputFileCountHisto
+
+
+private MetricHistogram
+majorCompactionInputSizeHisto
+
+
+private MetricHistogram
+majorCompactionOutputFileCountHisto
+
+
+private MetricHistogram
+majorCompactionOutputSizeHisto
+
+
+private MetricHistogram
+majorCompactionTimeHisto
+
+
 private DynamicMetricsRegistry
 registry
 
+
+private MutableFastCounter
+splitRequest
+
+
+private MutableFastCounter
+splitSuccess
+
+
+private MetricHistogram
+splitTimeHisto
+
 
 private TableName
 tableName
@@ -173,7 +261,7 @@ implements MetricsTableSource
-CP_REQUEST_COUNT,
 CP_REQUEST_COUNT_DESC,
 MEMSTORE_SIZE,
 MEMSTORE_SIZE_DESC,
 READ_REQUEST_COUNT,
 READ_REQUEST_COUNT_DESC,
 STORE_FILE_SIZE,
 STORE_FILE_SIZE_DESC, TABLE_SIZE,
 TABLE_SIZE_DESC,
 TOTAL_REQUEST_COUNT,
 TOTAL_REQUEST_COUNT_DESC,
 WRITE_REQUEST_COUNT,
 WRITE_REQUEST_COUNT_DESC
+TABLE_SIZE,
 TABLE_SIZE_DESC
 
 
 
@@ -219,36 +307,115 @@ implements compareTo(MetricsTableSource source)
 
 
+private void
+deregisterMetrics()
+
+
 boolean
 equals(Object o)
 
-
+
 MetricsTableAggregateSource
 getAggregateSource()
 Get the aggregate source to which this reports.
 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 getTableName()
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 getTableNamePrefix()
 
-
+
 MetricsTableWrapperAggregate
 getTableWrapper()
 
-
+
 int
 hashCode()
 
-
+
+void
+incrSplitRequest()
+Increment number of requested splits
+
+
+
+void
+incrSplitSuccess()
+Increment number of successful splits
+
+
+
+void
+registerMetrics()
+
+
 (package private) void
 snapshot(org.apache.hadoop.metrics2.MetricsRecordBuilder mrb, boolean ignored)
 
+
+void
+updateCompactionInputFileCount(boolean isMajor,
+  

[22/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.

2018-08-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
index bd7445a..3504442 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
@@ -1720,459 +1720,459 @@
 1712  LOG.error("Error trying to 
determine if store has references, assuming references exists",
 1713ioe);
 1714  return true;
-1715}
-1716  }
-1717
-1718  /**
-1719   * getter for CompactionProgress 
object
-1720   * @return CompactionProgress object; 
can be null
-1721   */
-1722  public CompactionProgress 
getCompactionProgress() {
-1723return 
this.storeEngine.getCompactor().getProgress();
-1724  }
-1725
-1726  @Override
-1727  public boolean 
shouldPerformMajorCompaction() throws IOException {
-1728for (HStoreFile sf : 
this.storeEngine.getStoreFileManager().getStorefiles()) {
-1729  // TODO: what are these reader 
checks all over the place?
-1730  if (sf.getReader() == null) {
-1731LOG.debug("StoreFile {} has null 
Reader", sf);
-1732return false;
-1733  }
-1734}
-1735return 
storeEngine.getCompactionPolicy().shouldPerformMajorCompaction(
-1736
this.storeEngine.getStoreFileManager().getStorefiles());
-1737  }
-1738
-1739  public Optional<CompactionContext> requestCompaction() throws IOException {
-1740    return requestCompaction(NO_PRIORITY, CompactionLifeCycleTracker.DUMMY, null);
-1741  }
-1742
-1743  public Optional<CompactionContext> requestCompaction(int priority,
-1744      CompactionLifeCycleTracker tracker, User user) throws IOException {
-1745    // don't even select for compaction if writes are disabled
-1746    if (!this.areWritesEnabled()) {
-1747      return Optional.empty();
-1748    }
-1749    // Before we do compaction, try to get rid of unneeded files to simplify things.
-1750    removeUnneededFiles();
-1751
-1752    final CompactionContext compaction = storeEngine.createCompaction();
-1753    CompactionRequestImpl request = null;
-1754    this.lock.readLock().lock();
-1755    try {
-1756      synchronized (filesCompacting) {
-1757        // First, see if coprocessor would want to override selection.
-1758        if (this.getCoprocessorHost() != null) {
-1759          final List<HStoreFile> candidatesForCoproc = compaction.preSelect(this.filesCompacting);
-1760          boolean override = getCoprocessorHost().preCompactSelection(this,
-1761              candidatesForCoproc, tracker, user);
-1762          if (override) {
-1763            // Coprocessor is overriding normal file selection.
-1764            compaction.forceSelect(new CompactionRequestImpl(candidatesForCoproc));
-1765          }
-1766        }
-1767
-1768        // Normal case - coprocessor is not overriding file selection.
-1769        if (!compaction.hasSelection()) {
-1770          boolean isUserCompaction = priority == Store.PRIORITY_USER;
-1771          boolean mayUseOffPeak = offPeakHours.isOffPeakHour() &&
-1772              offPeakCompactionTracker.compareAndSet(false, true);
-1773          try {
-1774            compaction.select(this.filesCompacting, isUserCompaction,
-1775              mayUseOffPeak, forceMajor && filesCompacting.isEmpty());
-1776          } catch (IOException e) {
-1777            if (mayUseOffPeak) {
-1778              offPeakCompactionTracker.set(false);
-1779            }
-1780            throw e;
-1781          }
-1782          assert compaction.hasSelection();
-1783          if (mayUseOffPeak && !compaction.getRequest().isOffPeak()) {
-1784            // Compaction policy doesn't want to take advantage of off-peak.
-1785            offPeakCompactionTracker.set(false);
-1786          }
-1787        }
-1788        if (this.getCoprocessorHost() != null) {
-1789          this.getCoprocessorHost().postCompactSelection(
-1790              this, ImmutableList.copyOf(compaction.getRequest().getFiles()), tracker,
-1791              compaction.getRequest(), user);
-1792        }
-1793        // Finally, we have the resulting files list. Check if we have any files at all.
-1794        request = compaction.getRequest();
-1795        Collection<HStoreFile> selectedFiles = request.getFiles();
-1796        if (selectedFiles.isEmpty()) {
-1797          return Optional.empty();
+1715    } finally {
+1716      if (reloadedStoreFiles != null) {
+1717        for (HStoreFile storeFile : reloadedStoreFiles) {
+1718          try {
+1719            storeFile.closeStoreFile(false);
+1720          } catch (IOException ioe) {
+1721            LOG.warn("Encountered exception
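
A hedged, self-contained sketch (own types, not HStore's API) of the off-peak guard in the selection code above: at most one selection may hold the off-peak slot at a time, and the slot is released if selection fails or the chosen request turns out not to be off-peak after all.

import java.util.concurrent.atomic.AtomicBoolean;

public class OffPeakGuardSketch {
  private final AtomicBoolean offPeakInUse = new AtomicBoolean(false);

  boolean trySelect(boolean isOffPeakHour) {
    // Claim the single off-peak slot only during off-peak hours.
    boolean mayUseOffPeak = isOffPeakHour && offPeakInUse.compareAndSet(false, true);
    try {
      boolean selectedOffPeak = doSelect(mayUseOffPeak);
      if (mayUseOffPeak && !selectedOffPeak) {
        offPeakInUse.set(false); // policy declined off-peak; release the slot
      }
      return selectedOffPeak;
    } catch (RuntimeException e) {
      if (mayUseOffPeak) {
        offPeakInUse.set(false); // release the slot on failure, as above
      }
      throw e;
    }
  }

  private boolean doSelect(boolean mayUseOffPeak) {
    return mayUseOffPeak; // stand-in for the real compaction selection
  }
}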

[22/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/coc.html
--
diff --git a/coc.html b/coc.html
index 0348c50..e294239 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Code of Conduct Policy
@@ -375,7 +375,7 @@ email to mailto:priv...@hbase.apache.org;>the priv
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-23
+  Last Published: 
2018-08-24
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index 1d56516..89c744b 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependencies
 
@@ -440,7 +440,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-23
+  Last Published: 
2018-08-24
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index 70dce3f..bb4d10e 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Reactor Dependency Convergence
 
@@ -890,7 +890,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-23
+  Last Published: 
2018-08-24
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index 5161863..334c8d2 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Dependency Information
 
@@ -313,7 +313,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-23
+  Last Published: 
2018-08-24
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/dependency-management.html
--
diff --git a/dependency-management.html b/dependency-management.html
index bdd6a56..3302203 100644
--- a/dependency-management.html
+++ b/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependency Management
 
@@ -1005,7 +1005,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-23
+  Last Published: 
2018-08-24
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/constant-values.html
--
diff --git a/devapidocs/constant-values.html b/devapidocs/constant-values.html
index 61492ef..c99b5c4 100644
--- a/devapidocs/constant-values.html
+++ b/devapidocs/constant-values.html
@@ -3817,21 +3817,21 @@
 
 publicstaticfinalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 date
-"Thu Aug 23 14:38:46 UTC 2018"
+"Fri Aug 24 14:38:46 UTC 2018"
 
 
 
 
 publicstaticfinalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 revision
-"6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4"
+"a452487a9b82bfd33bc10683c3f8b8ae74d58883"
 
 
 
 
 publicstaticfinalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 srcChecksum
-"1e08aed9fad639e572ab4a3a705f2a05"
+"6a771691f343c60ea56a144f9db58ab5"
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index ef0e429..0797571 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -28367,8 +28367,66 @@
 
 equals(Object)
 - Method in class 

[22/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.

2018-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
index db8431b..a8cb7c4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
@@ -885,7766 +885,7797 @@
 877   * @return What the next sequence (edit) id should be.
 878   * @throws IOException e
 879   */
-880  private long initialize(final CancelableProgressable reporter) throws IOException {
-881
-882    //Refuse to open the region if there is no column family in the table
-883    if (htableDescriptor.getColumnFamilyCount() == 0) {
-884      throw new DoNotRetryIOException("Table " + htableDescriptor.getTableName().getNameAsString()+
-885          " should have at least one column family.");
-886    }
-887
-888    MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this);
-889    long nextSeqId = -1;
-890    try {
-891      nextSeqId = initializeRegionInternals(reporter, status);
-892      return nextSeqId;
-893    } finally {
-894      // nextSeqid will be -1 if the initialization fails.
-895      // At least it will be 0 otherwise.
-896      if (nextSeqId == -1) {
-897        status.abort("Exception during region " + getRegionInfo().getRegionNameAsString() +
-898          " initialization.");
-899      }
-900    }
-901  }
-902
-903  private long initializeRegionInternals(final CancelableProgressable reporter,
-904      final MonitoredTask status) throws IOException {
-905    if (coprocessorHost != null) {
-906      status.setStatus("Running coprocessor pre-open hook");
-907      coprocessorHost.preOpen();
-908    }
-909
-910    // Write HRI to a file in case we need to recover hbase:meta
-911    // Only the primary replica should write .regioninfo
-912    if (this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
-913      status.setStatus("Writing region info on filesystem");
-914      fs.checkRegionInfoOnFilesystem();
-915    }
-916
-917    // Initialize all the HStores
-918    status.setStatus("Initializing all the Stores");
-919    long maxSeqId = initializeStores(reporter, status);
-920    this.mvcc.advanceTo(maxSeqId);
-921    if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-922      Collection<HStore> stores = this.stores.values();
-923      try {
-924        // update the stores that we are replaying
-925        LOG.debug("replaying wal for " + this.getRegionInfo().getEncodedName());
-926        stores.forEach(HStore::startReplayingFromWAL);
-927        // Recover any edits if available.
-928        maxSeqId = Math.max(maxSeqId,
-929          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
-930        // Make sure mvcc is up to max.
-931        this.mvcc.advanceTo(maxSeqId);
-932      } finally {
-933        LOG.debug("stopping wal replay for " + this.getRegionInfo().getEncodedName());
-934        // update the stores that we are done replaying
-935        stores.forEach(HStore::stopReplayingFromWAL);
-936      }
-937    }
-938    this.lastReplayedOpenRegionSeqId = maxSeqId;
-939
-940    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-941    this.writestate.flushRequested = false;
-942    this.writestate.compacting.set(0);
-943
-944    if (this.writestate.writesEnabled) {
-945      LOG.debug("Cleaning up temporary data for " + this.getRegionInfo().getEncodedName());
-946      // Remove temporary data left over from old regions
-947      status.setStatus("Cleaning up temporary data from old regions");
-948      fs.cleanupTempDir();
-949    }
-950
-951    if (this.writestate.writesEnabled) {
-952      status.setStatus("Cleaning up detritus from prior splits");
-953      // Get rid of any splits or merges that were lost in-progress.  Clean out
-954      // these directories here on open.  We may be opening a region that was
-955      // being split but we crashed in the middle of it all.
-956      LOG.debug("Cleaning up detritus for " + this.getRegionInfo().getEncodedName());
-957      fs.cleanupAnySplitDetritus();
-958      fs.cleanupMergesDir();
-959    }
+880  @VisibleForTesting
+881  long initialize(final CancelableProgressable reporter) throws IOException {
+882
+883    //Refuse to open the region if there is no column family in the table
+884    if (htableDescriptor.getColumnFamilyCount() == 0) {
+885      throw new DoNotRetryIOException("Table " + htableDescriptor.getTableName().getNameAsString()+
+886          " should have at least one column family.");
+887    }
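
The hunk above makes initialize() package-private with @VisibleForTesting and keeps the fail-fast guard against tables that define no column family. A minimal plain-Java sketch of that guard-then-report shape, using hypothetical stand-in names rather than the real HBase types:

    import java.io.IOException;

    class RegionOpenerSketch {
        private final String tableName;
        private final int columnFamilyCount; // taken from the table descriptor

        RegionOpenerSketch(String tableName, int columnFamilyCount) {
            this.tableName = tableName;
            this.columnFamilyCount = columnFamilyCount;
        }

        // Package-private rather than private, mirroring the @VisibleForTesting
        // change above: a test in the same package can now drive it directly.
        long initialize() throws IOException {
            // Refuse to open a region whose table defines no column family.
            if (columnFamilyCount == 0) {
                throw new IOException(
                    "Table " + tableName + " should have at least one column family.");
            }
            long nextSeqId = -1;
            try {
                nextSeqId = initializeInternals();
                return nextSeqId;
            } finally {
                if (nextSeqId == -1) {
                    // Still -1 means initializeInternals() threw; report the abort.
                    System.err.println("Exception during region initialization of " + tableName);
                }
            }
        }

        private long initializeInternals() {
            return 0L; // stand-in for the real multi-step initialization
        }
    }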

[22/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/master/assignment/Util.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/assignment/Util.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/Util.html
deleted file mode 100644
index 6fbfdf1..000
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/Util.html
+++ /dev/null
@@ -1,312 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-
-
-
-Util (Apache HBase 3.0.0-SNAPSHOT API)
-
-
-
-
-
-var methods = {"i0":9,"i1":9};
-var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"]};
-var altColor = "altColor";
-var rowColor = "rowColor";
-var tableTab = "tableTab";
-var activeTableTab = "activeTableTab";
-
-
-JavaScript is disabled on your browser.
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-PrevClass
-NextClass
-
-
-Frames
-NoFrames
-
-
-AllClasses
-
-
-
-
-
-
-
-Summary:
-Nested|
-Field|
-Constr|
-Method
-
-
-Detail:
-Field|
-Constr|
-Method
-
-
-
-
-
-
-
-
-org.apache.hadoop.hbase.master.assignment
-Class Util
-
-
-
-https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
-
-
-org.apache.hadoop.hbase.master.assignment.Util
-
-
-
-
-
-
-
-
-@InterfaceAudience.Private
-class Util
-extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
-Utility for this assignment package only.
-
-
-
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors
-
-Modifier
-Constructor and Description
-
-
-private 
-Util()
-
-
-
-
-
-
-
-
-
-Method Summary
-
-All MethodsStatic MethodsConcrete Methods
-
-Modifier and Type
-Method and Description
-
-
-(package private) static 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse
-getRegionInfoResponse(MasterProcedureEnv env,
- ServerName regionLocation,
- RegionInfo hri)
-Raw call to remote regionserver to get info on a particular 
region.
-
-
-
-(package private) static 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse
-getRegionInfoResponse(MasterProcedureEnv env,
- ServerName regionLocation,
- RegionInfo hri,
- boolean includeBestSplitRow)
-
-
-
-
-
-
-Methods inherited from classjava.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
-https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
 title="class or interface in java.lang">equals, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--;
 title="class or interface in java.lang">finalize, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--;
 title="class or interface in java.lang">getClass, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--;
 title="class or interface in java.lang">hashCode, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--;
 title="class or interface in java.lang">notify, https://docs.oracle.com/javase/8/docs/api/ja
 va/lang/Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--;
 title="class or interface in java.lang">toString, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--;
 title="class or interface in java.lang">wait, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-;
 title="class or interface in java.lang">wait, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-;
 title="class or interface in java.lang">wait
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-Constructor Detail
-
-
-
-
-
-Util
-privateUtil()
-
-
-
-
-
-
-
-
-
-Method Detail
-
-
-
-
-
-getRegionInfoResponse
-static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse getRegionInfoResponse(MasterProcedureEnv env,
-  

[22/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.WALMapper.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.WALMapper.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.WALMapper.html
index 9501e97..a10ddfe 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.WALMapper.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.WALMapper.html
@@ -131,277 +131,279 @@
 123        }
 124      }
 125    } catch (InterruptedException e) {
-126      e.printStackTrace();
-127    }
-128  }
-129
-130    @Override
-131    public void setup(Context context) throws IOException {
-132      Configuration conf = context.getConfiguration();
-133      String[] tables = conf.getStrings(TABLES_KEY);
-134      this.multiTableSupport = conf.getBoolean(MULTI_TABLES_SUPPORT, false);
-135      for (String table : tables) {
-136        tableSet.add(table);
-137      }
-138    }
-139  }
-140
-141  /**
-142   * A mapper that writes out {@link Mutation} to be directly applied to a running HBase instance.
-143   */
-144  protected static class WALMapper
-145      extends Mapper<WALKey, WALEdit, ImmutableBytesWritable, Mutation> {
-146    private Map<TableName, TableName> tables = new TreeMap<>();
-147
-148    @Override
-149    public void map(WALKey key, WALEdit value, Context context) throws IOException {
-150      try {
-151        if (tables.isEmpty() || tables.containsKey(key.getTableName())) {
-152          TableName targetTable =
-153              tables.isEmpty() ? key.getTableName() : tables.get(key.getTableName());
-154          ImmutableBytesWritable tableOut = new ImmutableBytesWritable(targetTable.getName());
-155          Put put = null;
-156          Delete del = null;
-157          Cell lastCell = null;
-158          for (Cell cell : value.getCells()) {
-159            // filtering WAL meta entries
-160            if (WALEdit.isMetaEditFamily(cell)) {
-161              continue;
-162            }
-163
-164            // Allow a subclass filter out this cell.
-165            if (filter(context, cell)) {
-166              // A WALEdit may contain multiple operations (HBASE-3584) and/or
-167              // multiple rows (HBASE-5229).
-168              // Aggregate as much as possible into a single Put/Delete
-169              // operation before writing to the context.
-170              if (lastCell == null || lastCell.getTypeByte() != cell.getTypeByte()
-171                  || !CellUtil.matchingRows(lastCell, cell)) {
-172                // row or type changed, write out aggregate KVs.
-173                if (put != null) {
-174                  context.write(tableOut, put);
-175                }
-176                if (del != null) {
-177                  context.write(tableOut, del);
-178                }
-179                if (CellUtil.isDelete(cell)) {
-180                  del = new Delete(CellUtil.cloneRow(cell));
-181                } else {
-182                  put = new Put(CellUtil.cloneRow(cell));
-183                }
-184              }
-185              if (CellUtil.isDelete(cell)) {
-186                del.add(cell);
-187              } else {
-188                put.add(cell);
-189              }
-190            }
-191            lastCell = cell;
-192          }
-193          // write residual KVs
-194          if (put != null) {
-195            context.write(tableOut, put);
-196          }
-197          if (del != null) {
-198            context.write(tableOut, del);
-199          }
-200        }
-201      } catch (InterruptedException e) {
-202        e.printStackTrace();
-203      }
-204    }
-205
-206    protected boolean filter(Context context, final Cell cell) {
-207      return true;
-208    }
-209
-210    @Override
-211    protected void
-212    cleanup(Mapper<WALKey, WALEdit, ImmutableBytesWritable, Mutation>.Context context)
-213        throws IOException, InterruptedException {
-214      super.cleanup(context);
-215    }
-216
-217    @Override
-218    public void setup(Context context) throws IOException {
-219      String[] tableMap = context.getConfiguration().getStrings(TABLE_MAP_KEY);
-220      String[] tablesToUse = context.getConfiguration().getStrings(TABLES_KEY);
-221      if (tableMap == null) {
-222        tableMap = tablesToUse;
-223      }
-224      if (tablesToUse == null) {
-225        // Then user wants all tables.
-226      } else if (tablesToUse.length != tableMap.length) {
-227        // this can only happen when WALMapper is used directly by a class other than WALPlayer
-228        throw new IOException("Incorrect table mapping specified .");
-229      }
-230      int i = 0;
-231      if (tablesToUse != null) {
-232        for (String table : tablesToUse) {
-233
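
The removed lines above show WALMapper's central trick: consecutive WAL cells with the same row and type are folded into a single Put or Delete before being written to the MapReduce context. A small self-contained Java sketch of that run-grouping idea, with a hypothetical CellStub record standing in for the HBase Cell:

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical cell: a row key plus an operation type (put vs delete).
    record CellStub(String row, boolean isDelete) {}

    class EditAggregator {
        // Group consecutive cells sharing a row and type into one batch,
        // loosely mirroring how WALMapper folds a WALEdit into as few
        // Put/Delete mutations as possible before emitting them.
        static List<List<CellStub>> aggregate(List<CellStub> cells) {
            List<List<CellStub>> batches = new ArrayList<>();
            List<CellStub> current = new ArrayList<>();
            CellStub last = null;
            for (CellStub cell : cells) {
                if (last != null
                    && (last.isDelete() != cell.isDelete() || !last.row().equals(cell.row()))) {
                    // Row or type changed: flush the aggregate batch.
                    batches.add(current);
                    current = new ArrayList<>();
                }
                current.add(cell);
                last = cell;
            }
            if (!current.isEmpty()) {
                batches.add(current); // residual batch
            }
            return batches;
        }
    }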

[22/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
index b7b4236..3d1edb3 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
@@ -259,1863 +259,1867 @@
 251   * + Metadata!              + = See note on BLOCK_METADATA_SPACE above.
 252   * ++
 253   * </code>
-254   * @see #serialize(ByteBuffer)
+254   * @see #serialize(ByteBuffer, boolean)
 255   */
-256  static final CacheableDeserializer<Cacheable> BLOCK_DESERIALIZER =
-257      new CacheableDeserializer<Cacheable>() {
-258    @Override
-259    public HFileBlock deserialize(ByteBuff buf, boolean reuse, MemoryType memType)
-260        throws IOException {
-261      // The buf has the file block followed by block metadata.
-262      // Set limit to just before the BLOCK_METADATA_SPACE then rewind.
-263      buf.limit(buf.limit() - BLOCK_METADATA_SPACE).rewind();
-264      // Get a new buffer to pass the HFileBlock for it to 'own'.
-265      ByteBuff newByteBuff;
-266      if (reuse) {
-267        newByteBuff = buf.slice();
-268      } else {
-269        int len = buf.limit();
-270        newByteBuff = new SingleByteBuff(ByteBuffer.allocate(len));
-271        newByteBuff.put(0, buf, buf.position(), len);
-272      }
-273      // Read out the BLOCK_METADATA_SPACE content and shove into our HFileBlock.
-274      buf.position(buf.limit());
-275      buf.limit(buf.limit() + HFileBlock.BLOCK_METADATA_SPACE);
-276      boolean usesChecksum = buf.get() == (byte) 1;
-277      long offset = buf.getLong();
-278      int nextBlockOnDiskSize = buf.getInt();
-279      HFileBlock hFileBlock =
-280          new HFileBlock(newByteBuff, usesChecksum, memType, offset, nextBlockOnDiskSize, null);
-281      return hFileBlock;
-282    }
-283
-284    @Override
-285    public int getDeserialiserIdentifier() {
-286      return DESERIALIZER_IDENTIFIER;
-287    }
-288
-289    @Override
-290    public HFileBlock deserialize(ByteBuff b) throws IOException {
-291      // Used only in tests
-292      return deserialize(b, false, MemoryType.EXCLUSIVE);
-293    }
-294  };
-295
-296  private static final int DESERIALIZER_IDENTIFIER;
-297  static {
-298    DESERIALIZER_IDENTIFIER =
-299        CacheableDeserializerIdManager.registerDeserializer(BLOCK_DESERIALIZER);
-300  }
-301
-302  /**
-303   * Copy constructor. Creates a shallow copy of {@code that}'s buffer.
-304   */
-305  private HFileBlock(HFileBlock that) {
-306    this(that, false);
-307  }
-308
-309  /**
-310   * Copy constructor. Creates a shallow/deep copy of {@code that}'s buffer as per the boolean
-311   * param.
-312   */
-313  private HFileBlock(HFileBlock that, boolean bufCopy) {
-314    init(that.blockType, that.onDiskSizeWithoutHeader,
-315        that.uncompressedSizeWithoutHeader, that.prevBlockOffset,
-316        that.offset, that.onDiskDataSizeWithHeader, that.nextBlockOnDiskSize, that.fileContext);
-317    if (bufCopy) {
-318      this.buf = new SingleByteBuff(ByteBuffer.wrap(that.buf.toBytes(0, that.buf.limit())));
-319    } else {
-320      this.buf = that.buf.duplicate();
-321    }
-322  }
-323
-324  /**
-325   * Creates a new {@link HFile} block from the given fields. This constructor
-326   * is used only while writing blocks and caching,
-327   * and is sitting in a byte buffer and we want to stuff the block into cache.
-328   *
-329   * <p>TODO: The caller presumes no checksumming
-330   * required of this block instance since going into cache; checksum already verified on
-331   * underlying block data pulled in from filesystem. Is that correct? What if cache is SSD?
+256  public static final CacheableDeserializer<Cacheable> BLOCK_DESERIALIZER = new BlockDeserializer();
+257
+258  public static final class BlockDeserializer implements CacheableDeserializer<Cacheable> {
+259    private BlockDeserializer() {
+260    }
+261
+262    @Override
+263    public HFileBlock deserialize(ByteBuff buf, boolean reuse, MemoryType memType)
+264        throws IOException {
+265      // The buf has the file block followed by block metadata.
+266      // Set limit to just before the BLOCK_METADATA_SPACE then rewind.
+267      buf.limit(buf.limit() - BLOCK_METADATA_SPACE).rewind();
+268      // Get a new buffer to pass the HFileBlock for it to 'own'.
+269      ByteBuff newByteBuff;
+270      if (reuse) {
+271        newByteBuff = buf.slice();
+272      } else {
+273        int len = buf.limit();
+274        newByteBuff = new SingleByteBuff(ByteBuffer.allocate(len));
+275        newByteBuff.put(0, buf, buf.position(), len);
+276      }
+277      // Read
[22/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.html
index 17a58f7..3627e91 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.html
@@ -322,7 +322,7 @@ extends AbstractPeerProcedure
-acquireLock, getLatch, getPeerId, hasLock, holdLock, refreshPeer, releaseLock, rollbackState
+acquireLock, getLatch, getPeerId, holdLock, refreshPeer, releaseLock, rollbackState, waitInitialized
 
 
 
@@ -336,7 +336,7 @@ extends Procedure
-addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, doAcquireLock, doExecute, doReleaseLock, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasOwner, hasParent, hasTimeout, haveSameParent, incChildrenLatch, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, toString, toStringClass, toStringClassDetails, toStringDetails, toStringSimpleSB, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted
+addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, doExecute, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, incChildrenLatch, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, toString, toStringClass, toStringClassDetails, toStringDetails, toStringSimpleSB, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/org/apache/hadoop/hbase/master/replication/UpdatePeerConfigProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/replication/UpdatePeerConfigProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/replication/UpdatePeerConfigProcedure.html
index 04a6096..65233db 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/replication/UpdatePeerConfigProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/replication/UpdatePeerConfigProcedure.html
@@ -335,7 +335,7 @@ extends AbstractPeerProcedure
-acquireLock, getLatch, getPeerId, hasLock, holdLock, refreshPeer, releaseLock, rollbackState
+acquireLock, getLatch, getPeerId, holdLock, refreshPeer, releaseLock, rollbackState, waitInitialized
 
 
 
@@ -349,7 +349,7 @@ extends Procedure
-addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, doAcquireLock, doExecute, doReleaseLock, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasOwner, hasParent, hasTimeout, haveSameParent, incChildrenLatch, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes,
 

[22/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html
index 233dba3..91b9055 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html
@@ -540,1205 +540,1204 @@
 532      sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> {
 533        Deque<BalancerRegionLoad> rLoads = oldLoads.get(Bytes.toString(regionName));
 534        if (rLoads == null) {
-535          // There was nothing there
-536          rLoads = new ArrayDeque<>();
-537        } else if (rLoads.size() >= numRegionLoadsToRemember) {
-538          rLoads.remove();
-539        }
-540        rLoads.add(new BalancerRegionLoad(rm));
-541        loads.put(Bytes.toString(regionName), rLoads);
-542      });
-543    });
-544
-545    for(CostFromRegionLoadFunction cost : regionLoadFunctions) {
-546      cost.setLoads(loads);
-547    }
-548  }
-549
-550  protected void initCosts(Cluster cluster) {
-551    for (CostFunction c:costFunctions) {
-552      c.init(cluster);
-553    }
-554  }
-555
-556  protected void updateCostsWithAction(Cluster cluster, Action action) {
-557    for (CostFunction c : costFunctions) {
-558      c.postAction(action);
-559    }
-560  }
-561
-562  /**
-563   * Get the names of the cost functions
-564   */
-565  public String[] getCostFunctionNames() {
-566    if (costFunctions == null) return null;
-567    String[] ret = new String[costFunctions.length];
-568    for (int i = 0; i < costFunctions.length; i++) {
-569      CostFunction c = costFunctions[i];
-570      ret[i] = c.getClass().getSimpleName();
-571    }
-572
-573    return ret;
-574  }
-575
-576  /**
-577   * This is the main cost function.  It will compute a cost associated with a proposed cluster
-578   * state.  All different costs will be combined with their multipliers to produce a double cost.
-579   *
-580   * @param cluster The state of the cluster
-581   * @param previousCost the previous cost. This is used as an early out.
-582   * @return a double of a cost associated with the proposed cluster state.  This cost is an
-583   *         aggregate of all individual cost functions.
-584   */
-585  protected double computeCost(Cluster cluster, double previousCost) {
-586    double total = 0;
-587
-588    for (int i = 0; i < costFunctions.length; i++) {
-589      CostFunction c = costFunctions[i];
-590      this.tempFunctionCosts[i] = 0.0;
-591
-592      if (c.getMultiplier() <= 0) {
-593        continue;
-594      }
-595
-596      Float multiplier = c.getMultiplier();
-597      Double cost = c.cost();
-598
-599      this.tempFunctionCosts[i] = multiplier*cost;
-600      total += this.tempFunctionCosts[i];
-601
-602      if (total > previousCost) {
-603        break;
-604      }
-605    }
-606
-607    return total;
-608  }
-609
-610  /** Generates a candidate action to be applied to the cluster for cost function search */
-611  abstract static class CandidateGenerator {
-612    abstract Cluster.Action generate(Cluster cluster);
-613
-614    /**
-615     * From a list of regions pick a random one. Null can be returned which
-616     * {@link StochasticLoadBalancer#balanceCluster(Map)} recognize as signal to try a region move
-617     * rather than swap.
-618     *
-619     * @param cluster        The state of the cluster
-620     * @param server         index of the server
-621     * @param chanceOfNoSwap Chance that this will decide to try a move rather
-622     *                       than a swap.
-623     * @return a random {@link RegionInfo} or null if an asymmetrical move is
-624     *         suggested.
-625     */
-626    protected int pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
-627      // Check to see if this is just a move.
-628      if (cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat() < chanceOfNoSwap) {
-629        // signal a move only.
-630        return -1;
-631      }
-632      int rand = RANDOM.nextInt(cluster.regionsPerServer[server].length);
-633      return cluster.regionsPerServer[server][rand];
-634
-635    }
-636    protected int pickRandomServer(Cluster cluster) {
-637      if (cluster.numServers < 1) {
-638        return -1;
-639      }
-640
-641      return RANDOM.nextInt(cluster.numServers);
-642    }
-643
-644    protected int pickRandomRack(Cluster cluster) {
-645      if (cluster.numRacks < 1) {
-646        return -1;
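
The computeCost() method above is a weighted sum of cost terms with two shortcuts: terms with a non-positive multiplier are skipped, and the loop exits early once the running total exceeds the previous best cost. A compact Java sketch of the same aggregation, using hypothetical types rather than the balancer's own:

    import java.util.function.DoubleSupplier;

    class CostAggregator {
        // Pairs a cost term with its weight; non-positive weights disable the term.
        record WeightedCost(double multiplier, DoubleSupplier cost) {}

        // Weighted sum with an early out once the total is already worse than
        // the previous plan, mirroring the shape of computeCost() above.
        static double computeCost(WeightedCost[] functions, double previousCost) {
            double total = 0;
            for (WeightedCost f : functions) {
                if (f.multiplier() <= 0) {
                    continue; // disabled term
                }
                total += f.multiplier() * f.cost().getAsDouble();
                if (total > previousCost) {
                    break; // already worse than the current plan; stop early
                }
            }
            return total;
        }
    }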

[22/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/RegionLoad.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/RegionLoad.html 
b/apidocs/org/apache/hadoop/hbase/RegionLoad.html
index 793255f..ac7c39e 100644
--- a/apidocs/org/apache/hadoop/hbase/RegionLoad.html
+++ b/apidocs/org/apache/hadoop/hbase/RegionLoad.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
@@ -19,45 +19,45 @@
 }
 //-->
 var methods = 
{"i0":42,"i1":42,"i2":42,"i3":42,"i4":42,"i5":42,"i6":42,"i7":42,"i8":42,"i9":42,"i10":42,"i11":42,"i12":42,"i13":42,"i14":42,"i15":42,"i16":42,"i17":42,"i18":42,"i19":42,"i20":42,"i21":42,"i22":42,"i23":42,"i24":42,"i25":42,"i26":42,"i27":42,"i28":42,"i29":42,"i30":42,"i31":42,"i32":42,"i33":42,"i34":42,"i35":42,"i36":42,"i37":42,"i38":42,"i39":42,"i40":42};
-var tabs = {65535:["t0","所有方法"],2:["t2","实例方法"],8:["t4","具体方法"],32:["t6","已过时的方法"]};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
 var activeTableTab = "activeTableTab";
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
-跳过导航链接
+Skip navigation links
 
 
 
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
 
 
 
 
-上一个类
-下一个类
+PrevClass
+NextClass
 
 
-框架
-无框架
+Frames
+NoFrames
 
 
-所有类
+AllClasses
 
 
 

[22/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/NotServingRegionException.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/NotServingRegionException.html 
b/apidocs/org/apache/hadoop/hbase/NotServingRegionException.html
index fd82164..1665f28 100644
--- a/apidocs/org/apache/hadoop/hbase/NotServingRegionException.html
+++ b/apidocs/org/apache/hadoop/hbase/NotServingRegionException.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
@@ -20,38 +20,38 @@
 //-->
 
 
-JavaScript is disabled on your browser.
+您的浏览器已禁用 JavaScript。
 
 
 
 
 
-Skip navigation links
+跳过导航链接
 
 
 
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
+
+概览
+程序包
+类
+使用
+树
+已过时
+索引
+帮助
 
 
 
 
-PrevClass
-NextClass
+上一个类
+下一个类
 
 
-Frames
-NoFrames
+框架
+无框架
 
 
-AllClasses
+所有类
 
 
 
 
org.apache.hadoop.hbase
-

Class NotServingRegionException

+

类 NotServingRegionException


[22/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

2018-07-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.html
index 550e16d..92f8ac3 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":18,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":18,"i14":18,"i15":6,"i16":18,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6};
+var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":18,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":18,"i13":6,"i14":18,"i15":18,"i16":6,"i17":18,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -106,7 +106,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public interface ReplicationSourceInterface
+public interface ReplicationSourceInterface
 Interface that defines a replication source
 
 
@@ -194,6 +194,12 @@ public interface getWALFileLengthProvider()
 
 
+default Map<String, ReplicationStatus>
+getWalGroupStatus()
+get the stat of replication for each wal group.
+
+
+
 void
 init(org.apache.hadoop.conf.Configurationconf,
 org.apache.hadoop.fs.FileSystemfs,
@@ -208,49 +214,49 @@ public interface Initializer for the source
 
 
-
+
 default boolean
 isPeerEnabled()
 
-
+
 default boolean
 isRecovered()
 
-
+
 boolean
 isSourceActive()
 
-
+
 default boolean
 isSyncReplication()
 
-
+
 void
 postShipEdits(List<WAL.Entry> entries,
  int batchSize)
 Call this after the shipper thread ship some entries to 
peer cluster.
 
 
-
+
 void
 startup()
 Start the replication
 
 
-
+
 void
 terminate(String reason)
 End the replication
 
 
-
+
 void
 terminate(String reason,
  Exception cause)
 End the replication
 
 
-
+
 void
 tryThrottle(intbatchSize)
 Try to throttle when the peer config with a bandwidth
@@ -277,7 +283,7 @@ public interface 
 
 init
-void init(org.apache.hadoop.conf.Configuration conf,
+void init(org.apache.hadoop.conf.Configuration conf,
   org.apache.hadoop.fs.FileSystem fs,
   ReplicationSourceManager manager,
   ReplicationQueueStorage queueStorage,
@@ -306,7 +312,7 @@ public interface 
 
 enqueueLog
-void enqueueLog(org.apache.hadoop.fs.Path log)
+void enqueueLog(org.apache.hadoop.fs.Path log)
 Add a log to the list of logs to replicate
 
 Parameters:
@@ -320,7 +326,7 @@ public interface 
 
 addHFileRefs
-void addHFileRefs(TableName tableName,
+void addHFileRefs(TableName tableName,
   byte[] family,
   List<Pair<org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path>> pairs)
throws ReplicationException
@@ -342,7 +348,7 @@ public interface 
 
 startup
-void startup()
+void startup()
 Start the replication
 
 
@@ -352,7 +358,7 @@ public interface 
 
 terminate
-void terminate(String reason)
+void terminate(String reason)
 End the replication
 
 Parameters:
@@ -366,7 +372,7 @@ public interface 
 
 terminate
-void terminate(String reason,
+void terminate(String reason,

[22/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

2018-06-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
index d02c856..f594c43 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
@@ -106,6 +106,11 @@
 org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil
 org.apache.hadoop.hbase.security.access.TableAuthManager (implements java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true;
 title="class or interface in java.io">Closeable)
 org.apache.hadoop.hbase.security.access.TableAuthManager.PermissionCacheT
+org.apache.hadoop.hbase.security.User
+
+org.apache.hadoop.hbase.security.access.AccessChecker.InputUser
+
+
 org.apache.hadoop.io.VersionedWritable (implements 
org.apache.hadoop.io.Writable)
 
 org.apache.hadoop.hbase.security.access.Permission
@@ -137,8 +142,8 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.security.access.AccessControlFilter.Strategy
 org.apache.hadoop.hbase.security.access.Permission.Action
+org.apache.hadoop.hbase.security.access.AccessControlFilter.Strategy
 org.apache.hadoop.hbase.security.access.AccessController.OpType
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/org/apache/hadoop/hbase/security/class-use/User.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/security/class-use/User.html 
b/devapidocs/org/apache/hadoop/hbase/security/class-use/User.html
index 93fb149..0b92a31 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/class-use/User.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/class-use/User.html
@@ -1534,6 +1534,21 @@
 
 
 Uses of User in org.apache.hadoop.hbase.security.access
+
+Subclasses of User in org.apache.hadoop.hbase.security.access
+
+Modifier and Type
+Class and Description
+
+
+
+static class
+AccessChecker.InputUser
+A temporary user class to instantiate User instance based 
on the name and groups.
+
+
+
+
 
 Fields in org.apache.hadoop.hbase.security.access
 declared as User
 
@@ -1568,6 +1583,12 @@
 User
 AuthResult.getUser()
 
+
+User
+AccessChecker.validateCallerWithFilterUser(User caller,
+    TablePermission tPerm,
+    String inputUserName)
+
 
 
 
@@ -1768,13 +1789,14 @@
 
 
 private AuthResult
-AccessController.permissionGranted(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringrequest,
+AccessController.permissionGranted(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringrequest,
  Useruser,
  Permission.ActionpermRequest,
  RegionCoprocessorEnvironmente,
+ TableNametableName,
  https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Mapbyte[],? extends https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in 
java.util">Collection?families)
-Check the current user for authorization to perform a 
specific action
- against the given set of row data.
+Check the current user for authorization to perform a 
specific action against the given set of
+ row data.
 
 
 
@@ -1797,19 +1819,21 @@
 
 
 void
-AccessChecker.requireGlobalPermission(User user,
+AccessChecker.requireGlobalPermission(User user,
   String request,
   Permission.Action perm,
   TableName tableName,
-   Map<byte[], ? extends Collection<byte[]>> familyMap)
+

[22/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.

2018-06-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.html 
b/devapidocs/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.html
index 3a1bc007..63feb28 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class ShortCircuitMasterConnection
+public class ShortCircuitMasterConnection
 extends Object
 implements MasterKeepAliveConnection
 A short-circuit connection that can bypass the RPC layer (serialization, deserialization,
@@ -538,16 +538,21 @@ implements 
+org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateResponse
+transitReplicationPeerSyncReplicationState(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.TransitReplicationPeerSyncReplicationStateRequest request)
+
 org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse
 truncateTable(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
     org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest request)

-
+
 org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse
 unassignRegion(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
     org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest request)

-
+
 org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse
 updateReplicationPeerConfig(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
     org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest request)
@@ -580,7 +585,7 @@ implements 
 
 stub
-private 
finalorg.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterface
 stub
+private 
finalorg.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterface
 stub
 
 
 
@@ -597,7 +602,7 @@ implements 
 
 ShortCircuitMasterConnection
-public ShortCircuitMasterConnection(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterface stub)
+public ShortCircuitMasterConnection(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterface stub)
 
 
 
@@ -614,7 +619,7 @@ implements 
 
 unassignRegion
-public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse unassignRegion(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
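
Per its summary above, ShortCircuitMasterConnection bypasses the RPC layer by delegating every call straight to an in-process stub, so there is no serialization or wire hop. A tiny Java sketch of that wrapper shape, with a hypothetical AdminService in place of the generated protobuf interface:

    // Hypothetical admin interface and in-process stub; the point is that the
    // "connection" is a plain delegating wrapper around a local object.
    interface AdminService {
        String unassignRegion(String regionName);
    }

    class ShortCircuitConnection implements AdminService {
        private final AdminService stub; // the in-process implementation

        ShortCircuitConnection(AdminService stub) {
            this.stub = stub;
        }

        @Override
        public String unassignRegion(String regionName) {
            // No controller, no serialization: just a direct Java call.
            return stub.unassignRegion(regionName);
        }
    }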

[22/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

2018-06-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
index cf7a1b6..792aeaf 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
@@ -784,8 +784,9 @@
   booleanforcible)
 
 
-MoveRegionProcedure(MasterProcedureEnv env,
-   RegionPlan plan)

+MoveRegionProcedure(MasterProcedureEnv env,
+   RegionPlan plan,
+   boolean check)
 
 SplitTableRegionProcedure(MasterProcedureEnvenv,
@@ -1116,11 +1117,16 @@
  https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfonewRegions)
 
 
+private MoveRegionProcedure
+ReopenTableRegionsProcedure.createReopenProcedure(MasterProcedureEnv env,
+ HRegionLocation loc)
+
+
 protected static void
 DeleteTableProcedure.deleteAssignmentState(MasterProcedureEnvenv,
  TableNametableName)
 
-
+
 static void
 MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(MasterProcedureEnvenv,
 TableNametableName,
@@ -1130,14 +1136,14 @@
 Remove the column family from the file system
 
 
-
+
 protected static void
 DeleteNamespaceProcedure.deleteDirectory(MasterProcedureEnvenv,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringnamespaceName)
 Delete the namespace directories from the file system
 
 
-
+
 private void
 ModifyTableProcedure.deleteFromFs(MasterProcedureEnvenv,
 TableDescriptoroldTableDescriptor,
@@ -1145,168 +1151,162 @@
 Removes from hdfs the families that are not longer present 
in the new table descriptor.
 
 
-
+
 protected static void
 DeleteTableProcedure.deleteFromFs(MasterProcedureEnvenv,
 TableNametableName,
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInforegions,
 booleanarchive)
 
-
+
 protected static void
 DeleteTableProcedure.deleteFromMeta(MasterProcedureEnvenv,
   TableNametableName,
   https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInforegions)
 
-
+
 protected static void
 DeleteNamespaceProcedure.deleteFromNSTable(MasterProcedureEnvenv,
  https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringnamespaceName)
 delete the row from namespace table
 
 
-
+
 protected static void
 DeleteTableProcedure.deleteTableDescriptorCache(MasterProcedureEnvenv,
   TableNametableName)
 
-
+
 protected static void
 DeleteTableProcedure.deleteTableStates(MasterProcedureEnvenv,
  TableNametableName)
 
-
+
 void
 RSProcedureDispatcher.RemoteProcedureResolver.dispatchCloseRequests(MasterProcedureEnvenv,
  https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRSProcedureDispatcher.RegionCloseOperationoperations)
 
-
+
 void
 RSProcedureDispatcher.ExecuteProceduresRemoteCall.dispatchCloseRequests(MasterProcedureEnvenv,
  https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRSProcedureDispatcher.RegionCloseOperationoperations)
 
-
+
 void
 RSProcedureDispatcher.CompatRemoteProcedureResolver.dispatchCloseRequests(MasterProcedureEnvenv,
  https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRSProcedureDispatcher.RegionCloseOperationoperations)
 
-
+
 void
 RSProcedureDispatcher.RemoteProcedureResolver.dispatchOpenRequests(MasterProcedureEnvenv,
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRSProcedureDispatcher.RegionOpenOperationoperations)
 
-
+
 void
 RSProcedureDispatcher.ExecuteProceduresRemoteCall.dispatchOpenRequests(MasterProcedureEnvenv,
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRSProcedureDispatcher.RegionOpenOperationoperations)
 
-
+
 void
 

[22/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

2018-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
index b6e7636..592c2cc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
@@ -356,3901 +356,3924 @@
 348  public Future<Void> modifyTableAsync(TableDescriptor td) throws IOException {
 349    ModifyTableResponse response = executeCallable(
 350      new MasterCallable<ModifyTableResponse>(getConnection(), getRpcControllerFactory()) {
-351        @Override
-352        protected ModifyTableResponse rpcCall() throws Exception {
-353          setPriority(td.getTableName());
-354          ModifyTableRequest request = RequestConverter.buildModifyTableRequest(
-355            td.getTableName(), td, ng.getNonceGroup(), ng.newNonce());
-356          return master.modifyTable(getRpcController(), request);
-357        }
-358      });
-359    return new ModifyTableFuture(this, td.getTableName(), response);
-360  }
-361
-362  @Override
-363  public List<TableDescriptor> listTableDescriptorsByNamespace(byte[] name) throws IOException {
-364    return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
-365        getRpcControllerFactory()) {
-366      @Override
-367      protected List<TableDescriptor> rpcCall() throws Exception {
-368        return master.listTableDescriptorsByNamespace(getRpcController(),
-369            ListTableDescriptorsByNamespaceRequest.newBuilder()
-370              .setNamespaceName(Bytes.toString(name)).build())
-371            .getTableSchemaList()
-372            .stream()
-373            .map(ProtobufUtil::toTableDescriptor)
-374            .collect(Collectors.toList());
-375      }
-376    });
-377  }
-378
-379  @Override
-380  public List<TableDescriptor> listTableDescriptors(List<TableName> tableNames) throws IOException {
-381    return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
-382        getRpcControllerFactory()) {
-383      @Override
-384      protected List<TableDescriptor> rpcCall() throws Exception {
-385        GetTableDescriptorsRequest req =
-386            RequestConverter.buildGetTableDescriptorsRequest(tableNames);
-387        return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
-388            req));
-389      }
-390    });
-391  }
-392
-393  @Override
-394  public List<RegionInfo> getRegions(final ServerName sn) throws IOException {
-395    AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
-396    // TODO: There is no timeout on this controller. Set one!
-397    HBaseRpcController controller = rpcControllerFactory.newController();
-398    return ProtobufUtil.getOnlineRegions(controller, admin);
-399  }
-400
-401  @Override
-402  public List<RegionInfo> getRegions(TableName tableName) throws IOException {
-403    if (TableName.isMetaTableName(tableName)) {
-404      return Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO);
-405    } else {
-406      return MetaTableAccessor.getTableRegions(connection, tableName, true);
-407    }
-408  }
-409
-410  private static class AbortProcedureFuture extends ProcedureFuture<Boolean> {
-411    private boolean isAbortInProgress;
-412
-413    public AbortProcedureFuture(
-414        final HBaseAdmin admin,
-415        final Long procId,
-416        final Boolean abortProcResponse) {
-417      super(admin, procId);
-418      this.isAbortInProgress = abortProcResponse;
-419    }
-420
-421    @Override
-422    public Boolean get(long timeout, TimeUnit unit)
-423        throws InterruptedException, ExecutionException, TimeoutException {
-424      if (!this.isAbortInProgress) {
-425        return false;
-426      }
-427      super.get(timeout, unit);
-428      return true;
-429    }
-430  }
-431
-432  /** @return Connection used by this object. */
-433  @Override
-434  public Connection getConnection() {
-435    return connection;
-436  }
-437
-438  @Override
-439  public boolean tableExists(final TableName tableName) throws IOException {
-440    return executeCallable(new RpcRetryingCallable<Boolean>() {
-441      @Override
-442      protected Boolean rpcCall(int callTimeout) throws Exception {
-443        return MetaTableAccessor.tableExists(connection, tableName);
-444      }
-445    });
-446  }
-447
-448  @Override
-449  public HTableDescriptor[] listTables() throws IOException {
-450    return listTables((Pattern)null, false);
-451  }
-452
-453  @Override
-454  public HTableDescriptor[] listTables(Pattern pattern) throws IOException {
-455    return

[22/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

2018-06-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
index fea2b5a..c7a6cc4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
@@ -1354,816 +1354,824 @@
 1346   */
 1347  public static void putsToMetaTable(final Connection connection, final List<Put> ps)
 1348      throws IOException {
-1349    try (Table t = getMetaHTable(connection)) {
-1350      debugLogMutations(ps);
-1351      t.put(ps);
-1352    }
-1353  }
-1354
-1355  /**
-1356   * Delete the passed <code>d</code> from the <code>hbase:meta</code> table.
-1357   * @param connection connection we're using
-1358   * @param d Delete to add to hbase:meta
-1359   */
-1360  private static void deleteFromMetaTable(final Connection connection, final Delete d)
-1361      throws IOException {
-1362    List<Delete> dels = new ArrayList<>(1);
-1363    dels.add(d);
-1364    deleteFromMetaTable(connection, dels);
-1365  }
-1366
-1367  /**
-1368   * Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
-1369   * @param connection connection we're using
-1370   * @param deletes Deletes to add to hbase:meta  This list should support #remove.
-1371   */
-1372  private static void deleteFromMetaTable(final Connection connection, final List<Delete> deletes)
-1373      throws IOException {
-1374    try (Table t = getMetaHTable(connection)) {
-1375      debugLogMutations(deletes);
-1376      t.delete(deletes);
-1377    }
-1378  }
-1379
-1380  /**
-1381   * Deletes some replica columns corresponding to replicas for the passed rows
-1382   * @param metaRows rows in hbase:meta
-1383   * @param replicaIndexToDeleteFrom the replica ID we would start deleting from
-1384   * @param numReplicasToRemove how many replicas to remove
-1385   * @param connection connection we're using to access meta table
-1386   */
-1387  public static void removeRegionReplicasFromMeta(Set<byte[]> metaRows,
-1388    int replicaIndexToDeleteFrom, int numReplicasToRemove, Connection connection)
-1389      throws IOException {
-1390    int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove;
-1391    for (byte[] row : metaRows) {
-1392      long now = EnvironmentEdgeManager.currentTime();
-1393      Delete deleteReplicaLocations = new Delete(row);
-1394      for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
-1395        deleteReplicaLocations.addColumns(getCatalogFamily(),
-1396          getServerColumn(i), now);
-1397        deleteReplicaLocations.addColumns(getCatalogFamily(),
-1398          getSeqNumColumn(i), now);
-1399        deleteReplicaLocations.addColumns(getCatalogFamily(),
-1400          getStartCodeColumn(i), now);
-1401      }
-1402      deleteFromMetaTable(connection, deleteReplicaLocations);
-1403    }
-1404  }
-1405
-1406  /**
-1407   * Execute the passed <code>mutations</code> against <code>hbase:meta</code> table.
-1408   * @param connection connection we're using
-1409   * @param mutations Puts and Deletes to execute on hbase:meta
-1410   * @throws IOException
-1411   */
-1412  public static void mutateMetaTable(final Connection connection,
-1413                                     final List<Mutation> mutations)
-1414    throws IOException {
-1415    Table t = getMetaHTable(connection);
-1416    try {
-1417      debugLogMutations(mutations);
-1418      t.batch(mutations, null);
-1419    } catch (InterruptedException e) {
-1420      InterruptedIOException ie = new InterruptedIOException(e.getMessage());
-1421      ie.initCause(e);
-1422      throw ie;
-1423    } finally {
-1424      t.close();
-1425    }
-1426  }
-1427
-1428  private static void addRegionStateToPut(Put put, RegionState.State state) throws IOException {
-1429    put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
-1430        .setRow(put.getRow())
-1431        .setFamily(HConstants.CATALOG_FAMILY)
-1432        .setQualifier(getRegionStateColumn())
-1433        .setTimestamp(put.getTimestamp())
-1434        .setType(Cell.Type.Put)
-1435        .setValue(Bytes.toBytes(state.name()))
-1436        .build());
-1437  }
-1438
-1439  /**
-1440   * Adds daughter region infos to hbase:meta row for the specified region. Note that this does not
-1441   * add its daughter's as different rows, but adds information about the daughters in the same row
-1442   * as the parent. Use
-1443   * {@link #splitRegion(Connection, RegionInfo, long, RegionInfo, RegionInfo, ServerName, int)}
-1444   * if you want to do that.
-1445   * @param connection connection we're using
-1446   * 
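
mutateMetaTable() above funnels Puts and Deletes through Table.batch() and rethrows an interrupt as InterruptedIOException. A hedged sketch of that same batching pattern against an ordinary table; the table name is illustrative, since the real method targets hbase:meta via getMetaHTable():

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Table;

public final class BatchMutationsSketch {
  static void batchAgainst(Connection connection, TableName name, List<Mutation> mutations)
      throws IOException {
    try (Table t = connection.getTable(name)) {
      // batch() throws InterruptedException; convert it so callers only deal
      // with IOException, mirroring the catch block in mutateMetaTable().
      t.batch(mutations, new Object[mutations.size()]);
    } catch (InterruptedException e) {
      InterruptedIOException ie = new InterruptedIOException(e.getMessage());
      ie.initCause(e);
      throw ie;
    }
  }
}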

[22/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

2018-06-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
index 42d0637..eb16038 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
@@ -80,21 +80,21 @@
 072import org.apache.hadoop.hbase.PleaseHoldException;
 073import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
 074import org.apache.hadoop.hbase.ScheduledChore;
-075import org.apache.hadoop.hbase.ServerMetricsBuilder;
-076import org.apache.hadoop.hbase.ServerName;
-077import org.apache.hadoop.hbase.TableDescriptors;
-078import org.apache.hadoop.hbase.TableName;
-079import org.apache.hadoop.hbase.TableNotDisabledException;
-080import org.apache.hadoop.hbase.TableNotFoundException;
-081import org.apache.hadoop.hbase.UnknownRegionException;
-082import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-083import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-084import org.apache.hadoop.hbase.client.MasterSwitchType;
-085import org.apache.hadoop.hbase.client.RegionInfo;
-086import org.apache.hadoop.hbase.client.Result;
-087import org.apache.hadoop.hbase.client.TableDescriptor;
-088import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-089import org.apache.hadoop.hbase.client.TableState;
+075import org.apache.hadoop.hbase.ServerName;
+076import org.apache.hadoop.hbase.TableDescriptors;
+077import org.apache.hadoop.hbase.TableName;
+078import org.apache.hadoop.hbase.TableNotDisabledException;
+079import org.apache.hadoop.hbase.TableNotFoundException;
+080import org.apache.hadoop.hbase.UnknownRegionException;
+081import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+082import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+083import org.apache.hadoop.hbase.client.MasterSwitchType;
+084import org.apache.hadoop.hbase.client.RegionInfo;
+085import org.apache.hadoop.hbase.client.Result;
+086import org.apache.hadoop.hbase.client.TableDescriptor;
+087import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+088import org.apache.hadoop.hbase.client.TableState;
+089import org.apache.hadoop.hbase.client.VersionInfoUtil;
 090import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 091import org.apache.hadoop.hbase.exceptions.DeserializationException;
 092import org.apache.hadoop.hbase.exceptions.MergeRegionException;
@@ -220,3477 +220,3481 @@
 212
 213import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 214import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-215import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-216import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-217import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;
-218import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-219import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-220
-221/**
-222 * HMaster is the "master server" for HBase. An HBase cluster has one active
-223 * master.  If many masters are started, all compete.  Whichever wins goes on to
-224 * run the cluster.  All others park themselves in their constructor until
-225 * master or cluster shutdown or until the active master loses its lease in
-226 * zookeeper.  Thereafter, all running master jostle to take over master role.
-227 *
-228 * <p>The Master can be asked shutdown the cluster. See {@link #shutdown()}.  In
-229 * this case it will tell all regionservers to go down and then wait on them
-230 * all reporting in that they are down.  This master will then shut itself down.
-231 *
-232 * <p>You can also shutdown just this master.  Call {@link #stopMaster()}.
-233 *
-234 * @see org.apache.zookeeper.Watcher
-235 */
-236@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-237@SuppressWarnings("deprecation")
-238public class HMaster extends HRegionServer implements MasterServices {
-239  private static Logger LOG = LoggerFactory.getLogger(HMaster.class.getName());
-240
-241  /**
-242   * Protection against zombie master. Started once Master accepts active responsibility and
-243   * starts taking over responsibilities. Allows a finite time window before giving up ownership.
-244   */
-245  private static class InitializationMonitor extends HasThread {
-246    /** The amount of time in milliseconds to sleep before checking initialization status. */
-247    public static final String TIMEOUT_KEY = 

[22/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
index fe1e077..90c31f4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
@@ -1072,894 +1072,913 @@
 1064
 1065  protected boolean waitServerReportEvent(final ServerName serverName, final Procedure proc) {
 1066    final ServerStateNode serverNode = regionStates.getOrCreateServer(serverName);
-1067    return serverNode.getReportEvent().suspendIfNotReady(proc);
-1068  }
-1069
-1070  protected void wakeServerReportEvent(final ServerStateNode serverNode) {
-1071    serverNode.getReportEvent().wake(getProcedureScheduler());
-1072  }
-1073
-1074  // ============================================================================
-1075  //  RIT chore
-1076  // ============================================================================
-1077  private static class RegionInTransitionChore extends ProcedureInMemoryChore<MasterProcedureEnv> {
-1078    public RegionInTransitionChore(final int timeoutMsec) {
-1079      super(timeoutMsec);
-1080    }
-1081
-1082    @Override
-1083    protected void periodicExecute(final MasterProcedureEnv env) {
-1084      final AssignmentManager am = env.getAssignmentManager();
-1085
-1086      final RegionInTransitionStat ritStat = am.computeRegionInTransitionStat();
-1087      if (ritStat.hasRegionsOverThreshold()) {
-1088        for (RegionState hri: ritStat.getRegionOverThreshold()) {
-1089          am.handleRegionOverStuckWarningThreshold(hri.getRegion());
-1090        }
-1091      }
-1092
-1093      // update metrics
-1094      am.updateRegionsInTransitionMetrics(ritStat);
-1095    }
-1096  }
-1097
-1098  public RegionInTransitionStat computeRegionInTransitionStat() {
-1099    final RegionInTransitionStat rit = new RegionInTransitionStat(getConfiguration());
-1100    rit.update(this);
-1101    return rit;
-1102  }
-1103
-1104  public static class RegionInTransitionStat {
-1105    private final int ritThreshold;
+1067    if (serverNode == null) {
+1068      LOG.warn("serverName=null; {}", proc);
+1069    }
+1070    return serverNode.getReportEvent().suspendIfNotReady(proc);
+1071  }
+1072
+1073  protected void wakeServerReportEvent(final ServerStateNode serverNode) {
+1074    serverNode.getReportEvent().wake(getProcedureScheduler());
+1075  }
+1076
+1077  // ============================================================================
+1078  //  RIT chore
+1079  // ============================================================================
+1080  private static class RegionInTransitionChore extends ProcedureInMemoryChore<MasterProcedureEnv> {
+1081    public RegionInTransitionChore(final int timeoutMsec) {
+1082      super(timeoutMsec);
+1083    }
+1084
+1085    @Override
+1086    protected void periodicExecute(final MasterProcedureEnv env) {
+1087      final AssignmentManager am = env.getAssignmentManager();
+1088
+1089      final RegionInTransitionStat ritStat = am.computeRegionInTransitionStat();
+1090      if (ritStat.hasRegionsOverThreshold()) {
+1091        for (RegionState hri: ritStat.getRegionOverThreshold()) {
+1092          am.handleRegionOverStuckWarningThreshold(hri.getRegion());
+1093        }
+1094      }
+1095
+1096      // update metrics
+1097      am.updateRegionsInTransitionMetrics(ritStat);
+1098    }
+1099  }
+1100
+1101  public RegionInTransitionStat computeRegionInTransitionStat() {
+1102    final RegionInTransitionStat rit = new RegionInTransitionStat(getConfiguration());
+1103    rit.update(this);
+1104    return rit;
+1105  }
 1106
-1107    private HashMap<String, RegionState> ritsOverThreshold = null;
-1108    private long statTimestamp;
-1109    private long oldestRITTime = 0;
-1110    private int totalRITsTwiceThreshold = 0;
-1111    private int totalRITs = 0;
-1112
-1113    @VisibleForTesting
-1114    public RegionInTransitionStat(final Configuration conf) {
-1115      this.ritThreshold =
-1116        conf.getInt(METRICS_RIT_STUCK_WARNING_THRESHOLD, DEFAULT_RIT_STUCK_WARNING_THRESHOLD);
-1117    }
-1118
-1119    public int getRITThreshold() {
-1120      return ritThreshold;
-1121    }
-1122
-1123    public long getTimestamp() {
-1124      return statTimestamp;
-1125    }
-1126
-1127    public int getTotalRITs() {
-1128      return totalRITs;
-1129    }
-1130
-1131    public long getOldestRITTime() {
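
The RegionInTransitionChore above wakes periodically, recomputes the RIT statistics, and warns on regions stuck past a threshold. A sketch of that same wake/compute/warn shape in terms of the public ScheduledChore class; the Stoppable wiring and the data source are illustrative stand-ins for the AssignmentManager plumbing, not HBase internals:

import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

class RitWatchChore extends ScheduledChore {
  private final int thresholdMsec;

  RitWatchChore(Stoppable stopper, int periodMsec, int thresholdMsec) {
    super("RitWatchChore", stopper, periodMsec);
    this.thresholdMsec = thresholdMsec;
  }

  @Override
  protected void chore() {
    long stuckMillis = sampleOldestTransitionMillis(); // hypothetical data source
    if (stuckMillis > thresholdMsec) {
      System.err.println("Region stuck in transition for " + stuckMillis + "ms");
    }
  }

  private long sampleOldestTransitionMillis() {
    return 0L; // placeholder; a real chore would read assignment state here
  }
}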

[22/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

2018-05-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
index 3da432b..d30fa8f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
@@ -928,7690 +928,7698 @@
 920      Collection<HStore> stores = this.stores.values();
 921      try {
 922        // update the stores that we are replaying
-923        stores.forEach(HStore::startReplayingFromWAL);
-924        // Recover any edits if available.
-925        maxSeqId = Math.max(maxSeqId,
-926          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
-927        // Make sure mvcc is up to max.
-928        this.mvcc.advanceTo(maxSeqId);
-929      } finally {
-930        // update the stores that we are done replaying
-931        stores.forEach(HStore::stopReplayingFromWAL);
-932      }
-933    }
-934    this.lastReplayedOpenRegionSeqId = maxSeqId;
-935
-936    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-937    this.writestate.flushRequested = false;
-938    this.writestate.compacting.set(0);
-939
-940    if (this.writestate.writesEnabled) {
-941      // Remove temporary data left over from old regions
-942      status.setStatus("Cleaning up temporary data from old regions");
-943      fs.cleanupTempDir();
-944    }
-945
-946    if (this.writestate.writesEnabled) {
-947      status.setStatus("Cleaning up detritus from prior splits");
-948      // Get rid of any splits or merges that were lost in-progress.  Clean out
-949      // these directories here on open.  We may be opening a region that was
-950      // being split but we crashed in the middle of it all.
-951      fs.cleanupAnySplitDetritus();
-952      fs.cleanupMergesDir();
-953    }
-954
-955    // Initialize split policy
-956    this.splitPolicy = RegionSplitPolicy.create(this, conf);
-957
-958    // Initialize flush policy
-959    this.flushPolicy = FlushPolicyFactory.create(this, conf);
-960
-961    long lastFlushTime = EnvironmentEdgeManager.currentTime();
-962    for (HStore store: stores.values()) {
-963      this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-964    }
-965
-966    // Use maximum of log sequenceid or that which was found in stores
-967    // (particularly if no recovered edits, seqid will be -1).
-968    long maxSeqIdFromFile =
-969      WALSplitter.getMaxRegionSequenceId(fs.getFileSystem(), fs.getRegionDir());
-970    long nextSeqId = Math.max(maxSeqId, maxSeqIdFromFile) + 1;
-971    if (writestate.writesEnabled) {
-972      WALSplitter.writeRegionSequenceIdFile(fs.getFileSystem(), fs.getRegionDir(), nextSeqId - 1);
-973    }
-974
-975    LOG.info("Opened {}; next sequenceid={}", this.getRegionInfo().getShortNameToLog(), nextSeqId);
-976
-977    // A region can be reopened if failed a split; reset flags
-978    this.closing.set(false);
-979    this.closed.set(false);
-980
-981    if (coprocessorHost != null) {
-982      status.setStatus("Running coprocessor post-open hooks");
-983      coprocessorHost.postOpen();
-984    }
+923        LOG.debug("replaying wal for " + this.getRegionInfo().getEncodedName());
+924        stores.forEach(HStore::startReplayingFromWAL);
+925        // Recover any edits if available.
+926        maxSeqId = Math.max(maxSeqId,
+927          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
+928        // Make sure mvcc is up to max.
+929        this.mvcc.advanceTo(maxSeqId);
+930      } finally {
+931        LOG.debug("stopping wal replay for " + this.getRegionInfo().getEncodedName());
+932        // update the stores that we are done replaying
+933        stores.forEach(HStore::stopReplayingFromWAL);
+934      }
+935    }
+936    this.lastReplayedOpenRegionSeqId = maxSeqId;
+937
+938    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+939    this.writestate.flushRequested = false;
+940    this.writestate.compacting.set(0);
+941
+942    if (this.writestate.writesEnabled) {
+943      LOG.debug("Cleaning up temporary data for " + this.getRegionInfo().getEncodedName());
+944      // Remove temporary data left over from old regions
+945      status.setStatus("Cleaning up temporary data from old regions");
+946      fs.cleanupTempDir();
+947    }
+948
+949    if (this.writestate.writesEnabled) {
+950      status.setStatus("Cleaning up detritus from prior splits");
+951      // Get rid of any splits or merges that were lost in-progress.  Clean out
[22/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

2018-05-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index 9644187..b979909 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -1199,2602 +1199,2599 @@
 1191ClusterStatusProtos.ServerLoad sl = 
buildServerLoad(reportStartTime, reportEndTime);
 1192try {
 1193  RegionServerReportRequest.Builder 
request = RegionServerReportRequest.newBuilder();
-1194  ServerName sn = 
ServerName.parseVersionedServerName(this.serverName.getVersionedBytes());
-1195  
request.setServer(ProtobufUtil.toServerName(sn));
-1196  request.setLoad(sl);
-1197  rss.regionServerReport(null, 
request.build());
-1198} catch (ServiceException se) {
-1199  IOException ioe = 
ProtobufUtil.getRemoteException(se);
-1200  if (ioe instanceof 
YouAreDeadException) {
-1201// This will be caught and 
handled as a fatal error in run()
-1202throw ioe;
-1203  }
-1204  if (rssStub == rss) {
-1205rssStub = null;
-1206  }
-1207  // Couldn't connect to the master, 
get location from zk and reconnect
-1208  // Method blocks until new master 
is found or we are stopped
-1209  
createRegionServerStatusStub(true);
-1210}
-1211  }
-1212
-1213  /**
-1214   * Reports the given map of Regions 
and their size on the filesystem to the active Master.
-1215   *
-1216   * @param regionSizeStore The store 
containing region sizes
-1217   * @return false if 
FileSystemUtilizationChore should pause reporting to master. true otherwise
-1218   */
-1219  public boolean 
reportRegionSizesForQuotas(RegionSizeStore regionSizeStore) {
-1220
RegionServerStatusService.BlockingInterface rss = rssStub;
-1221if (rss == null) {
-1222  // the current server could be 
stopping.
-1223  LOG.trace("Skipping Region size 
report to HMaster as stub is null");
-1224  return true;
-1225}
-1226try {
-1227  buildReportAndSend(rss, 
regionSizeStore);
-1228} catch (ServiceException se) {
-1229  IOException ioe = 
ProtobufUtil.getRemoteException(se);
-1230  if (ioe instanceof 
PleaseHoldException) {
-1231LOG.trace("Failed to report 
region sizes to Master because it is initializing."
-1232+ " This will be retried.", 
ioe);
-1233// The Master is coming up. Will 
retry the report later. Avoid re-creating the stub.
-1234return true;
-1235  }
-1236  if (rssStub == rss) {
-1237rssStub = null;
-1238  }
-1239  
createRegionServerStatusStub(true);
-1240  if (ioe instanceof 
DoNotRetryIOException) {
-1241DoNotRetryIOException 
doNotRetryEx = (DoNotRetryIOException) ioe;
-1242if (doNotRetryEx.getCause() != 
null) {
-1243  Throwable t = 
doNotRetryEx.getCause();
-1244  if (t instanceof 
UnsupportedOperationException) {
-1245LOG.debug("master doesn't 
support ReportRegionSpaceUse, pause before retrying");
-1246return false;
-1247  }
-1248}
-1249  }
-1250  LOG.debug("Failed to report region 
sizes to Master. This will be retried.", ioe);
-1251}
-1252return true;
-1253  }
-1254
-1255  /**
-1256   * Builds the region size report and 
sends it to the master. Upon successful sending of the
-1257   * report, the region sizes that were 
sent are marked as sent.
-1258   *
-1259   * @param rss The stub to send to the 
Master
-1260   * @param regionSizeStore The store 
containing region sizes
-1261   */
-1262  void 
buildReportAndSend(RegionServerStatusService.BlockingInterface rss,
-1263  RegionSizeStore regionSizeStore) 
throws ServiceException {
-1264RegionSpaceUseReportRequest request 
=
-1265
buildRegionSpaceUseReportRequest(Objects.requireNonNull(regionSizeStore));
-1266rss.reportRegionSpaceUse(null, 
request);
-1267// Record the number of size reports 
sent
-1268if (metricsRegionServer != null) {
-1269  
metricsRegionServer.incrementNumRegionSizeReportsSent(regionSizeStore.size());
-1270}
-1271  }
-1272
-1273  /**
-1274   * Builds a {@link 
RegionSpaceUseReportRequest} protobuf message from the region size map.
-1275   *
-1276   * @param regionSizeStore The size in 
bytes of regions
-1277   * @return The corresponding protocol 
buffer message.
-1278   */
-1279  RegionSpaceUseReportRequest 
buildRegionSpaceUseReportRequest(RegionSizeStore regionSizes) {
-1280RegionSpaceUseReportRequest.Builder 
request = RegionSpaceUseReportRequest.newBuilder();
-1281for (EntryRegionInfo, 
RegionSize entry : regionSizes) {
-1282  
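
Per its javadoc, reportRegionSizesForQuotas() returns false only when the master rejects the space-use RPC as unsupported; everything else is retried. A sketch of how a caller might honor that contract; the Reporter interface and pause value are illustrative, not the FileSystemUtilizationChore API:

final class QuotaReportLoopSketch {
  interface Reporter {
    boolean reportRegionSizes();
  }

  static void runOnce(Reporter rs, long pauseMillis) throws InterruptedException {
    if (!rs.reportRegionSizes()) {
      // false means "pause reporting": the master cannot accept the report yet.
      Thread.sleep(pauseMillis);
    }
  }
}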

[22/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
index 25c940a..59f97b4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
@@ -428,25 +428,27 @@
 420    if (cpHost != null) {
 421      switch (state) {
 422        case MODIFY_TABLE_PRE_OPERATION:
-423          cpHost.preModifyTableAction(getTableName(), modifiedTableDescriptor, getUser());
-424          break;
-425        case MODIFY_TABLE_POST_OPERATION:
-426          cpHost.postCompletedModifyTableAction(getTableName(), modifiedTableDescriptor,getUser());
-427          break;
-428        default:
-429          throw new UnsupportedOperationException(this + " unhandled state=" + state);
-430      }
-431    }
-432  }
-433
-434  private List<RegionInfo> getRegionInfoList(final MasterProcedureEnv env) throws IOException {
-435    if (regionInfoList == null) {
-436      regionInfoList = env.getAssignmentManager().getRegionStates()
-437          .getRegionsOfTable(getTableName());
-438    }
-439    return regionInfoList;
-440  }
-441}
+423          cpHost.preModifyTableAction(getTableName(), unmodifiedTableDescriptor,
+424            modifiedTableDescriptor, getUser());
+425          break;
+426        case MODIFY_TABLE_POST_OPERATION:
+427          cpHost.postCompletedModifyTableAction(getTableName(), unmodifiedTableDescriptor,
+428            modifiedTableDescriptor,getUser());
+429          break;
+430        default:
+431          throw new UnsupportedOperationException(this + " unhandled state=" + state);
+432      }
+433    }
+434  }
+435
+436  private List<RegionInfo> getRegionInfoList(final MasterProcedureEnv env) throws IOException {
+437    if (regionInfoList == null) {
+438      regionInfoList = env.getAssignmentManager().getRegionStates()
+439          .getRegionsOfTable(getTableName());
+440    }
+441    return regionInfoList;
+442  }
+443}
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/util/JSONBean.Writer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/JSONBean.Writer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/JSONBean.Writer.html
index 52440ca..1a2cb3b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/JSONBean.Writer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/JSONBean.Writer.html
@@ -318,48 +318,52 @@
 310        jg.writeEndArray();
 311      } else if(value instanceof Number) {
 312        Number n = (Number)value;
-313        jg.writeNumber(n.toString());
-314      } else if(value instanceof Boolean) {
-315        Boolean b = (Boolean)value;
-316        jg.writeBoolean(b);
-317      } else if(value instanceof CompositeData) {
-318        CompositeData cds = (CompositeData)value;
-319        CompositeType comp = cds.getCompositeType();
-320        Set<String> keys = comp.keySet();
-321        jg.writeStartObject();
-322        for (String key: keys) {
-323          writeAttribute(jg, key, null, cds.get(key));
-324        }
-325        jg.writeEndObject();
-326      } else if(value instanceof TabularData) {
-327        TabularData tds = (TabularData)value;
-328        jg.writeStartArray();
-329        for(Object entry : tds.values()) {
-330          writeObject(jg, description, entry);
-331        }
-332        jg.writeEndArray();
-333      } else {
-334        jg.writeString(value.toString());
-335      }
-336    }
-337  }
-338
-339  /**
-340   * Dump out all registered mbeans as json on System.out.
-341   * @throws IOException
-342   * @throws MalformedObjectNameException
-343   */
-344  public static void dumpAllBeans() throws IOException, MalformedObjectNameException {
-345    try (PrintWriter writer = new PrintWriter(
-346        new OutputStreamWriter(System.out, StandardCharsets.UTF_8))) {
-347      JSONBean dumper = new JSONBean();
-348      try (JSONBean.Writer jsonBeanWriter = dumper.open(writer)) {
-349        MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
-350        jsonBeanWriter.write(mbeanServer, new ObjectName("*:*"), null, false);
-351      }
-352    }
-353  }
-354}
+313        if (Double.isFinite(n.doubleValue())) {
+314          jg.writeNumber(n.toString());
+315        } else {
+316          jg.writeString(n.toString());
+317        }
+318      } else if(value instanceof Boolean) {
+319        Boolean b = (Boolean)value;
+320        jg.writeBoolean(b);
+321      } 
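
The JSONBean change above guards number output: JSON has no literal for NaN or infinity, so non-finite values are emitted as strings instead. A self-contained sketch of the same rule with Jackson's JsonGenerator (the class name is illustrative):

import java.io.IOException;
import java.io.StringWriter;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;

public class FiniteNumberWriterSketch {
  static void writeNumber(JsonGenerator jg, Number n) throws IOException {
    if (Double.isFinite(n.doubleValue())) {
      jg.writeNumber(n.toString());  // normal path: a real JSON number
    } else {
      jg.writeString(n.toString());  // NaN/Infinity: fall back to a string
    }
  }

  public static void main(String[] args) throws IOException {
    StringWriter out = new StringWriter();
    try (JsonGenerator jg = new JsonFactory().createGenerator(out)) {
      jg.writeStartArray();
      writeNumber(jg, 1.5d);
      writeNumber(jg, Double.NaN);
      jg.writeEndArray();
    }
    System.out.println(out); // prints [1.5,"NaN"]
  }
}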

[22/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

2018-05-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
index 4a879bb..7d27402 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
@@ -300,7 +300,7 @@
 292  private Map<String, com.google.protobuf.Service> coprocessorServiceHandlers = Maps.newHashMap();
 293
 294  // Track data size in all memstores
-295  private final MemStoreSizing memStoreSize = new MemStoreSizing();
+295  private final MemStoreSizing memStoreSizing = new ThreadSafeMemStoreSizing();
 296  private final RegionServicesForStores regionServicesForStores = new RegionServicesForStores(this);
 297
 298  // Debug possible data loss due to WAL off
@@ -1218,7389 +1218,7399 @@
 1210   * Increase the size of mem store in this region and the size of global mem
 1211   * store
 1212   */
-1213  public void incMemStoreSize(MemStoreSize memStoreSize) {
-1214    if (this.rsAccounting != null) {
-1215      rsAccounting.incGlobalMemStoreSize(memStoreSize);
-1216    }
-1217    long dataSize;
-1218    synchronized (this.memStoreSize) {
-1219      this.memStoreSize.incMemStoreSize(memStoreSize);
-1220      dataSize = this.memStoreSize.getDataSize();
-1221    }
-1222    checkNegativeMemStoreDataSize(dataSize, memStoreSize.getDataSize());
-1223  }
-1224
-1225  public void decrMemStoreSize(MemStoreSize memStoreSize) {
-1226    if (this.rsAccounting != null) {
-1227      rsAccounting.decGlobalMemStoreSize(memStoreSize);
-1228    }
-1229    long size;
-1230    synchronized (this.memStoreSize) {
-1231      this.memStoreSize.decMemStoreSize(memStoreSize);
-1232      size = this.memStoreSize.getDataSize();
+1213  void incMemStoreSize(MemStoreSize mss) {
+1214    incMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize());
+1215  }
+1216
+1217  void incMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1218    if (this.rsAccounting != null) {
+1219      rsAccounting.incGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
+1220    }
+1221    long dataSize =
+1222      this.memStoreSizing.incMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
+1223    checkNegativeMemStoreDataSize(dataSize, dataSizeDelta);
+1224  }
+1225
+1226  void decrMemStoreSize(MemStoreSize mss) {
+1227    decrMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize());
+1228  }
+1229
+1230  void decrMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1231    if (this.rsAccounting != null) {
+1232      rsAccounting.decGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
 1233    }
-1234    checkNegativeMemStoreDataSize(size, -memStoreSize.getDataSize());
-1235  }
-1236
-1237  private void checkNegativeMemStoreDataSize(long memStoreDataSize, long delta) {
-1238    // This is extremely bad if we make memStoreSize negative. Log as much info on the offending
-1239    // caller as possible. (memStoreSize might be a negative value already -- freeing memory)
-1240    if (memStoreDataSize < 0) {
-1241      LOG.error("Asked to modify this region's (" + this.toString()
-1242          + ") memStoreSize to a negative value which is incorrect. Current memStoreSize="
-1243          + (memStoreDataSize - delta) + ", delta=" + delta, new Exception());
-1244    }
-1245  }
-1246
-1247  @Override
-1248  public RegionInfo getRegionInfo() {
-1249    return this.fs.getRegionInfo();
-1250  }
-1251
-1252  /**
-1253   * @return Instance of {@link RegionServerServices} used by this HRegion.
-1254   * Can be null.
-1255   */
-1256  RegionServerServices getRegionServerServices() {
-1257    return this.rsServices;
-1258  }
-1259
-1260  @Override
-1261  public long getReadRequestsCount() {
-1262    return readRequestsCount.sum();
-1263  }
-1264
-1265  @Override
-1266  public long getFilteredReadRequestsCount() {
-1267    return filteredReadRequestsCount.sum();
-1268  }
-1269
-1270  @Override
-1271  public long getWriteRequestsCount() {
-1272    return writeRequestsCount.sum();
-1273  }
-1274
-1275  @Override
-1276  public long getMemStoreDataSize() {
-1277    return memStoreSize.getDataSize();
-1278  }
-1279
-1280  @Override
-1281  public long getMemStoreHeapSize() {
-1282    return memStoreSize.getHeapSize();
-1283  }
-1284
-1285  @Override
-1286  public long getMemStoreOffHeapSize() {
-1287    return memStoreSize.getOffHeapSize();
-1288  }
-1289
-1290  /** @return store services for this region, to access services required by store level needs */
-1291  public 
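
The diff replaces a synchronized MemStoreSizing with a ThreadSafeMemStoreSizing plus delta-based inc/dec methods, so the data size comes back from a single atomic update instead of a lock. A sketch of that idea with plain atomics; the class and method names are illustrative, not the HBase internals:

import java.util.concurrent.atomic.AtomicLong;

final class AtomicSizingSketch {
  private final AtomicLong dataSize = new AtomicLong();
  private final AtomicLong heapSize = new AtomicLong();
  private final AtomicLong offHeapSize = new AtomicLong();

  /** Applies the deltas and returns the new data size for a negative-size check. */
  long incMemStoreSize(long dataDelta, long heapDelta, long offHeapDelta) {
    heapSize.addAndGet(heapDelta);
    offHeapSize.addAndGet(offHeapDelta);
    return dataSize.addAndGet(dataDelta); // one atomic read-modify-write, no lock
  }

  long decMemStoreSize(long dataDelta, long heapDelta, long offHeapDelta) {
    return incMemStoreSize(-dataDelta, -heapDelta, -offHeapDelta);
  }
}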

[22/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

2018-05-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
index 2510283..418c60c 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
@@ -77,77 +77,77 @@
 069import org.apache.hadoop.hbase.client.RowMutations;
 070import org.apache.hadoop.hbase.client.Scan;
 071import org.apache.hadoop.hbase.client.Table;
-072import org.apache.hadoop.hbase.filter.BinaryComparator;
-073import org.apache.hadoop.hbase.filter.Filter;
-074import org.apache.hadoop.hbase.filter.FilterAllFilter;
-075import org.apache.hadoop.hbase.filter.FilterList;
-076import org.apache.hadoop.hbase.filter.PageFilter;
-077import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-078import org.apache.hadoop.hbase.filter.WhileMatchFilter;
-079import org.apache.hadoop.hbase.io.compress.Compression;
-080import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-081import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
-082import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-083import org.apache.hadoop.hbase.regionserver.BloomType;
-084import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-085import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
-086import org.apache.hadoop.hbase.trace.SpanReceiverHost;
-087import org.apache.hadoop.hbase.trace.TraceUtil;
-088import org.apache.hadoop.hbase.util.ByteArrayHashKey;
-089import org.apache.hadoop.hbase.util.Bytes;
-090import org.apache.hadoop.hbase.util.Hash;
-091import org.apache.hadoop.hbase.util.MurmurHash;
-092import org.apache.hadoop.hbase.util.Pair;
-093import org.apache.hadoop.hbase.util.YammerHistogramUtils;
-094import org.apache.hadoop.io.LongWritable;
-095import org.apache.hadoop.io.Text;
-096import org.apache.hadoop.mapreduce.Job;
-097import org.apache.hadoop.mapreduce.Mapper;
-098import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-099import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-100import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
-101import org.apache.hadoop.util.Tool;
-102import org.apache.hadoop.util.ToolRunner;
-103import org.apache.htrace.core.ProbabilitySampler;
-104import org.apache.htrace.core.Sampler;
-105import org.apache.htrace.core.TraceScope;
-106import org.apache.yetus.audience.InterfaceAudience;
-107import org.slf4j.Logger;
-108import org.slf4j.LoggerFactory;
-109import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
-110import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-111
-112/**
-113 * Script used evaluating HBase performance and scalability.  Runs a HBase
-114 * client that steps through one of a set of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random writes test, etc.). Pass on the
-116 * command-line which test to run and how many clients are participating in
-117 * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * <p>This class sets up and runs the evaluation programs described in
-120 * Section 7, <i>Performance Evaluation</i>, of the
-121 * <a href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
-122 * paper, pages 8-10.
-123 *
-124 * <p>By default, runs as a mapreduce job where each mapper runs a single test
-125 * client. Can also run as a non-mapreduce, multithreaded application by
-126 * specifying {@code --nomapred}. Each client does about 1GB of data, unless
-127 * specified otherwise.
-128 */
-129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-130public class PerformanceEvaluation extends Configured implements Tool {
-131  static final String RANDOM_SEEK_SCAN = "randomSeekScan";
-132  static final String RANDOM_READ = "randomRead";
-133  private static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-134  private static final ObjectMapper MAPPER = new ObjectMapper();
-135  static {
-136    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-137  }
-138
-139  public static final String TABLE_NAME = "TestTable";
-140  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
-141  public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0);
-142  public static final byte [] QUALIFIER_NAME = COLUMN_ZERO;
+072import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+073import org.apache.hadoop.hbase.filter.BinaryComparator;
+074import org.apache.hadoop.hbase.filter.Filter;
+075import 
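
PerformanceEvaluation is a Tool, so it can also be launched programmatically. A hedged launcher sketch; the constructor-with-Configuration and the argument list are assumptions based on the usage text above (run --help for the real options), and "randomRead" is the constant shown in the source:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.PerformanceEvaluation;
import org.apache.hadoop.util.ToolRunner;

public class RunPESketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // --nomapred runs the multithreaded, non-MapReduce mode described above.
    int rc = ToolRunner.run(conf, new PerformanceEvaluation(conf),
        new String[] { "--nomapred", "randomRead", "1" });
    System.exit(rc);
  }
}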

[22/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

2018-05-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
index e1bc325..63e7421 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
@@ -66,5125 +66,5224 @@
 058import 
java.util.concurrent.TimeoutException;
 059import 
java.util.concurrent.atomic.AtomicBoolean;
 060import 
java.util.concurrent.atomic.AtomicInteger;
-061import org.apache.commons.io.IOUtils;
-062import 
org.apache.commons.lang3.RandomStringUtils;
-063import 
org.apache.commons.lang3.StringUtils;
-064import 
org.apache.hadoop.conf.Configuration;
-065import 
org.apache.hadoop.conf.Configured;
-066import 
org.apache.hadoop.fs.FSDataOutputStream;
-067import org.apache.hadoop.fs.FileStatus;
-068import org.apache.hadoop.fs.FileSystem;
-069import org.apache.hadoop.fs.Path;
-070import 
org.apache.hadoop.fs.permission.FsAction;
-071import 
org.apache.hadoop.fs.permission.FsPermission;
-072import 
org.apache.hadoop.hbase.Abortable;
-073import org.apache.hadoop.hbase.Cell;
-074import 
org.apache.hadoop.hbase.CellUtil;
-075import 
org.apache.hadoop.hbase.ClusterMetrics;
-076import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-077import 
org.apache.hadoop.hbase.HBaseConfiguration;
-078import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-079import 
org.apache.hadoop.hbase.HConstants;
-080import 
org.apache.hadoop.hbase.HRegionInfo;
-081import 
org.apache.hadoop.hbase.HRegionLocation;
-082import 
org.apache.hadoop.hbase.KeyValue;
-083import 
org.apache.hadoop.hbase.MasterNotRunningException;
-084import 
org.apache.hadoop.hbase.MetaTableAccessor;
-085import 
org.apache.hadoop.hbase.RegionLocations;
-086import 
org.apache.hadoop.hbase.ServerName;
-087import 
org.apache.hadoop.hbase.TableName;
-088import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-089import 
org.apache.hadoop.hbase.client.Admin;
-090import 
org.apache.hadoop.hbase.client.ClusterConnection;
-091import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-092import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-093import 
org.apache.hadoop.hbase.client.Connection;
-094import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-095import 
org.apache.hadoop.hbase.client.Delete;
-096import 
org.apache.hadoop.hbase.client.Get;
-097import 
org.apache.hadoop.hbase.client.Put;
-098import 
org.apache.hadoop.hbase.client.RegionInfo;
-099import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-100import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-101import 
org.apache.hadoop.hbase.client.Result;
-102import 
org.apache.hadoop.hbase.client.RowMutations;
-103import 
org.apache.hadoop.hbase.client.Table;
-104import 
org.apache.hadoop.hbase.client.TableDescriptor;
-105import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-106import 
org.apache.hadoop.hbase.client.TableState;
-107import 
org.apache.hadoop.hbase.io.FileLink;
-108import 
org.apache.hadoop.hbase.io.HFileLink;
-109import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-110import 
org.apache.hadoop.hbase.io.hfile.HFile;
-111import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-112import 
org.apache.hadoop.hbase.master.MasterFileSystem;
-113import 
org.apache.hadoop.hbase.master.RegionState;
-114import 
org.apache.hadoop.hbase.regionserver.HRegion;
-115import 
org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-116import 
org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-117import 
org.apache.hadoop.hbase.replication.ReplicationException;
-118import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-119import 
org.apache.hadoop.hbase.security.UserProvider;
-120import 
org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
-121import 
org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
-122import 
org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
-123import 
org.apache.hadoop.hbase.util.hbck.ReplicationChecker;
-124import 
org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
-125import 
org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
-126import org.apache.hadoop.hbase.wal.WAL;
-127import 
org.apache.hadoop.hbase.wal.WALFactory;
-128import 
org.apache.hadoop.hbase.wal.WALSplitter;
-129import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-130import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-131import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-132import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-133import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-134import 

[22/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
index e6e43ee..a8b77ae 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
@@ -367,650 +367,650 @@
 359    lock.lock();
 360    try {
 361      LOG.trace("Starting WAL Procedure Store lease recovery");
-362      FileStatus[] oldLogs = getLogFiles();
-363      while (isRunning()) {
+362      while (isRunning()) {
+363        FileStatus[] oldLogs = getLogFiles();
 364        // Get Log-MaxID and recover lease on old logs
 365        try {
 366          flushLogId = initOldLogs(oldLogs);
 367        } catch (FileNotFoundException e) {
 368          LOG.warn("Someone else is active and deleted logs. retrying.", e);
-369          oldLogs = getLogFiles();
-370          continue;
-371        }
-372
-373        // Create new state-log
-374        if (!rollWriter(flushLogId + 1)) {
-375          // someone else has already created this log
-376          LOG.debug("Someone else has already created log " + flushLogId);
-377          continue;
-378        }
-379
-380        // We have the lease on the log
-381        oldLogs = getLogFiles();
-382        if (getMaxLogId(oldLogs) > flushLogId) {
-383          if (LOG.isDebugEnabled()) {
-384            LOG.debug("Someone else created new logs. Expected maxLogId < " + flushLogId);
-385          }
-386          logs.getLast().removeFile(this.walArchiveDir);
-387          continue;
-388        }
-389
-390        LOG.trace("Lease acquired for flushLogId={}", flushLogId);
-391        break;
-392      }
-393    } finally {
-394      lock.unlock();
-395    }
-396  }
-397
-398  @Override
-399  public void load(final ProcedureLoader loader) throws IOException {
-400    lock.lock();
-401    try {
-402      if (logs.isEmpty()) {
-403        throw new RuntimeException("recoverLease() must be called before loading data");
-404      }
-405
-406      // Nothing to do, If we have only the current log.
-407      if (logs.size() == 1) {
-408        LOG.trace("No state logs to replay.");
-409        loader.setMaxProcId(0);
-410        return;
-411      }
-412
-413      // Load the old logs
-414      final Iterator<ProcedureWALFile> it = logs.descendingIterator();
-415      it.next(); // Skip the current log
-416
-417      ProcedureWALFormat.load(it, storeTracker, new ProcedureWALFormat.Loader() {
-418        @Override
-419        public void setMaxProcId(long maxProcId) {
-420          loader.setMaxProcId(maxProcId);
-421        }
-422
-423        @Override
-424        public void load(ProcedureIterator procIter) throws IOException {
-425          loader.load(procIter);
-426        }
-427
-428        @Override
-429        public void handleCorrupted(ProcedureIterator procIter) throws IOException {
-430          loader.handleCorrupted(procIter);
-431        }
-432
-433        @Override
-434        public void markCorruptedWAL(ProcedureWALFile log, IOException e) {
-435          if (corruptedLogs == null) {
-436            corruptedLogs = new HashSet<>();
-437          }
-438          corruptedLogs.add(log);
-439          // TODO: sideline corrupted log
-440        }
-441      });
-442    } finally {
-443      try {
-444        // try to cleanup inactive wals and complete the operation
-445        buildHoldingCleanupTracker();
-446        tryCleanupLogsOnLoad();
-447        loading.set(false);
-448      } finally {
-449        lock.unlock();
-450      }
-451    }
-452  }
-453
-454  private void tryCleanupLogsOnLoad() {
-455    // nothing to cleanup.
-456    if (logs.size() <= 1) return;
-457
-458    // the config says to not cleanup wals on load.
-459    if (!conf.getBoolean(EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY,
-460      DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY)) {
-461      LOG.debug("WALs cleanup on load is not enabled: " + getActiveLogs());
-462      return;
-463    }
-464
-465    try {
-466      periodicRoll();
-467    } catch (IOException e) {
-468      LOG.warn("Unable to cleanup logs on load: " + e.getMessage(), e);
-469    }
-470  }
-471
-472  @Override
-473  public void insert(final Procedure proc, final Procedure[] subprocs) {
-474    if (LOG.isTraceEnabled()) {
-475      LOG.trace("Insert " + proc + ", subproc=" + Arrays.toString(subprocs));
-476    }
-477
-478    ByteSlot slot = acquireSlot();
-479    try {
-480      // Serialize the insert
-481      long[] subProcIds = null;
-482      if (subprocs != null) {
-483        ProcedureWALFormat.writeInsert(slot, proc, subprocs);
-484        subProcIds 
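
recoverLease() above now re-reads the log listing inside the loop and only breaks once the log it rolled is still the newest one. A distilled sketch of that retry shape, with hypothetical hooks standing in for getLogFiles()/initOldLogs()/rollWriter():

interface LeaseOps {
  long maxExistingLogId() throws java.io.IOException;      // re-scan the log directory
  boolean rollNewLog(long id) throws java.io.IOException;  // false if another master won
  void removeLastLog();                                    // discard a log that lost the race
}

final class LeaseRecoverySketch {
  static long acquire(LeaseOps ops) throws java.io.IOException {
    while (true) {
      long flushLogId = ops.maxExistingLogId();
      if (!ops.rollNewLog(flushLogId + 1)) {
        continue; // someone else already created this log id; retry from scratch
      }
      if (ops.maxExistingLogId() > flushLogId + 1) {
        ops.removeLastLog(); // a newer log appeared while we rolled; retry
        continue;
      }
      return flushLogId + 1; // our log is the newest: lease acquired
    }
  }
}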

[22/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

2018-03-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/backup/BackupDriver.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/BackupDriver.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/BackupDriver.html
index f71cea0..65c1a1f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/BackupDriver.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/BackupDriver.html
@@ -45,180 +45,181 @@
 037import java.io.IOException;
 038import java.net.URI;
 039
-040import org.apache.commons.cli.CommandLine;
-041import org.apache.hadoop.conf.Configuration;
-042import org.apache.hadoop.fs.Path;
-043import org.apache.hadoop.hbase.HBaseConfiguration;
-044import org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand;
-045import org.apache.hadoop.hbase.backup.impl.BackupCommands;
-046import org.apache.hadoop.hbase.backup.impl.BackupManager;
-047import org.apache.hadoop.hbase.util.AbstractHBaseTool;
-048import org.apache.hadoop.hbase.util.FSUtils;
-049import org.apache.hadoop.util.ToolRunner;
-050import org.apache.log4j.Level;
-051import org.apache.log4j.LogManager;
-052import org.apache.yetus.audience.InterfaceAudience;
-053import org.slf4j.Logger;
-054import org.slf4j.LoggerFactory;
-055
-056/**
-057 *
-058 * Command-line entry point for backup operation
-059 *
-060 */
-061@InterfaceAudience.Private
-062public class BackupDriver extends AbstractHBaseTool {
-063
-064  private static final Logger LOG = LoggerFactory.getLogger(BackupDriver.class);
-065  private CommandLine cmd;
-066
-067  public BackupDriver() throws IOException {
-068    init();
-069  }
-070
-071  protected void init() throws IOException {
-072    // disable irrelevant loggers to avoid it mess up command output
-073    LogUtils.disableZkAndClientLoggers();
-074  }
-075
-076  private int parseAndRun(String[] args) throws IOException {
-077
-078    // Check if backup is enabled
-079    if (!BackupManager.isBackupEnabled(getConf())) {
-080      System.err.println(BackupRestoreConstants.ENABLE_BACKUP);
-081      return -1;
-082    }
-083
-084    System.out.println(BackupRestoreConstants.VERIFY_BACKUP);
-085
-086    String cmd = null;
-087    String[] remainArgs = null;
-088    if (args == null || args.length == 0) {
-089      printToolUsage();
-090      return -1;
-091    } else {
-092      cmd = args[0];
-093      remainArgs = new String[args.length - 1];
-094      if (args.length > 1) {
-095        System.arraycopy(args, 1, remainArgs, 0, args.length - 1);
-096      }
-097    }
-098
-099    BackupCommand type = BackupCommand.HELP;
-100    if (BackupCommand.CREATE.name().equalsIgnoreCase(cmd)) {
-101      type = BackupCommand.CREATE;
-102    } else if (BackupCommand.HELP.name().equalsIgnoreCase(cmd)) {
-103      type = BackupCommand.HELP;
-104    } else if (BackupCommand.DELETE.name().equalsIgnoreCase(cmd)) {
-105      type = BackupCommand.DELETE;
-106    } else if (BackupCommand.DESCRIBE.name().equalsIgnoreCase(cmd)) {
-107      type = BackupCommand.DESCRIBE;
-108    } else if (BackupCommand.HISTORY.name().equalsIgnoreCase(cmd)) {
-109      type = BackupCommand.HISTORY;
-110    } else if (BackupCommand.PROGRESS.name().equalsIgnoreCase(cmd)) {
-111      type = BackupCommand.PROGRESS;
-112    } else if (BackupCommand.SET.name().equalsIgnoreCase(cmd)) {
-113      type = BackupCommand.SET;
-114    } else if (BackupCommand.REPAIR.name().equalsIgnoreCase(cmd)) {
-115      type = BackupCommand.REPAIR;
-116    } else if (BackupCommand.MERGE.name().equalsIgnoreCase(cmd)) {
-117      type = BackupCommand.MERGE;
-118    } else {
-119      System.out.println("Unsupported command for backup: " + cmd);
-120      printToolUsage();
-121      return -1;
-122    }
-123
-124    // enable debug logging
-125    if (this.cmd.hasOption(OPTION_DEBUG)) {
-126      LogManager.getLogger("org.apache.hadoop.hbase.backup").setLevel(Level.DEBUG);
-127    } else {
-128      LogManager.getLogger("org.apache.hadoop.hbase.backup").setLevel(Level.INFO);
-129    }
-130
-131    BackupCommands.Command command = BackupCommands.createCommand(getConf(), type, this.cmd);
-132    if (type == BackupCommand.CREATE && conf != null) {
-133      ((BackupCommands.CreateCommand) command).setConf(conf);
-134    }
-135    try {
-136      command.execute();
-137    } catch (IOException e) {
-138      if (e.getMessage().equals(BackupCommands.INCORRECT_USAGE)) {
-139        return -1;
-140      }
-141      throw e;
-142    } finally {
-143      command.finish();
-144    }
-145    return 0;
-146  }
-147
-148  @Override
-149  protected void addOptions() {
-150    // define supported options
-151    addOptNoArg(OPTION_DEBUG, OPTION_DEBUG_DESC);
-152    addOptWithArg(OPTION_TABLE, OPTION_TABLE_DESC);
-153    addOptWithArg(OPTION_BANDWIDTH, OPTION_BANDWIDTH_DESC);
-154
[22/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.

2018-03-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/coc.html
--
diff --git a/coc.html b/coc.html
index aaf03f7..52a3905 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Code of Conduct Policy
@@ -368,7 +368,7 @@ email to mailto:priv...@hbase.apache.org;>the priv
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-03-23
+  Last Published: 
2018-03-24
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index b5d7f9e..e574437 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Installing Apache HBase (TM) on Windows using 
Cygwin
 
@@ -667,7 +667,7 @@ Now your HBase server is running, start 
coding and build that next
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-03-23
+  Last Published: 
2018-03-24
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index 8ab035e..732793a 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependencies
 
@@ -433,7 +433,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-03-23
+  Last Published: 
2018-03-24
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index 5aceb48..b546d2b 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Reactor Dependency Convergence
 
@@ -1098,7 +1098,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-03-23
+  Last Published: 
2018-03-24
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index cb25d48..4ab2659 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Dependency Information
 
@@ -306,7 +306,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-03-23
+  Last Published: 
2018-03-24
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/dependency-management.html
--
diff --git a/dependency-management.html b/dependency-management.html
index 65eb514..07dd936 100644
--- a/dependency-management.html
+++ b/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependency Management
 
@@ -974,7 +974,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-03-23
+  Last Published: 
2018-03-24
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/constant-values.html
--
diff --git a/devapidocs/constant-values.html b/devapidocs/constant-values.html
index 60d91aa..8a3e7f5 100644
--- a/devapidocs/constant-values.html
+++ b/devapidocs/constant-values.html
@@ -3740,28 +3740,28 @@
 
 publicstaticfinalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 date
-"Fri Mar 23 14:41:35 UTC 2018"
+"Sat Mar 24 15:05:23 UTC 2018"
 
 
 
 
 publicstaticfinalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 

[22/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

2018-03-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/client/class-use/Durability.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/Durability.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Durability.html
index fb42229..2665b52 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Durability.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Durability.html
@@ -215,14 +215,14 @@ service.
 
 
 Durability
-TableDescriptor.getDurability()
-Returns the durability setting for the table.
+Mutation.getDurability()
+Get the current durability
 
 
 
 Durability
-Mutation.getDurability()
-Get the current durability
+TableDescriptor.getDurability()
+Returns the durability setting for the table.
 
 
 
@@ -248,12 +248,14 @@ the order they are declared.
 
 
 
-long
-HTable.incrementColumnValue(byte[]row,
+default https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">Long
+AsyncTable.incrementColumnValue(byte[]row,
 byte[]family,
 byte[]qualifier,
 longamount,
-Durabilitydurability)
+Durabilitydurability)
+Atomically increments a column value.
+
 
 
 long
@@ -266,47 +268,45 @@ the order they are declared.
 
 
 
-default CompletableFuture<Long>
-AsyncTable.incrementColumnValue(byte[] row,
+long
+HTable.incrementColumnValue(byte[] row,
 byte[] family,
 byte[] qualifier,
 long amount,
-Durability durability)
-Atomically increments a column value.
-
+Durability durability)
 
 
-Delete
-Delete.setDurability(Durabilityd)
-
-
 TableDescriptorBuilder
 TableDescriptorBuilder.setDurability(Durabilitydurability)
 
-
+
 TableDescriptorBuilder.ModifyableTableDescriptor
 TableDescriptorBuilder.ModifyableTableDescriptor.setDurability(Durabilitydurability)
 Sets the Durability 
setting for the table.
 
 
-
-Increment
-Increment.setDurability(Durabilityd)
-
 
-Put
-Put.setDurability(Durabilityd)
-
-
 Append
 Append.setDurability(Durabilityd)
 
-
+
 Mutation
 Mutation.setDurability(Durabilityd)
 Set the durability for this mutation
 
 
+
+Delete
+Delete.setDurability(Durabilityd)
+
+
+Increment
+Increment.setDurability(Durabilityd)
+
+
+Put
+Put.setDurability(Durabilityd)
+
 
 
 
@@ -442,13 +442,13 @@ the order they are declared.
 
 
 Durability
-RowProcessor.useDurability()
-Deprecated.
-
+BaseRowProcessor.useDurability()
 
 
 Durability
-BaseRowProcessor.useDurability()
+RowProcessor.useDurability()
+Deprecated.
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
index 14acfc0..d8c0033 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
@@ -399,8 +399,10 @@ service.
 
 
 
-boolean
-HTable.exists(Get get)
+default CompletableFuture<Boolean>
+AsyncTable.exists(Get get)
+Test for the existence of columns in the table, as specified by the Get.
+
 
 
 boolean
@@ -409,34 +411,32 @@ service.
 
 
 
-default CompletableFuture<Boolean>
-AsyncTable.exists(Get get)
-Test for the existence of columns in the table, as specified by the Get.
-
+boolean
+HTable.exists(Get get)
 
 
 CompletableFuture<Result>
-RawAsyncTableImpl.get(Get get)
+AsyncTable.get(Get get)
+Extracts certain cells from a given row.
+
 
 Result
 Result

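The hunk above swaps the positions of the synchronous boolean HTable.exists(Get) and the asynchronous default CompletableFuture<Boolean> AsyncTable.exists(Get). A sketch of the async shapes, assuming a reachable cluster and a placeholder table "t1":

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class AsyncExistsExample {
  public static void main(String[] args) throws Exception {
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection().get()) {
      Get get = new Get(Bytes.toBytes("row1"));
      // exists(Get) checks presence server-side; no cell data is transferred.
      CompletableFuture<Boolean> exists =
          conn.getTable(TableName.valueOf("t1")).exists(get);
      // get(Get) extracts the requested cells from the row.
      CompletableFuture<Result> result =
          conn.getTable(TableName.valueOf("t1")).get(get);
      System.out.println("exists=" + exists.get() + ", empty=" + result.get().isEmpty());
    }
  }
}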
[22/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

2018-03-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
index f1ed9d4..c15c700 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
@@ -802,23 +802,23 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 private Connection
-RestoreTablesClient.conn
+BackupAdminImpl.conn
 
 
-protected Connection
-TableBackupClient.conn
-
-
 (package private) Connection
 BackupCommands.Command.conn
 
+
+private Connection
+RestoreTablesClient.conn
+
 
 protected Connection
-BackupManager.conn
+TableBackupClient.conn
 
 
-private Connection
-BackupAdminImpl.conn
+protected Connection
+BackupManager.conn
 
 
 private Connection
@@ -1132,13 +1132,13 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 
-(package private) Connection
-ConnectionImplementation.MasterServiceState.connection
-
-
 private Connection
 RegionServerCallable.connection
 
+
+(package private) Connection
+ConnectionImplementation.MasterServiceState.connection
+
 
 
 
@@ -1183,20 +1183,20 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 
-Connection
-Admin.getConnection()
-
-
 (package private) Connection
 RegionAdminServiceCallable.getConnection()
 
-
+
 protected Connection
 HTable.getConnection()
 INTERNAL Used by unit tests and tools to do 
low-level
  manipulations.
 
 
+
+Connection
+Admin.getConnection()
+
 
 Connection
 HBaseAdmin.getConnection()
@@ -1510,11 +1510,11 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 private Connection
-TableInputFormatBase.connection
+HRegionPartitioner.connection
 
 
 private Connection
-HRegionPartitioner.connection
+TableInputFormatBase.connection
 
 
 
@@ -1547,22 +1547,22 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 
-private Connection
-TableOutputFormat.TableRecordWriter.connection
-
-
 (package private) Connection
 MultiTableOutputFormat.MultiTableRecordWriter.connection
 
+
+private Connection
+HRegionPartitioner.connection
+
 
 private Connection
-TableInputFormatBase.connection
-The underlying Connection 
of the table.
-
+TableOutputFormat.TableRecordWriter.connection
 
 
 private Connection
-HRegionPartitioner.connection
+TableInputFormatBase.connection
+The underlying Connection 
of the table.
+
 
 
 (package private) Connection
@@ -1647,15 +1647,15 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 private Connection
-CatalogJanitor.connection
+RegionPlacementMaintainer.connection
 
 
 private Connection
-SnapshotOfRegionAssignmentFromMeta.connection
+CatalogJanitor.connection
 
 
 private Connection
-RegionPlacementMaintainer.connection
+SnapshotOfRegionAssignmentFromMeta.connection
 
 
 
@@ -1823,16 +1823,16 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 
-private Connection
-TableQuotaSnapshotStore.conn
+(package private) Connection
+FileArchiverNotifierFactoryImpl.CacheKey.conn
 
 
 private Connection
-SpaceQuotaRefresherChore.conn
+QuotaObserverChore.conn
 
 
 private Connection
-NamespaceQuotaSnapshotStore.conn
+QuotaObserverChore.TablesWithQuotas.conn
 
 
 private Connection
@@ -1840,11 +1840,11 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 private Connection
-QuotaObserverChore.conn
+NamespaceQuotaSnapshotStore.conn
 
 
 private Connection
-QuotaObserverChore.TablesWithQuotas.conn
+TableQuotaSnapshotStore.conn
 
 
 private Connection
@@ -1855,8 +1855,8 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 FileArchiverNotifierImpl.conn
 
 
-(package private) Connection
-FileArchiverNotifierFactoryImpl.CacheKey.conn
+private Connection
+SpaceQuotaRefresherChore.conn
 
 
 private Connection
@@ -2017,20 +2017,20 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 FileArchiverNotifier
-FileArchiverNotifierFactory.get(Connection conn,
+FileArchiverNotifierFactoryImpl.get(Connection conn,
org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem fs,
TableName tn)
-Creates or obtains a FileArchiverNotifier instance for the given args.
+Returns the FileArchiverNotifier instance for the given TableName.
 
 
 
 FileArchiverNotifier
-FileArchiverNotifierFactoryImpl.get(Connection conn,
+FileArchiverNotifierFactory.get(Connection conn,
org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.fs.FileSystem fs,
TableName tn)
-Returns the FileArchiverNotifier instance for the given TableName.
+Creates or obtains a FileArchiverNotifier instance for the 

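The fields reshuffled above all follow one pattern: each component holds a Connection that was created elsewhere rather than opening its own, since a Connection is heavyweight and thread-safe. A sketch of that pattern using the public client API; the ConnectionHolder class itself is illustrative, not from the diff:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class ConnectionHolder {
  private final Connection conn; // shared by collaborators, closed once

  ConnectionHolder(Configuration conf) throws IOException {
    this.conn = ConnectionFactory.createConnection(conf);
  }

  Connection getConnection() {
    return conn;
  }

  void close() throws IOException {
    conn.close(); // only the owning component shuts the connection down
  }

  public static void main(String[] args) throws Exception {
    ConnectionHolder holder = new ConnectionHolder(HBaseConfiguration.create());
    try {
      System.out.println("cluster conf loaded: "
          + (holder.getConnection().getConfiguration() != null));
    } finally {
      holder.close();
    }
  }
}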
[22/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

2018-03-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
index ecf500c..0cd5a4e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
@@ -238,8355 +238,8368 @@
 230  public static final String 
HBASE_MAX_CELL_SIZE_KEY = "hbase.server.keyvalue.maxsize";
 231  public static final int 
DEFAULT_MAX_CELL_SIZE = 10485760;
 232
-233  public static final String 
HBASE_REGIONSERVER_MINIBATCH_SIZE =
-234  
"hbase.regionserver.minibatch.size";
-235  public static final int 
DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE = 2;
-236
-237  /**
-238   * This is the global default value for 
durability. All tables/mutations not
-239   * defining a durability or using 
USE_DEFAULT will default to this value.
-240   */
-241  private static final Durability 
DEFAULT_DURABILITY = Durability.SYNC_WAL;
+233  /**
+234   * This is the global default value for 
durability. All tables/mutations not
+235   * defining a durability or using 
USE_DEFAULT will default to this value.
+236   */
+237  private static final Durability 
DEFAULT_DURABILITY = Durability.SYNC_WAL;
+238
+239  public static final String 
HBASE_REGIONSERVER_MINIBATCH_SIZE =
+240  
"hbase.regionserver.minibatch.size";
+241  public static final int 
DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE = 2;
 242
-243  final AtomicBoolean closed = new 
AtomicBoolean(false);
-244
-245  /* Closing can take some time; use the 
closing flag if there is stuff we don't
-246   * want to do while in closing state; 
e.g. like offer this region up to the
-247   * master as a region to close if the 
carrying regionserver is overloaded.
-248   * Once set, it is never cleared.
-249   */
-250  final AtomicBoolean closing = new 
AtomicBoolean(false);
-251
-252  /**
-253   * The max sequence id of flushed data 
on this region. There is no edit in memory that is
-254   * less that this sequence id.
-255   */
-256  private volatile long maxFlushedSeqId = 
HConstants.NO_SEQNUM;
-257
-258  /**
-259   * Record the sequence id of last flush 
operation. Can be in advance of
-260   * {@link #maxFlushedSeqId} when 
flushing a single column family. In this case,
-261   * {@link #maxFlushedSeqId} will be 
older than the oldest edit in memory.
-262   */
-263  private volatile long lastFlushOpSeqId 
= HConstants.NO_SEQNUM;
-264
-265  /**
-266   * The sequence id of the last replayed 
open region event from the primary region. This is used
-267   * to skip entries before this due to 
the possibility of replay edits coming out of order from
-268   * replication.
-269   */
-270  protected volatile long 
lastReplayedOpenRegionSeqId = -1L;
-271  protected volatile long 
lastReplayedCompactionSeqId = -1L;
-272
-273  
//
-274  // Members
-275  
//
-276
-277  // map from a locked row to the context 
for that lock including:
-278  // - CountDownLatch for threads waiting 
on that row
-279  // - the thread that owns the lock 
(allow reentrancy)
-280  // - reference count of (reentrant) 
locks held by the thread
-281  // - the row itself
-282  private final ConcurrentHashMap<HashedBytes, RowLockContext> lockedRows =
-283  new ConcurrentHashMap<>();
-284
-285  protected final Map<byte[], HStore> stores =
-286  new ConcurrentSkipListMap<>(Bytes.BYTES_RAWCOMPARATOR);
+243  public static final String 
WAL_HSYNC_CONF_KEY = "hbase.wal.hsync";
+244  public static final boolean 
DEFAULT_WAL_HSYNC = false;
+245
+246  final AtomicBoolean closed = new 
AtomicBoolean(false);
+247
+248  /* Closing can take some time; use the 
closing flag if there is stuff we don't
+249   * want to do while in closing state; 
e.g. like offer this region up to the
+250   * master as a region to close if the 
carrying regionserver is overloaded.
+251   * Once set, it is never cleared.
+252   */
+253  final AtomicBoolean closing = new 
AtomicBoolean(false);
+254
+255  /**
+256   * The max sequence id of flushed data 
on this region. There is no edit in memory that is
+257   * less that this sequence id.
+258   */
+259  private volatile long maxFlushedSeqId = 
HConstants.NO_SEQNUM;
+260
+261  /**
+262   * Record the sequence id of last flush 
operation. Can be in advance of
+263   * {@link #maxFlushedSeqId} when 
flushing a single column family. In this case,
+264   * {@link #maxFlushedSeqId} will be 
older than the oldest edit in memory.
+265   */
+266  private volatile long lastFlushOpSeqId 
= 

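The reordering above also introduces WAL_HSYNC_CONF_KEY ("hbase.wal.hsync", default false per DEFAULT_WAL_HSYNC) next to the global default durability (SYNC_WAL). A minimal sketch of flipping the flag in a Configuration; whether hsync is appropriate depends on the underlying filesystem:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalHsyncConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // WAL_HSYNC_CONF_KEY from the diff above; the shipped default is false.
    conf.setBoolean("hbase.wal.hsync", true);
    System.out.println("hbase.wal.hsync = "
        + conf.getBoolean("hbase.wal.hsync", false));
  }
}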
[22/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.SingleServerRequestRunnable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.SingleServerRequestRunnable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.SingleServerRequestRunnable.html
index c27b109..4160a88 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.SingleServerRequestRunnable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.SingleServerRequestRunnable.html
@@ -105,7 +105,7 @@
 097try {
 098  done = waitUntilDone(startTime 
* 1000L + asyncProcess.primaryCallTimeoutMicroseconds);
 099} catch (InterruptedException ex) 
{
-100  LOG.error("Replica thread was 
interrupted - no replica calls: " + ex.getMessage());
+100  LOG.error("Replica thread 
interrupted - no replica calls {}", ex.getMessage());
 101  return;
 102}
 103  }
@@ -149,7 +149,7 @@
 141  if (loc == null) return;
 142  HRegionLocation[] locs = 
loc.getRegionLocations();
 143  if (locs.length == 1) {
-144LOG.warn("No replicas found for " 
+ action.getAction());
+144LOG.warn("No replicas found for 
{}", action.getAction());
 145return;
 146  }
 147  synchronized (replicaResultLock) 
{
@@ -230,8 +230,8 @@
 222  return;
 223} catch (Throwable t) {
 224  // This should not happen. Let's log & retry anyway.
-225  LOG.error("#" + asyncProcess.id 
+ ", Caught throwable while calling. This is unexpected." +
-226  " Retrying. Server is " + 
server + ", tableName=" + tableName, t);
+225  LOG.error("id=" + 
asyncProcess.id + ", caught throwable. Unexpected." +
+226  " Retrying. Server=" + 
server + ", tableName=" + tableName, t);
 227  
receiveGlobalFailure(multiAction, server, numAttempt, t);
 228  return;
 229}
@@ -247,1036 +247,1035 @@
 239}
 240  } catch (Throwable t) {
 241// Something really bad happened. 
We are on the send thread that will now die.
-242LOG.error("Internal AsyncProcess 
#" + asyncProcess.id + " error for "
-243+ tableName + " processing 
for " + server, t);
-244throw new RuntimeException(t);
-245  } finally {
-246
asyncProcess.decTaskCounters(multiAction.getRegions(), server);
-247if (callsInProgress != null && callable != null && res != null) {
-248  
callsInProgress.remove(callable);
-249}
-250  }
-251}
-252  }
-253
-254  private final Batch.Callback<CResult> callback;
-255  private final BatchErrors errors;
-256  private final ConnectionImplementation.ServerErrorTracker errorsByServer;
-257  private final ExecutorService pool;
-258  private final Set<CancellableRegionServerCallable> callsInProgress;
+242LOG.error("id=" + asyncProcess.id 
+ " error for " + tableName + " processing " + server, t);
+243throw new RuntimeException(t);
+244  } finally {
+245
asyncProcess.decTaskCounters(multiAction.getRegions(), server);
+246if (callsInProgress != null && callable != null && res != null) {
+247  
callsInProgress.remove(callable);
+248}
+249  }
+250}
+251  }
+252
+253  private final Batch.Callback<CResult> callback;
+254  private final BatchErrors errors;
+255  private final ConnectionImplementation.ServerErrorTracker errorsByServer;
+256  private final ExecutorService pool;
+257  private final Set<CancellableRegionServerCallable> callsInProgress;
+258
 259
-260
-261  private final TableName tableName;
-262  private final AtomicLong 
actionsInProgress = new AtomicLong(-1);
-263  /**
-264   * The lock controls access to results. 
It is only held when populating results where
-265   * there might be several callers 
(eventual consistency gets). For other requests,
-266   * there's one unique call going on per 
result index.
-267   */
-268  private final Object replicaResultLock 
= new Object();
-269  /**
-270   * Result array.  Null if results are 
not needed. Otherwise, each index corresponds to
-271   * the action index in initial actions 
submitted. For most request types, has null-s for
-272   * requests that are not done, and 
result/exception for those that are done.
-273   * For eventual-consistency gets, 
initially the same applies; at some point, replica calls
-274   * might be started, and 
ReplicaResultState is put at the corresponding indices. The
-275   * returning calls check the type to 
detect when this is the case. After all calls are done,
-276   * ReplicaResultState-s are replaced 
with results for the user.
-277   */
-278  private final Object[] results;
-279  /**
-280   * Indices of replica gets in results. 
If null, all or no 

[22/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

2018-03-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
index 26b9cc0..6fa64ff 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
@@ -949,394 +949,411 @@
 941*/
 942  public static final float 
HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD = 0.2f;
 943
-944  public static final Pattern 
CP_HTD_ATTR_KEY_PATTERN =
-945  
Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);
-946
-947  /**
-948   * pre
-949   * Pattern that matches a coprocessor 
specification. Form is:
-950   * {@code coprocessor jar file 
location '|' class name ['|' priority ['|' 
arguments]]}
-951   * where arguments are {@code 
KEY '=' VALUE [,...]}
-952   * For example: {@code 
hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2}
-953   * /pre
-954   */
-955  public static final Pattern 
CP_HTD_ATTR_VALUE_PATTERN =
-956  
Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");
-957
-958  public static final String 
CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+";
-959  public static final String 
CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+";
-960  public static final Pattern 
CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile(
-961  "(" + 
CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" +
-962  
CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?");
-963  public static final String 
CP_HTD_ATTR_INCLUSION_KEY =
-964  
"hbase.coprocessor.classloader.included.classes";
-965
-966  /** The delay when re-trying a socket 
operation in a loop (HBASE-4712) */
-967  public static final int 
SOCKET_RETRY_WAIT_MS = 200;
-968
-969  /** Host name of the local machine */
-970  public static final String LOCALHOST = 
"localhost";
-971
-972  /**
-973   * If this parameter is set to true, 
then hbase will read
-974   * data and then verify checksums. 
Checksum verification
-975   * inside hdfs will be switched off.  
However, if the hbase-checksum
-976   * verification fails, then it will 
switch back to using
-977   * hdfs checksums for verifiying data 
that is being read from storage.
-978   *
-979   * If this parameter is set to false, 
then hbase will not
-980   * verify any checksums, instead it 
will depend on checksum verification
-981   * being done in the hdfs client.
-982   */
-983  public static final String 
HBASE_CHECKSUM_VERIFICATION =
-984  
"hbase.regionserver.checksum.verify";
+944  /**
+945   * @deprecated  It is used internally. 
As of release 2.0.0, this will be removed in HBase 3.0.0.
+946   */
+947  @Deprecated
+948  public static final Pattern 
CP_HTD_ATTR_KEY_PATTERN =
+949  
Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);
+950
+951  /**
+952   * pre
+953   * Pattern that matches a coprocessor 
specification. Form is:
+954   * {@code coprocessor jar file 
location '|' class name ['|' priority ['|' 
arguments]]}
+955   * where arguments are {@code 
KEY '=' VALUE [,...]}
+956   * For example: {@code 
hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2}
+957   * /pre
+958   * @deprecated  It is used internally. 
As of release 2.0.0, this will be removed in HBase 3.0.0.
+959   */
+960  @Deprecated
+961  public static final Pattern 
CP_HTD_ATTR_VALUE_PATTERN =
+962  
Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");
+963  /**
+964   * @deprecated  It is used internally. 
As of release 2.0.0, this will be removed in HBase 3.0.0.
+965   */
+966  @Deprecated
+967  public static final String 
CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+";
+968  /**
+969   * @deprecated  It is used internally. 
As of release 2.0.0, this will be removed in HBase 3.0.0.
+970   */
+971  @Deprecated
+972  public static final String 
CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+";
+973  /**
+974   * @deprecated  It is used internally. 
As of release 2.0.0, this will be removed in HBase 3.0.0.
+975   */
+976  @Deprecated
+977  public static final Pattern 
CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile(
+978  "(" + 
CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" +
+979  
CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?");
+980  public static final String 
CP_HTD_ATTR_INCLUSION_KEY =
+981  
"hbase.coprocessor.classloader.included.classes";
+982
+983  /** The delay when re-trying a socket 
operation in a loop (HBASE-4712) */
+984  public static final int 
SOCKET_RETRY_WAIT_MS = 200;
 985
-986  public static final String LOCALHOST_IP 
= "127.0.0.1";
-987
-988  public static final String 
REGION_SERVER_HANDLER_COUNT = "hbase.regionserver.handler.count";
-989  public static final int 
DEFAULT_REGION_SERVER_HANDLER_COUNT = 30;
-990
-991  /*
-992   * 

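CP_HTD_ATTR_VALUE_PATTERN, deprecated above, parses coprocessor specifications of the form jar|class|priority|args. A sketch applying the exact regex from the diff to the example string its javadoc gives:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CoprocessorSpec {
  // The pattern exactly as it appears in HConstants above.
  private static final Pattern CP_HTD_ATTR_VALUE_PATTERN =
      Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");

  public static void main(String[] args) {
    // The example specification from the javadoc in the diff.
    String spec = "hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2";
    Matcher m = CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
    if (m.matches()) {
      System.out.println("jar      = " + m.group(1)); // hdfs:///foo.jar
      System.out.println("class    = " + m.group(2)); // com.foo.FooRegionObserver
      System.out.println("priority = " + m.group(3)); // 1001
      System.out.println("args     = " + m.group(4)); // |arg1=1,arg2=2 (leading pipe kept)
    }
  }
}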
[22/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

2018-03-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.html
index e5916be..4e0ec23 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.html
@@ -129,7 +129,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class TruncateTableProcedure
+public class TruncateTableProcedure
extends AbstractStateMachineTableProcedure<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.TruncateTableState>
 
 
@@ -346,7 +346,7 @@ extends AbstractStateMachineTableProcedure
-acquireLock,
 checkTableModifiable,
 getRegionDir,
 getUser,
 releaseLock,
 <
 a 
href="../../../../../../org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html#releaseSyncLatch--">releaseSyncLatch,
 setUser
+acquireLock,
 checkTableModifiable,
 getRegionDir,
 getUser,
 preflightChecks, releaseLock,
 releaseSyncLatch,
 setUser
 
 
 
@@ -389,7 +389,7 @@ extends 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -398,7 +398,7 @@ extends 
 
 preserveSplits
-private boolean preserveSplits
+private boolean preserveSplits
 
 
 
@@ -407,7 +407,7 @@ extends 
 
 regions
-private List<RegionInfo> regions
+private List<RegionInfo> regions
 
 
 
@@ -416,7 +416,7 @@ extends 
 
 tableDescriptor
-private TableDescriptor tableDescriptor
+private TableDescriptor tableDescriptor
 
 
 
@@ -425,7 +425,7 @@ extends 
 
 tableName
-private TableName tableName
+private TableName tableName
 
 
 
@@ -442,7 +442,7 @@ extends 
 
 TruncateTableProcedure
-public TruncateTableProcedure()
+public TruncateTableProcedure()
 
 
 
@@ -451,9 +451,14 @@ extends 
 
 TruncateTableProcedure
-public TruncateTableProcedure(MasterProcedureEnv env,
+public TruncateTableProcedure(MasterProcedureEnv env,
   TableName tableName,
-  boolean preserveSplits)
+  boolean preserveSplits)
+   throws HBaseIOException
+
+Throws:
+HBaseIOException
+
 
 
 
@@ -462,10 +467,15 @@ extends 
 
 TruncateTableProcedure
-public TruncateTableProcedure(MasterProcedureEnv env,
+public TruncateTableProcedure(MasterProcedureEnv env,
   TableName tableName,
   boolean preserveSplits,
-  ProcedurePrepareLatch latch)
+  ProcedurePrepareLatch latch)
+   throws HBaseIOException
+
+Throws:
+HBaseIOException
+
 
 
 
@@ -482,7 +492,7 @@ extends 
 
 executeFromState
-protected StateMachineProcedure.Flow executeFromState(MasterProcedureEnv env,
+protected StateMachineProcedure.Flow executeFromState(MasterProcedureEnv env,
   
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.TruncateTableStatestate)
throws https://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true;
 title="class or interface in java.lang">InterruptedException
 Description copied from 
class:StateMachineProcedure
@@ -505,7 +515,7 @@ extends 
 
 rollbackState
-protected void rollbackState(MasterProcedureEnv env,
+protected void rollbackState(MasterProcedureEnv env,
  
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.TruncateTableStatestate)
 Description copied from 
class:StateMachineProcedure
 called to perform the rollback of the specified state
@@ -522,7 +532,7 @@ extends 
 
 completionCleanup
-protected void completionCleanup(MasterProcedureEnv env)
+protected void completionCleanup(MasterProcedureEnv env)
 Description copied from 
class:Procedure
 Called when the procedure is marked as completed (success 
or rollback).
  The procedure implementor may use this method to cleanup in-memory states.
@@ -540,7 +550,7 @@ extends 
 
 isRollbackSupported
-protected boolean isRollbackSupported(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.TruncateTableState state)
+protected boolean isRollbackSupported(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.TruncateTableState state)
 Description copied from 
class:StateMachineProcedure
 Used by the default implementation of abort() to know if 
the current state can be aborted
  and rollback 

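The change above makes both non-default TruncateTableProcedure constructors declare throws HBaseIOException, so a malformed request can fail at construction time (note preflightChecks in the inherited-methods list). A rough sketch of submitting the procedure on the master side; env and procExec are assumed to come from a running HMaster and are not constructed here:

import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

public class TruncateSketch {
  // Internal master-side API; caller supplies a live executor and environment.
  static long submitTruncate(ProcedureExecutor<MasterProcedureEnv> procExec,
      MasterProcedureEnv env, TableName tableName) throws HBaseIOException {
    // Since this change, construction itself can fail fast via preflightChecks.
    TruncateTableProcedure proc =
        new TruncateTableProcedure(env, tableName, /* preserveSplits= */ true);
    return procExec.submitProcedure(proc);
  }
}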
[22/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

2018-03-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
index f794fc9..21dd94d 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
@@ -106,7 +106,7 @@
 
 
 private RegionLocateType
-AsyncSingleRequestRpcRetryingCaller.locateType
+AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.locateType
 
 
 RegionLocateType
@@ -114,7 +114,7 @@
 
 
 private RegionLocateType
-AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.locateType
+AsyncSingleRequestRpcRetryingCaller.locateType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
index f6e7bf3..195c3ee 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
@@ -230,14 +230,14 @@ service.
 
 
 private RegionLocator
-HFileOutputFormat2.TableInfo.regionLocator
-
-
-private RegionLocator
 TableInputFormatBase.regionLocator
 The RegionLocator of the 
table.
 
 
+
+private RegionLocator
+HFileOutputFormat2.TableInfo.regionLocator
+
 
 
 
@@ -248,15 +248,15 @@ service.
 
 
 
-RegionLocator
-HFileOutputFormat2.TableInfo.getRegionLocator()
-
-
 protected RegionLocator
 TableInputFormatBase.getRegionLocator()
 Allows subclasses to get the RegionLocator.
 
 
+
+RegionLocator
+HFileOutputFormat2.TableInfo.getRegionLocator()
+
 
 
 



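Both classes above cache a private RegionLocator for the table they read. A sketch of obtaining one through the public API; "t1" is a placeholder table name:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocatorExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn =
            ConnectionFactory.createConnection(HBaseConfiguration.create());
        RegionLocator locator = conn.getRegionLocator(TableName.valueOf("t1"))) {
      // getRegion() is the HBase 2.x accessor; older clients use getRegionInfo().
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName()
            + " on " + loc.getServerName());
      }
    }
  }
}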
[22/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

2018-03-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
index d8c0033..14acfc0 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
@@ -399,10 +399,8 @@ service.
 
 
 
-default CompletableFuture<Boolean>
-AsyncTable.exists(Get get)
-Test for the existence of columns in the table, as specified by the Get.
-
+boolean
+HTable.exists(Get get)
 
 
 boolean
@@ -411,32 +409,34 @@ service.
 
 
 
-boolean
-HTable.exists(Get get)
+default CompletableFuture<Boolean>
+AsyncTable.exists(Get get)
+Test for the existence of columns in the table, as specified by the Get.
+
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFutureResult
-AsyncTable.get(Getget)
-Extracts certain cells from a given row.
-
+RawAsyncTableImpl.get(Getget)
 
 
 Result
+HTable.get(Getget)
+
+
+Result
 Table.get(Getget)
 Extracts certain cells from a given row.
 
 
-
-https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFutureResult
-AsyncTableImpl.get(Getget)
-
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFutureResult
-RawAsyncTableImpl.get(Getget)
+AsyncTableImpl.get(Getget)
 
 
-Result
-HTable.get(Getget)
+https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFutureResult
+AsyncTable.get(Getget)
+Extracts certain cells from a given row.
+
 
 
 private Result
@@ -457,10 +457,8 @@ service.
 
 
 
-default List<CompletableFuture<Boolean>>
-AsyncTable.exists(List<Get> gets)
-Test for the existence of columns in the table, as specified by the Gets.
-
+boolean[]
+HTable.exists(List<Get> gets)
 
 
 boolean[]
@@ -469,16 +467,12 @@ service.
 
 
 
-boolean[]
-HTable.exists(List<Get> gets)
-
-
-default CompletableFuture<List<Boolean>>
-AsyncTable.existsAll(List<Get> gets)
-A simple version for batch exists.
+default List<CompletableFuture<Boolean>>
+AsyncTable.exists(List<Get> gets)
+Test for the existence of columns in the table, as 

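The table above contrasts the batch forms: boolean[] Table.exists(List<Get>) versus the default List<CompletableFuture<Boolean>> AsyncTable.exists(List<Get>) (with existsAll as the older name). A sketch of the synchronous batch check, assuming a placeholder table "t1":

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchExistsExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn =
            ConnectionFactory.createConnection(HBaseConfiguration.create());
        Table table = conn.getTable(TableName.valueOf("t1"))) {
      List<Get> gets = new ArrayList<>();
      gets.add(new Get(Bytes.toBytes("row1")));
      gets.add(new Get(Bytes.toBytes("row2")));
      boolean[] found = table.exists(gets); // one flag per Get, in input order
      for (int i = 0; i < found.length; i++) {
        System.out.println("gets[" + i + "] exists: " + found[i]);
      }
    }
  }
}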
[22/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

2018-03-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/org/apache/hadoop/hbase/wal/class-use/WAL.Entry.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/wal/class-use/WAL.Entry.html 
b/devapidocs/org/apache/hadoop/hbase/wal/class-use/WAL.Entry.html
index 4a4ab48..b4a6ad3 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/class-use/WAL.Entry.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/class-use/WAL.Entry.html
@@ -372,7 +372,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListWAL.Entry
-ReplicationSourceWALReader.WALEntryBatch.walEntries
+WALEntryBatch.walEntries
 
 
 
@@ -389,7 +389,15 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 WAL.Entry
-WALEntryStream.next()
+WALEntryStream.next()
+Returns the next WAL entry in this stream and advance the 
stream.
+
+
+
+WAL.Entry
+WALEntryStream.peek()
+Returns the next WAL entry in this stream but does not 
advance.
+
 
 
 
@@ -411,7 +419,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListWAL.Entry
-ReplicationSourceWALReader.WALEntryBatch.getWalEntries()
+WALEntryBatch.getWalEntries()
 
 
 
@@ -424,7 +432,17 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 void
-ReplicationSourceWALReader.WALEntryBatch.addEntry(WAL.Entry entry)
+WALEntryBatch.addEntry(WAL.Entry entry)
+
+private boolean
+SerialReplicationChecker.canPush(WAL.Entry entry,
+   byte[] row)
+
+
+boolean
+SerialReplicationChecker.canPush(WAL.Entry entry,
+   Cell firstCellInEdit)
+   CellfirstCellInEdit)
 
 
 private WAL.Entry
@@ -440,11 +458,15 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 private void
-ReplicationSourceWALReader.updateBatchStats(ReplicationSourceWALReader.WALEntryBatch batch,
+ReplicationSourceWALReader.updateBatchStats(WALEntryBatch batch,
 WAL.Entry entry,
-long entryPosition,
 long entrySize)
 
+
+void
+SerialReplicationChecker.waitUntilCanPush(WAL.Entry entry,
+Cell firstCellInEdit)
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/overview-tree.html
--
diff --git a/devapidocs/overview-tree.html b/devapidocs/overview-tree.html
index aa5da8e..b37992f 100644
--- a/devapidocs/overview-tree.html
+++ b/devapidocs/overview-tree.html
@@ -2218,6 +2218,7 @@
 org.apache.hadoop.hbase.MetaTableAccessor.TableVisitorBase
 
 
+org.apache.hadoop.hbase.MetaTableAccessor.ReplicationBarrierResult
 org.apache.hadoop.hbase.zookeeper.MetaTableLocator
 org.apache.hadoop.hbase.util.Methods
 org.apache.hadoop.metrics2.util.MetricQuantile
@@ -3058,7 +3059,6 @@
 org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceFactory
 org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager (implements 
org.apache.hadoop.hbase.replication.ReplicationListener)
 org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALActionListener (implements 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener)
-org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.WALEntryBatch
 org.apache.hadoop.hbase.replication.ReplicationStorageFactory
 org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp.DummyServer (implements 
org.apache.hadoop.hbase.Server)
 org.apache.hadoop.hbase.replication.regionserver.ReplicationThrottler
@@ -3347,6 +3347,7 @@
 org.apache.hadoop.hbase.regionserver.SegmentScanner (implements 
org.apache.hadoop.hbase.regionserver.KeyValueScanner)
 org.apache.hadoop.hbase.io.asyncfs.SendBufSizePredictor
 org.apache.hadoop.hbase.regionserver.wal.SequenceIdAccounting
+org.apache.hadoop.hbase.replication.regionserver.SerialReplicationChecker
 org.apache.hadoop.hbase.master.balancer.ServerAndLoad (implements java.lang.Comparable<T>, java.io.Serializable)
 org.apache.hadoop.hbase.ipc.ServerCall<T> (implements org.apache.hadoop.hbase.ipc.RpcCall, org.apache.hadoop.hbase.ipc.RpcResponse)
 
@@ -4092,6 +4093,7 @@
 
 
 org.apache.hadoop.hbase.wal.WALEdit 
(implements org.apache.hadoop.hbase.io.HeapSize)
+org.apache.hadoop.hbase.replication.regionserver.WALEntryBatch
 org.apache.hadoop.hbase.replication.regionserver.WALEntryStream (implements java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true;
 title="class 

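The diff above adds peek() beside next() on WALEntryStream: peek returns the upcoming WAL entry without advancing, next returns it and advances. A rough sketch of the peek-then-consume loop; this is internal replication API, and the stream construction is omitted because its arguments are not shown in the diff (hasNext() is assumed from the surrounding class):

import org.apache.hadoop.hbase.replication.regionserver.WALEntryStream;
import org.apache.hadoop.hbase.wal.WAL;

public class WalEntryStreamSketch {
  static void drain(WALEntryStream stream) throws Exception {
    while (stream.hasNext()) {
      WAL.Entry upcoming = stream.peek(); // inspect without advancing
      if (upcoming == null) {
        break;
      }
      WAL.Entry consumed = stream.next(); // same entry, stream advances
      System.out.println(consumed.getKey());
    }
  }
}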
[22/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

2018-03-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html 
b/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
index f660c5a..2d899da 100644
--- a/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -127,116 +127,116 @@ public interface Method and Description
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">Boolean
+https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">Boolean
 abortProcedure(longprocId,
   booleanmayInterruptIfRunning)
 abort a procedure
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
+https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
 addColumnFamily(TableNametableName,
ColumnFamilyDescriptorcolumnFamily)
 Add a column family to an existing table.
 
 
 
-default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-addReplicationPeer(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
+default https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
+addReplicationPeer(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
   ReplicationPeerConfigpeerConfig)
 Add a new replication peer for replicating data to slave 
cluster
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-addReplicationPeer(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
+https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
+addReplicationPeer(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
   ReplicationPeerConfigpeerConfig,
   booleanenabled)
 Add a new replication peer for replicating data to slave 
cluster
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-appendReplicationPeerTableCFs(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
- http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapTableName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringtableCfs)

[22/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
index 4f5b33a..4361237 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
@@ -278,567 +278,568 @@
 270  } else {
 271LOG.error(msg, e);
 272setFailure(e);
-273  }
-274}
-275// if split fails,  need to call 
((HRegion)parent).clearSplit() when it is a force split
-276return Flow.HAS_MORE_STATE;
-277  }
-278
-279  /**
-280   * To rollback {@link 
SplitTableRegionProcedure}, an AssignProcedure is asynchronously
-281   * submitted for parent region to be 
split (rollback doesn't wait on the completion of the
-282   * AssignProcedure) . This can be 
improved by changing rollback() to support sub-procedures.
-283   * See HBASE-19851 for details.
-284   */
-285  @Override
-286  protected void rollbackState(final 
MasterProcedureEnv env, final SplitTableRegionState state)
-287  throws IOException, 
InterruptedException {
-288if (isTraceEnabled()) {
-289  LOG.trace(this + " rollback state=" 
+ state);
-290}
-291
-292try {
-293  switch (state) {
-294  case 
SPLIT_TABLE_REGION_POST_OPERATION:
-295  case 
SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS:
-296  case 
SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
-297  case 
SPLIT_TABLE_REGION_UPDATE_META:
-298// PONR
-299throw new 
UnsupportedOperationException(this + " unhandled state=" + state);
-300  case 
SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META:
-301break;
-302  case 
SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS:
-303// Doing nothing, as re-open 
parent region would clean up daughter region directories.
-304break;
-305  case 
SPLIT_TABLE_REGION_CLOSE_PARENT_REGION:
-306openParentRegion(env);
-307break;
-308  case 
SPLIT_TABLE_REGION_PRE_OPERATION:
-309postRollBackSplitRegion(env);
-310break;
-311  case SPLIT_TABLE_REGION_PREPARE:
-312break; // nothing to do
-313  default:
-314throw new 
UnsupportedOperationException(this + " unhandled state=" + state);
-315  }
-316} catch (IOException e) {
-317  // This will be retried. Unless 
there is a bug in the code,
-318  // this should be just a "temporary 
error" (e.g. network down)
-319  LOG.warn("pid=" + getProcId() + " 
failed rollback attempt step " + state +
-320  " for splitting the region "
-321+ 
getParentRegion().getEncodedName() + " in table " + getTableName(), e);
-322  throw e;
-323}
-324  }
-325
-326  /*
-327   * Check whether we are in the state 
that can be rollback
-328   */
-329  @Override
-330  protected boolean 
isRollbackSupported(final SplitTableRegionState state) {
-331switch (state) {
-332  case 
SPLIT_TABLE_REGION_POST_OPERATION:
-333  case 
SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS:
-334  case 
SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
-335  case 
SPLIT_TABLE_REGION_UPDATE_META:
-336// It is not safe to rollback if 
we reach to these states.
-337return false;
-338  default:
-339break;
-340}
-341return true;
-342  }
-343
-344  @Override
-345  protected SplitTableRegionState 
getState(final int stateId) {
-346return 
SplitTableRegionState.forNumber(stateId);
-347  }
-348
-349  @Override
-350  protected int getStateId(final 
SplitTableRegionState state) {
-351return state.getNumber();
-352  }
-353
-354  @Override
-355  protected SplitTableRegionState 
getInitialState() {
-356return 
SplitTableRegionState.SPLIT_TABLE_REGION_PREPARE;
-357  }
-358
-359  @Override
-360  protected void 
serializeStateData(ProcedureStateSerializer serializer)
-361  throws IOException {
-362
super.serializeStateData(serializer);
-363
-364final 
MasterProcedureProtos.SplitTableRegionStateData.Builder splitTableRegionMsg =
-365
MasterProcedureProtos.SplitTableRegionStateData.newBuilder()
-366
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
-367
.setParentRegionInfo(ProtobufUtil.toRegionInfo(getRegion()))
-368
.addChildRegionInfo(ProtobufUtil.toRegionInfo(daughter_1_RI))
-369
.addChildRegionInfo(ProtobufUtil.toRegionInfo(daughter_2_RI));
-370
serializer.serialize(splitTableRegionMsg.build());
-371  }
-372
-373  @Override
-374  protected void 
deserializeStateData(ProcedureStateSerializer serializer)
-375  

[22/51] [partial] hbase-site git commit: Published site at .

2018-02-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index 6d8c565..7edb3ff 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -2726,863 +2726,873 @@
 2718}
 2719  }
 2720
-2721  void checkInitialized()
-2722  throws PleaseHoldException, 
ServerNotRunningYetException, MasterNotRunningException {
-2723checkServiceStarted();
-2724if (!isInitialized()) throw new 
PleaseHoldException("Master is initializing");
-2725if (isStopped()) throw new 
MasterNotRunningException();
-2726  }
-2727
-2728  /**
-2729   * Report whether this master is 
currently the active master or not.
-2730   * If not active master, we are parked 
on ZK waiting to become active.
-2731   *
-2732   * This method is used for testing.
-2733   *
-2734   * @return true if active master, 
false if not.
-2735   */
-2736  @Override
-2737  public boolean isActiveMaster() {
-2738return activeMaster;
-2739  }
-2740
-2741  /**
-2742   * Report whether this master has 
completed with its initialization and is
-2743   * ready.  If ready, the master is 
also the active master.  A standby master
-2744   * is never ready.
-2745   *
-2746   * This method is used for testing.
-2747   *
-2748   * @return true if master is ready to 
go, false if not.
-2749   */
-2750  @Override
-2751  public boolean isInitialized() {
-2752return initialized.isReady();
-2753  }
-2754
-2755  /**
-2756   * Report whether this master is in 
maintenance mode.
+2721  public static class 
MasterStoppedException extends DoNotRetryIOException {
+2722MasterStoppedException() {
+2723  super();
+2724}
+2725  }
+2726
+2727  void checkInitialized() throws 
PleaseHoldException, ServerNotRunningYetException,
+2728  MasterNotRunningException, 
MasterStoppedException {
+2729checkServiceStarted();
+2730if (!isInitialized()) {
+2731  throw new 
PleaseHoldException("Master is initializing");
+2732}
+2733if (isStopped()) {
+2734  throw new 
MasterStoppedException();
+2735}
+2736  }
+2737
+2738  /**
+2739   * Report whether this master is 
currently the active master or not.
+2740   * If not active master, we are parked 
on ZK waiting to become active.
+2741   *
+2742   * This method is used for testing.
+2743   *
+2744   * @return true if active master, 
false if not.
+2745   */
+2746  @Override
+2747  public boolean isActiveMaster() {
+2748return activeMaster;
+2749  }
+2750
+2751  /**
+2752   * Report whether this master has 
completed with its initialization and is
+2753   * ready.  If ready, the master is 
also the active master.  A standby master
+2754   * is never ready.
+2755   *
+2756   * This method is used for testing.
 2757   *
-2758   * @return true if master is in 
maintenanceMode
+2758   * @return true if master is ready to 
go, false if not.
 2759   */
 2760  @Override
-2761  public boolean isInMaintenanceMode() 
{
-2762return 
maintenanceModeTracker.isInMaintenanceMode();
+2761  public boolean isInitialized() {
+2762return initialized.isReady();
 2763  }
 2764
-2765  @VisibleForTesting
-2766  public void setInitialized(boolean 
isInitialized) {
-2767
procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
-2768  }
-2769
+2765  /**
+2766   * Report whether this master is in 
maintenance mode.
+2767   *
+2768   * @return true if master is in 
maintenanceMode
+2769   */
 2770  @Override
-2771  public ProcedureEvent<?> getInitializedEvent() {
-2772return initialized;
+2771  public boolean isInMaintenanceMode() 
{
+2772return 
maintenanceModeTracker.isInMaintenanceMode();
 2773  }
 2774
-2775  /**
-2776   * ServerCrashProcessingEnabled is set 
false before completing assignMeta to prevent processing
-2777   * of crashed servers.
-2778   * @return true if assignMeta has 
completed;
-2779   */
+2775  @VisibleForTesting
+2776  public void setInitialized(boolean 
isInitialized) {
+2777
procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
+2778  }
+2779
 2780  @Override
-2781  public boolean 
isServerCrashProcessingEnabled() {
-2782return 
serverCrashProcessingEnabled.isReady();
+2781  public ProcedureEvent<?> getInitializedEvent() {
+2782return initialized;
 2783  }
 2784
-2785  @VisibleForTesting
-2786  public void 
setServerCrashProcessingEnabled(final boolean b) {
-2787
procedureExecutor.getEnvironment().setEventReady(serverCrashProcessingEnabled, 
b);
-2788  }
-2789
-2790  public ProcedureEvent? 
getServerCrashProcessingEnabledEvent() {
-2791return 
serverCrashProcessingEnabled;
-2792  }
-2793
-2794  /**
-2795   * Compute the average load across 

[22/51] [partial] hbase-site git commit: Published site at .
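The HMaster change above introduces MasterStoppedException, a DoNotRetryIOException, so checkInitialized() now distinguishes a still-initializing master (retriable PleaseHoldException) from a stopped one (not retriable). A client-side sketch of honoring that distinction; the tryDisable helper is illustrative, not from the diff:

import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class MasterStateSketch {
  static void tryDisable(Admin admin, TableName tn) throws Exception {
    try {
      admin.disableTable(tn);
    } catch (PleaseHoldException e) {
      // Master is initializing: back off and retry later.
    } catch (DoNotRetryIOException e) {
      // e.g. MasterStoppedException: give up, the master is gone.
      throw e;
    }
  }
}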

2018-02-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
index 56a2ea1..98104cb 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
@@ -449,14 +449,14 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 TableDescriptor
-HTable.getDescriptor()
-
-
-TableDescriptor
 Table.getDescriptor()
 Gets the table 
descriptor for this table.
 
 
+
+TableDescriptor
+HTable.getDescriptor()
+
 
 TableDescriptor
 Admin.getDescriptor(TableNametableName)
@@ -509,51 +509,51 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFutureTableDescriptor
-AsyncAdmin.getDescriptor(TableNametableName)
-Method for getting the tableDescriptor
-
+AsyncHBaseAdmin.getDescriptor(TableNametableName)
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFutureTableDescriptor
-RawAsyncHBaseAdmin.getDescriptor(TableNametableName)
+AsyncAdmin.getDescriptor(TableNametableName)
+Method for getting the tableDescriptor
+
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFutureTableDescriptor
-AsyncHBaseAdmin.getDescriptor(TableNametableName)
+RawAsyncHBaseAdmin.getDescriptor(TableNametableName)
 
 
 private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTableDescriptor
 RawAsyncHBaseAdmin.getTableDescriptors(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequestrequest)
 
 
-default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTableDescriptor
-AsyncAdmin.listTableDescriptors()
-List all the userspace tables.
-
-
-
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTableDescriptor
 Admin.listTableDescriptors()
 List all the userspace tables.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTableDescriptor
 HBaseAdmin.listTableDescriptors()
 
+
+default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTableDescriptor
+AsyncAdmin.listTableDescriptors()
+List all the userspace tables.
+
+
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTableDescriptor
-AsyncAdmin.listTableDescriptors(booleanincludeSysTables)
-List all the tables.
-
+AsyncHBaseAdmin.listTableDescriptors(booleanincludeSysTables)
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTableDescriptor
-RawAsyncHBaseAdmin.listTableDescriptors(booleanincludeSysTables)
+AsyncAdmin.listTableDescriptors(booleanincludeSysTables)
+List all the tables.
+
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTableDescriptor
-AsyncHBaseAdmin.listTableDescriptors(booleanincludeSysTables)
+RawAsyncHBaseAdmin.listTableDescriptors(booleanincludeSysTables)
 
 
 

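The reordered rows above all revolve around reading TableDescriptors back from the cluster. A sketch of the synchronous path (Admin.listTableDescriptors() plus the per-descriptor getters listed at the top of this section):

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class ListTables {
  public static void main(String[] args) throws Exception {
    try (Connection conn =
            ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = conn.getAdmin()) {
      // Userspace tables only; an includeSysTables overload lists system tables too.
      List<TableDescriptor> tables = admin.listTableDescriptors();
      for (TableDescriptor td : tables) {
        System.out.println(td.getTableName() + " durability=" + td.getDurability());
      }
    }
  }
}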
[22/51] [partial] hbase-site git commit: Published site at .

2018-02-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
index 2ac1b78..90f52b0 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
@@ -106,7 +106,7 @@
 
 
 private RegionLocateType
-AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.locateType
+AsyncSingleRequestRpcRetryingCaller.locateType
 
 
 RegionLocateType
@@ -114,7 +114,7 @@
 
 
 private RegionLocateType
-AsyncSingleRequestRpcRetryingCaller.locateType
+AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.locateType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
index fbe0658..e062eb5 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
@@ -230,13 +230,13 @@ service.
 
 
 private RegionLocator
-TableInputFormatBase.regionLocator
-The RegionLocator of the table.
-
+HFileOutputFormat2.TableInfo.regionLocator
 
 
 private RegionLocator
-HFileOutputFormat2.TableInfo.regionLocator
+TableInputFormatBase.regionLocator
+The RegionLocator of the table.
+
 
 
 
 
@@ -248,15 +248,15 @@ service.
 
 
 
+RegionLocator
+HFileOutputFormat2.TableInfo.getRegionLocator()
+
+
 protected RegionLocator
 TableInputFormatBase.getRegionLocator()
 Allows subclasses to get the RegionLocator.
 
 
-
-RegionLocator
-HFileOutputFormat2.TableInfo.getRegionLocator()
-
 
 
 
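A hedged sketch of obtaining a RegionLocator outside the MapReduce classes listed above, assuming a table named "t1" exists:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegions {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("t1"))) {
      // One line per region: region name plus the server currently hosting it.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
      }
    }
  }
}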



[22/51] [partial] hbase-site git commit: Published site at .

2018-02-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
index 5e1590b..d481372 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
@@ -126,15 +126,15 @@
 
 
 private RpcRetryingCallerFactory
-ConnectionImplementation.rpcCallerFactory
+RegionCoprocessorRpcChannel.rpcCallerFactory
 
 
 private RpcRetryingCallerFactory
-HTable.rpcCallerFactory
+ConnectionImplementation.rpcCallerFactory
 
 
 private RpcRetryingCallerFactory
-RegionCoprocessorRpcChannel.rpcCallerFactory
+HTable.rpcCallerFactory
 
 
 private RpcRetryingCallerFactory
@@ -155,21 +155,21 @@
 
 
 RpcRetryingCallerFactory
-ClusterConnection.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf)
-Returns a new RpcRetryingCallerFactory from the given Configuration.
-
+ConnectionImplementation.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf)
 
 
 RpcRetryingCallerFactory
-ConnectionImplementation.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf)
+ClusterConnection.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf)
+Returns a new RpcRetryingCallerFactory from the given Configuration.
+
 
 
 RpcRetryingCallerFactory
-ClusterConnection.getRpcRetryingCallerFactory()
+ConnectionImplementation.getRpcRetryingCallerFactory()
 
 
 RpcRetryingCallerFactory
-ConnectionImplementation.getRpcRetryingCallerFactory()
+ClusterConnection.getRpcRetryingCallerFactory()
 
 
 static RpcRetryingCallerFactory

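RpcRetryingCallerFactory is internal (IA.Private) plumbing that applications never construct themselves; purely to illustrate the Configuration-driven factory pattern named above, and assuming the instantiate()/newCaller() entry points of this code base:

// Internal API sketch only; not for client code.
Configuration conf = HBaseConfiguration.create();
RpcRetryingCallerFactory factory = RpcRetryingCallerFactory.instantiate(conf);
RpcRetryingCaller<Result> caller = factory.<Result>newCaller(); // retries per conf settings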
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
index 018438c..6384833 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
@@ -283,14 +283,6 @@ service.
 
 
 private Scan
-AsyncScanSingleRegionRpcRetryingCaller.scan
-
-
-protected Scan
-ScannerCallable.scan
-
-
-private Scan
 ScannerCallableWithReplicas.scan
 
 
@@ -307,6 +299,14 @@ service.
 
 
 private Scan
+AsyncScanSingleRegionRpcRetryingCaller.scan
+
+
+protected Scan
+ScannerCallable.scan
+
+
+private Scan
 TableSnapshotScanner.scan
 
 
@@ -339,11 +339,11 @@ service.
 
 
 protected Scan
-ScannerCallable.getScan()
+ClientScanner.getScan()
 
 
 protected Scan
-ClientScanner.getScan()
+ScannerCallable.getScan()
 
 
 Scan
@@ -638,29 +638,29 @@ service.
 
 
 ResultScanner
-RawAsyncTableImpl.getScanner(Scan scan)
-
-
-ResultScanner
-HTable.getScanner(Scan scan)
-The underlying HTable must not be closed.
+AsyncTable.getScanner(Scan scan)
+Returns a scanner on the current table as specified by the Scan object.
 
 
-
+
 ResultScanner
 Table.getScanner(Scan scan)
 Returns a scanner on the current table as specified by the Scan object.
 
 
-
+
 ResultScanner
 AsyncTableImpl.getScanner(Scan scan)
 
+
+ResultScanner
+RawAsyncTableImpl.getScanner(Scan scan)
+
 
 ResultScanner
-AsyncTable.getScanner(Scan scan)
-Returns a scanner on the current table as specified by the Scan object.
+HTable.getScanner(Scan scan)
+The underlying HTable must not be closed.
 
 
 
@@ -703,7 +703,9 @@ service.
 
 
 CompletableFuture<List<Result>>
-RawAsyncTableImpl.scanAll(Scan scan)
+AsyncTable.scanAll(Scan scan)
+Return all the results that match the given scan object.
+
 
 
 CompletableFuture<List<Result>>
@@ -711,9 +713,7 @@ service.
 
 
 CompletableFuture<List<Result>>
-AsyncTable.scanAll(Scan scan)
-Return all the results that match the given scan object.
-
+RawAsyncTableImpl.scanAll(Scan scan)
 
 
 private Scan
@@ 
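The two access patterns in this table differ in memory profile: getScanner streams rows back in batches, while scanAll returns one List and so buffers every matching Result. A minimal sketch of the streaming path, assuming a table "t1" with a family "cf":

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("t1"));
         ResultScanner scanner = table.getScanner(new Scan().addFamily(Bytes.toBytes("cf")))) {
      for (Result r : scanner) { // rows arrive incrementally; nothing is fully buffered
        System.out.println(Bytes.toStringBinary(r.getRow()));
      }
    }
  }
}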

[22/51] [partial] hbase-site git commit: Published site at .

2018-02-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
index 4c96d78..f3e8e8b 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
@@ -620,72 +620,72 @@ service.
 
 
 boolean
-Table.checkAndPut(byte[] row,
+HTable.checkAndPut(byte[] row,
 byte[] family,
 byte[] qualifier,
 byte[] value,
 Put put)
-Deprecated.
-Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
-
+Deprecated.
 
 
 
 boolean
-HTable.checkAndPut(byte[] row,
+Table.checkAndPut(byte[] row,
 byte[] family,
 byte[] qualifier,
 byte[] value,
 Put put)
-Deprecated.
+Deprecated.
+Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
+
 
 
 
 boolean
-Table.checkAndPut(byte[] row,
+HTable.checkAndPut(byte[] row,
 byte[] family,
 byte[] qualifier,
 CompareFilter.CompareOp compareOp,
 byte[] value,
 Put put)
-Deprecated.
-Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
-
+Deprecated.
 
 
 
 boolean
-HTable.checkAndPut(byte[] row,
+Table.checkAndPut(byte[] row,
 byte[] family,
 byte[] qualifier,
 CompareFilter.CompareOp compareOp,
 byte[] value,
 Put put)
-Deprecated.
+Deprecated.
+Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
+
 
 
 
 boolean
-Table.checkAndPut(byte[] row,
+HTable.checkAndPut(byte[] row,
 byte[] family,
 byte[] qualifier,
 CompareOperator op,
 byte[] value,
 Put put)
-Deprecated.
-Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
-
+Deprecated.
 
 
 
 boolean
-HTable.checkAndPut(byte[] row,
+Table.checkAndPut(byte[] row,
 byte[] family,
 byte[] qualifier,
 CompareOperator op,
 byte[] value,
 Put put)
-Deprecated.
+Deprecated.
+Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
+
 
 
 
@@ -718,27 +718,27 @@ service.
 
 
 CompletableFuture<Void>
-AsyncTable.put(Put put)
-Puts some data to the table.
-
+RawAsyncTableImpl.put(Put put)
 
 
 void
+HTable.put(Put put)
+
+
+void
 Table.put(Put put)
 Puts some data in the table.
 
 
-
-CompletableFuture<Void>
-AsyncTableImpl.put(Put put)
-
 
 CompletableFuture<Void>
-RawAsyncTableImpl.put(Put put)
+AsyncTableImpl.put(Put put)
 
 
-void
-HTable.put(Put put)
+CompletableFuture<Void>
+AsyncTable.put(Put put)
+Puts some data to the table.
+
 
 
 boolean
@@ -757,27 +757,27 @@ service.
 
 
 CompletableFuture<Boolean>
-AsyncTable.CheckAndMutateBuilder.thenPut(Put put)
+RawAsyncTableImpl.CheckAndMutateBuilderImpl.thenPut(Put put)
 
 
 boolean
-Table.CheckAndMutateBuilder.thenPut(Put put)
+HTable.CheckAndMutateBuilderImpl.thenPut(Put put)
 
 
-CompletableFuture<Boolean>
-RawAsyncTableImpl.CheckAndMutateBuilderImpl.thenPut(Put put)
+boolean
+Table.CheckAndMutateBuilder.thenPut(Put put)
 
 
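The deprecation notes above all point the same way; a hedged before/after sketch of the migration, with row/family/qualifier/expected treated as assumed byte[] values:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

final class CheckAndPutMigration {
  // Old form, deprecated since 2.0.0 and removed in 3.0.0:
  //   table.checkAndPut(row, family, qualifier, expected, put);
  // New form, via the CheckAndMutateBuilder rows in this table:
  static boolean putIfUnchanged(Table table, byte[] row, byte[] family, byte[] qualifier,
      byte[] expected, Put put) throws IOException {
    return table.checkAndMutate(row, family)
        .qualifier(qualifier)
        .ifEquals(expected)   // compare current cell value against expected
        .thenPut(put);        // apply the Put only when the check passes
  }
}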

[22/51] [partial] hbase-site git commit: Published site at .

2018-02-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
index 5e1590b..d481372 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html

[22/51] [partial] hbase-site git commit: Published site at .

2018-02-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
index fb9bdb3..4584cda 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
@@ -137,7 +137,9 @@
 
 
 CompletableFuture<List<SnapshotDescription>>
-AsyncHBaseAdmin.listSnapshots()
+AsyncAdmin.listSnapshots()
+List completed snapshots.
+
 
 
 List<SnapshotDescription>
@@ -146,22 +148,22 @@
 
 
 
-List<SnapshotDescription>
-HBaseAdmin.listSnapshots()
+CompletableFuture<List<SnapshotDescription>>
+RawAsyncHBaseAdmin.listSnapshots()
 
 
-CompletableFuture<List<SnapshotDescription>>
-AsyncAdmin.listSnapshots()
-List completed snapshots.
-
+List<SnapshotDescription>
+HBaseAdmin.listSnapshots()
 
 
 CompletableFuture<List<SnapshotDescription>>
-AsyncHBaseAdmin.listSnapshots(Pattern pattern)
+AsyncAdmin.listSnapshots(Pattern pattern)
+List all the completed snapshots matching the given pattern.
+
 
 
 List<SnapshotDescription>
@@ -170,18 +172,16 @@
 
 
 
-List<SnapshotDescription>
-HBaseAdmin.listSnapshots(Pattern pattern)
+CompletableFuture<List<SnapshotDescription>>
+RawAsyncHBaseAdmin.listSnapshots(Pattern pattern)
 
 
-CompletableFuture<List<SnapshotDescription>>
-AsyncAdmin.listSnapshots(Pattern pattern)
-List all the completed snapshots matching the given pattern.
-
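A small sketch of the pattern-filtered listing above, assuming snapshots are named with a "weekly-" prefix (the prefix is illustrative):

import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

final class ListWeeklySnapshots {
  // admin is an Admin obtained from an open Connection.
  static void print(Admin admin) throws Exception {
    List<SnapshotDescription> weekly = admin.listSnapshots(Pattern.compile("weekly-.*"));
    for (SnapshotDescription sd : weekly) {
      System.out.println(sd.getName() + " (table " + sd.getTableNameAsString() + ")");
    }
  }
}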

[22/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index bb263a4..203e85c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 008@InterfaceAudience.Private
 009public class Version {
 010  public static final String version = "3.0.0-SNAPSHOT";
-011  public static final String revision = "6519b98ac3115c4442a2778f6ed7b39ce5cd3b83";
+011  public static final String revision = "170ffbba683217bdb30e5c99f0e728e0dc660d56";
 012  public static final String user = "jenkins";
-013  public static final String date = "Sat Feb  3 14:41:05 UTC 2018";
+013  public static final String date = "Sun Feb  4 14:41:34 UTC 2018";
 014  public static final String url = "git://asf920.gq1.ygridcore.net/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015  public static final String srcChecksum = "e1f78921fcd876d508017ada55edc99a";
+015  public static final String srcChecksum = "3c3a6a55ea36b8cbac6c726d5fe311de";
 016}
 
 



[22/51] [partial] hbase-site git commit: Published site at .

2018-02-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/org/apache/hadoop/hbase/wal/DisabledWALProvider.DisabledWAL.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/wal/DisabledWALProvider.DisabledWAL.html 
b/devapidocs/org/apache/hadoop/hbase/wal/DisabledWALProvider.DisabledWAL.html
index e1dc6a3..11511ca 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/wal/DisabledWALProvider.DisabledWAL.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/wal/DisabledWALProvider.DisabledWAL.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class DisabledWALProvider.DisabledWAL
+private static class DisabledWALProvider.DisabledWAL
 extends Object
 implements WAL
 
@@ -349,7 +349,7 @@ implements 
 
 listeners
-protected final List<WALActionsListener> listeners
+protected final List<WALActionsListener> listeners
 
 
 
@@ -358,7 +358,7 @@ implements 
 
 path
-protected final org.apache.hadoop.fs.Path path
+protected final org.apache.hadoop.fs.Path path
 
 
 
@@ -367,7 +367,7 @@ implements 
 
 coprocessorHost
-protected final WALCoprocessorHost coprocessorHost
+protected final WALCoprocessorHost coprocessorHost
 
 
 
@@ -376,7 +376,7 @@ implements 
 
 closed
-protected final AtomicBoolean closed
+protected final AtomicBoolean closed
 
 
 
@@ -393,7 +393,7 @@ implements 
 
 DisabledWAL
-public DisabledWAL(org.apache.hadoop.fs.Path path,
+public DisabledWAL(org.apache.hadoop.fs.Path path,
 org.apache.hadoop.conf.Configuration conf,
 List<WALActionsListener> listeners)
 
@@ -412,7 +412,7 @@ implements 
 
 registerWALActionsListener
-public void registerWALActionsListener(WALActionsListener listener)
+public void registerWALActionsListener(WALActionsListener listener)
 Description copied from interface: WAL
 Registers WALActionsListener
 
@@ -427,7 +427,7 @@ implements 
 
 unregisterWALActionsListener
-public boolean unregisterWALActionsListener(WALActionsListener listener)
+public boolean unregisterWALActionsListener(WALActionsListener listener)
 Description copied from interface: WAL
 Unregisters WALActionsListener
 
@@ -442,7 +442,7 @@ implements 
 
 rollWriter
-public byte[][] rollWriter()
+public byte[][] rollWriter()
 Description copied from interface: WAL
 Roll the log writer. That is, start writing log messages to a new file.
 
@@ -465,7 +465,7 @@ implements 
 
 rollWriter
-public byte[][] rollWriter(boolean force)
+public byte[][] rollWriter(boolean force)
 Description copied from interface: WAL
 Roll the log writer. That is, start writing log messages to a new file.
 
@@ -491,7 +491,7 @@ implements 
 
 shutdown
-public void shutdown()
+public void shutdown()
 Description copied from interface: WAL
 Stop accepting new writes. If we have unsynced writes still in buffer, sync them.
  Extant edits are left in place in backing storage to be replayed later.
 
@@ -507,7 +507,7 @@ implements 
 
 close
-public void close()
+public void close()
 Description copied from interface: WAL
 Caller no longer needs any edits from this WAL. Implementers are free to reclaim
  underlying resources after this call; i.e. filesystem based WALs can archive or
@@ -528,7 +528,7 @@ implements 
 
 append
-public long append(RegionInfo info,
+public long append(RegionInfo info,
 WALKeyImpl key,
 WALEdit edits,
 boolean inMemstore)
@@ -563,7 +563,7 @@ implements 
 
 updateStore
-public void updateStore(byte[] encodedRegionName,
+public void updateStore(byte[] encodedRegionName,
 byte[] familyName,
 Long sequenceid,
 boolean onlyIfGreater)
@@ -583,7 +583,7 @@ implements 
 
 sync
-public void sync()
+public void sync()
 Description copied from interface: WAL
 Sync what we have in the WAL.
 
@@ -598,7 +598,7 @@ implements 
 
 sync
-public void sync(long txid)
+public void sync(long txid)
 Description copied from interface: WAL
 Sync the WAL if the txId was not already sync'd.
 
@@ -615,7 +615,7 @@ implements 
 
 
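The signatures above describe the write-path contract every WAL implementation honors: append returns a transaction id and sync(txid) is the durability point (DisabledWAL turns both into no-ops, which is exactly its purpose). A hedged sketch of that ordering, with the collaborators assumed to exist and package locations following the 2.x layout:

import java.io.IOException;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKeyImpl;

final class WalWriteSketch {
  // Internal API; shown only to illustrate the append-then-sync ordering.
  static void writeAndSync(WAL wal, RegionInfo info, WALKeyImpl key, WALEdit edits)
      throws IOException {
    long txid = wal.append(info, key, edits, true); // true: edits are also destined for the memstore
    wal.sync(txid); // blocks until the edit is durable (no-op for DisabledWAL)
  }
}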

[22/51] [partial] hbase-site git commit: Published site at .

2018-01-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html 
b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
new file mode 100644
index 000..7ab1e16
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
@@ -0,0 +1,6248 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+
+
+
+
+
+Uses of Class org.apache.hadoop.hbase.HBaseClassTestRule (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+
+
+
+
+
+
+Uses of Class org.apache.hadoop.hbase.HBaseClassTestRule
+
+
+
+
+
+Packages that use HBaseClassTestRule
+
+Package
+Description
+
+
+
+org.apache.hadoop.hbase
+
+
+
+org.apache.hadoop.hbase.backup
+
+
+
+org.apache.hadoop.hbase.backup.example
+
+
+
+org.apache.hadoop.hbase.backup.master
+
+
+
+org.apache.hadoop.hbase.chaos.actions
+
+
+
+org.apache.hadoop.hbase.client
+
+
+
+org.apache.hadoop.hbase.client.example
+
+
+
+org.apache.hadoop.hbase.client.locking
+
+
+
+org.apache.hadoop.hbase.client.replication
+
+
+
+org.apache.hadoop.hbase.client.rsgroup
+
+
+
+org.apache.hadoop.hbase.codec
+
+
+
+org.apache.hadoop.hbase.conf
+
+
+
+org.apache.hadoop.hbase.constraint
+
+
+
+org.apache.hadoop.hbase.coprocessor
+
+
+
+org.apache.hadoop.hbase.coprocessor.example
+
+
+
+org.apache.hadoop.hbase.errorhandling
+
+
+
+org.apache.hadoop.hbase.exceptions
+
+
+
+org.apache.hadoop.hbase.executor
+
+
+
+org.apache.hadoop.hbase.favored
+
+
+
+org.apache.hadoop.hbase.filter
+
+
+
+org.apache.hadoop.hbase.fs
+
+
+
+org.apache.hadoop.hbase.http
+
+
+
+org.apache.hadoop.hbase.http.conf
+
+
+
+org.apache.hadoop.hbase.http.jmx
+
+
+
+org.apache.hadoop.hbase.http.lib
+
+
+
+org.apache.hadoop.hbase.http.log
+
+
+
+org.apache.hadoop.hbase.io
+
+
+
+org.apache.hadoop.hbase.io.asyncfs
+
+
+
+org.apache.hadoop.hbase.io.crypto
+
+
+
+org.apache.hadoop.hbase.io.crypto.aes
+
+
+
+org.apache.hadoop.hbase.io.encoding
+
+
+
+org.apache.hadoop.hbase.io.hadoopbackport
+
+
+
+org.apache.hadoop.hbase.io.hfile
+
+
+
+org.apache.hadoop.hbase.io.hfile.bucket
+
+
+
+org.apache.hadoop.hbase.io.util
+
+
+
+org.apache.hadoop.hbase.ipc
+
+
+
+org.apache.hadoop.hbase.mapred
+
+
+
+org.apache.hadoop.hbase.mapreduce
+
+
+
+org.apache.hadoop.hbase.master
+
+
+
+org.apache.hadoop.hbase.master.assignment
+
+
+
+org.apache.hadoop.hbase.master.balancer
+
+
+
+org.apache.hadoop.hbase.master.cleaner
+
+
+
+org.apache.hadoop.hbase.master.locking
+
+
+
+org.apache.hadoop.hbase.master.normalizer
+
+
+
+org.apache.hadoop.hbase.master.procedure
+
+
+
+org.apache.hadoop.hbase.master.snapshot
+
+
+
+org.apache.hadoop.hbase.metrics
+
+
+
+org.apache.hadoop.hbase.metrics.impl
+
+
+
+org.apache.hadoop.hbase.mob
+
+
+
+org.apache.hadoop.hbase.mob.compactions
+
+
+
+org.apache.hadoop.hbase.monitoring
+
+
+
+org.apache.hadoop.hbase.namespace
+
+
+
+org.apache.hadoop.hbase.nio
+
+
+
+org.apache.hadoop.hbase.procedure
+
+
+
+org.apache.hadoop.hbase.procedure2
+
+
+
+org.apache.hadoop.hbase.procedure2.store
+
+
+
+org.apache.hadoop.hbase.procedure2.store.wal
+
+
+
+org.apache.hadoop.hbase.procedure2.util
+
+
+
+org.apache.hadoop.hbase.quotas
+
+
+
+org.apache.hadoop.hbase.quotas.policies
+
+
+
+org.apache.hadoop.hbase.regionserver
+
+
+
+org.apache.hadoop.hbase.regionserver.compactions
+
+
+
+org.apache.hadoop.hbase.regionserver.querymatcher
+
+
+
+org.apache.hadoop.hbase.regionserver.throttle
+
+
+
+org.apache.hadoop.hbase.regionserver.wal
+
+
+
+org.apache.hadoop.hbase.replication
+
+
+
+org.apache.hadoop.hbase.replication.master
+
+
+
+org.apache.hadoop.hbase.replication.multiwal
+
+
+
+org.apache.hadoop.hbase.replication.regionserver
+
+
+
+org.apache.hadoop.hbase.rest
+
+
+
+org.apache.hadoop.hbase.rest.client
+
+
+
+org.apache.hadoop.hbase.rest.model
+
+
+
+org.apache.hadoop.hbase.rsgroup
+
+
+
+org.apache.hadoop.hbase.security
+
+
+
+org.apache.hadoop.hbase.security.access
+
+
+
+org.apache.hadoop.hbase.security.token
+
+
+
+org.apache.hadoop.hbase.security.visibility
+
+
+
+org.apache.hadoop.hbase.snapshot
+
+
+
+org.apache.hadoop.hbase.thrift
+
+
+
+org.apache.hadoop.hbase.thrift2
+
+
+

[22/51] [partial] hbase-site git commit: Published site at .

2018-01-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.ProgressCommand.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.ProgressCommand.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.ProgressCommand.html
index eb9e252..667152a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.ProgressCommand.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.ProgressCommand.html
@@ -28,22 +28,22 @@
 020
 021import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH;
 022import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC;
-023import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
-024import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
-025import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
-026import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
-027import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
-028import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_BACKUP_DESC;
-029import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
-030import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
-031import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
-032import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_LIST_DESC;
-033import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
-034import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
-035import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME;
-036import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_DESC;
-037import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
-038import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
+023import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
+024import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
+025import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
+026import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
+027import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
+028import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
+029import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
+030import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_BACKUP_DESC;
+031import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
+032import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
+033import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
+034import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_LIST_DESC;
+035import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
+036import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
+037import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME;
+038import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_DESC;
 039
 040import java.io.IOException;
 041import java.net.URI;
@@ -70,194 +70,194 @@
 062import org.apache.hadoop.hbase.backup.util.BackupUtils;
 063import org.apache.hadoop.hbase.client.Connection;
 064import org.apache.hadoop.hbase.client.ConnectionFactory;
-065import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-066import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-067import org.apache.yetus.audience.InterfaceAudience;
-068
-069/**
-070 * General backup commands, options and usage messages
-071 */
-072
+065import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+066import org.apache.yetus.audience.InterfaceAudience;
+067
+068import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+069
+070/**
+071 * General backup commands, options and usage messages
+072 */
 073@InterfaceAudience.Private
 074public final class BackupCommands {
-075
-076  public final static String INCORRECT_USAGE = "Incorrect usage";
-077
-078  public final static String TOP_LEVEL_NOT_ALLOWED =
-079      "Top level (root) folder is not allowed to be a backup destination";
-080
-081  public static final String USAGE =
[22/51] [partial] hbase-site git commit: Published site at .

2018-01-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.ProgressFields.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.ProgressFields.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.ProgressFields.html
new file mode 100644
index 000..03a0b2a
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.ProgressFields.html
@@ -0,0 +1,830 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software Foundation (ASF) under one
+003 * or more contributor license agreements.  See the NOTICE file
+004 * distributed with this work for additional information
+005 * regarding copyright ownership.  The ASF licenses this file
+006 * to you under the Apache License, Version 2.0 (the
+007 * "License"); you may not use this file except in compliance
+008 * with the License.  You may obtain a copy of the License at
+009 *
+010 * http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or agreed to in writing, software
+013 * distributed under the License is distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+015 * See the License for the specific language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase.regionserver;
+019
+020import java.util.List;
+021
+022import org.apache.hadoop.hbase.Cell;
+023import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+024import org.apache.yetus.audience.InterfaceAudience;
+025import org.apache.yetus.audience.InterfaceStability;
+026import org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics;
+027
+028/**
+029 * ScannerContext instances encapsulate limit tracking AND progress towards those limits during
+030 * invocations of {@link InternalScanner#next(java.util.List)} and
+031 * {@link RegionScanner#next(java.util.List)}.
+032 * <p>
+033 * A ScannerContext instance should be updated periodically throughout execution whenever progress
+034 * towards a limit has been made. Each limit can be checked via the appropriate checkLimit method.
+035 * <p>
+036 * Once a limit has been reached, the scan will stop. The invoker of
+037 * {@link InternalScanner#next(java.util.List)} or {@link RegionScanner#next(java.util.List)} can
+038 * use the appropriate check*Limit methods to see exactly which limits have been reached.
+039 * Alternatively, {@link #checkAnyLimitReached(LimitScope)} is provided to see if ANY limit was
+040 * reached
+041 * <p>
+042 * {@link NoLimitScannerContext#NO_LIMIT} is an immutable static definition that can be used
+043 * whenever a {@link ScannerContext} is needed but limits do not need to be enforced.
+044 * <p>
+045 * NOTE: It is important that this class only ever expose setter methods that can be safely skipped
+046 * when limits should be NOT enforced. This is because of the necessary immutability of the class
+047 * {@link NoLimitScannerContext}. If a setter cannot be safely skipped, the immutable nature of
+048 * {@link NoLimitScannerContext} will lead to incorrect behavior.
+049 */
+050@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
+051@InterfaceStability.Evolving
+052public class ScannerContext {
+053
+054  LimitFields limits;
+055  /**
+056   * A different set of progress fields. Only include batch, dataSize and heapSize. Compare to
+057   * LimitFields, ProgressFields doesn't contain time field. As we save a deadline in LimitFields,
+058   * so use {@link System#currentTimeMillis()} directly when check time limit.
+059   */
+060  ProgressFields progress;
+061
+062  /**
+063   * The state of the scanner after the invocation of {@link InternalScanner#next(java.util.List)}
+064   * or {@link RegionScanner#next(java.util.List)}.
+065   */
+066  NextState scannerState;
+067  private static final NextState DEFAULT_STATE = NextState.MORE_VALUES;
+068
+069  /**
+070   * Used as an indication to invocations of {@link InternalScanner#next(java.util.List)} and
+071   * {@link RegionScanner#next(java.util.List)} that, if true, the progress tracked within this
+072   * {@link ScannerContext} instance should be considered while evaluating the limits. Useful for
+073   * enforcing a set of limits across multiple calls (i.e. the limit may not be reached in a single
+074   * invocation, but any progress made should be considered in future invocations)
+075   * <p>
+076   * Defaulting this value to false means that, by default, any tracked progress will be wiped clean
+077   * on invocations to {@link InternalScanner#next(java.util.List)} and
+078   * {@link RegionScanner#next(java.util.List)} and the call will be treated as though no progress
+079   
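A hedged sketch of the contract this comment describes, using only names quoted in it. It assumes the next(List, ScannerContext) overload of InternalScanner that this code base provides, and server-side (coprocessor) context, since the class is LimitedPrivate(COPROC):

// Minimal server-side sketch; scanner is an assumed InternalScanner instance.
List<Cell> cells = new ArrayList<>();
// NO_LIMIT is the documented way to call next() when limits need not be enforced:
boolean moreValues = scanner.next(cells, NoLimitScannerContext.NO_LIMIT);
// With a real ScannerContext, progress accumulates inside next(), and the caller
// inspects the context afterwards (the check*Limit methods) to see which limit tripped.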

[22/51] [partial] hbase-site git commit: Published site at .

2018-01-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
index 5f95040..0bc5dcd 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
@@ -492,8 +492,8 @@ implements 
 void
-abort(String msg,
- Throwable t)
+abort(String reason,
+ Throwable cause)
 Cause the server to exit without closing the regions it is serving, the log
  it is using and without notifying the master.
 
@@ -1305,7 +1305,9 @@ implements 
 void
-shutdown()
+shutdown()
+Shutdown the cluster.
+
 
 
 long
@@ -3478,8 +3480,8 @@ implements 
 
 abort
-public void abort(String msg,
-                  Throwable t)
+public void abort(String reason,
+                  Throwable cause)
 Description copied from class: HRegionServer
 Cause the server to exit without closing the regions it is serving, the log
  it is using and without notifying the master. Used unit testing and on
@@ -3490,8 +3492,8 @@ implements 
 Overrides:
 abort in class HRegionServer
 Parameters:
-msg - the reason we are aborting
-t - the exception that caused the abort, or null
+reason - the reason we are aborting
+cause - the exception that caused the abort, or null
 
 
 
@@ -3501,7 +3503,7 @@ implements 
 
 getZooKeeper
-public ZKWatcher getZooKeeper()
+public ZKWatcher getZooKeeper()
 Description copied from interface: Server
 Gets the ZooKeeper instance for this server.
 
@@ -3518,7 +3520,7 @@ implements 
 
 getMasterCoprocessorHost
-public MasterCoprocessorHost getMasterCoprocessorHost()
+public MasterCoprocessorHost getMasterCoprocessorHost()
 
 Specified by:
 getMasterCoprocessorHost in interface MasterServices
@@ -3533,7 +3535,7 @@ implements 
 
 getMasterQuotaManager
-public MasterQuotaManager getMasterQuotaManager()
+public MasterQuotaManager getMasterQuotaManager()
 
 Specified by:
 getMasterQuotaManager in interface MasterServices
@@ -3548,7 +3550,7 @@ implements 
 
 getMasterProcedureExecutor
-public ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor()
+public ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor()
 
 Specified by:
 getMasterProcedureExecutor in interface MasterServices
@@ -3563,7 +3565,7 @@ implements 
 
 getServerName
-public ServerName getServerName()
+public ServerName getServerName()
 
 Specified by:
 getServerName in interface Server
@@ -3580,7 +3582,7 @@ implements 
 
 getAssignmentManager
-public AssignmentManager getAssignmentManager()
+public AssignmentManager getAssignmentManager()
 
 Specified by:
 getAssignmentManager in interface MasterServices
@@ -3595,7 +3597,7 @@ implements 
 
 getCatalogJanitor
-public CatalogJanitor getCatalogJanitor()
+public CatalogJanitor getCatalogJanitor()
 
 Specified by:
 getCatalogJanitor in interface MasterServices
@@ -3610,7 +3612,7 @@ implements 
 
 getRegionServerFatalLogBuffer
-public MemoryBoundedLogMessageBuffer getRegionServerFatalLogBuffer()
+public MemoryBoundedLogMessageBuffer getRegionServerFatalLogBuffer()
 
 
 
@@ -3619,8 +3621,10 @@ implements 
 
 shutdown
-public void shutdown()
+public void shutdown()
    throws IOException
+Shutdown the cluster.
+ Master runs a coordinated stop of all RegionServers and then itself.
 
 Throws:
 IOException
@@ -3633,7 +3637,7 @@ implements 
 
 stopMaster
-public void stopMaster()
+public void stopMaster()
  throws IOException
 
 Throws:
@@ -3647,7 +3651,7 @@ implements 
 
 stop

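Client-side, these master operations are reached through Admin; a hedged sketch distinguishing the two stop flavors documented above:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class StopCluster {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // stopMaster(): stops only the active master; regionservers keep serving.
      // shutdown(): the coordinated stop described above - all regionservers, then the master.
      admin.stopMaster();
      // admin.shutdown();
    }
  }
}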
[22/51] [partial] hbase-site git commit: Published site at .

2018-01-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/hbase-build-configuration/hbase-archetypes/dependencies.html
--
diff --git a/hbase-build-configuration/hbase-archetypes/dependencies.html 
b/hbase-build-configuration/hbase-archetypes/dependencies.html
index 6447c76..2370c1e 100644
--- a/hbase-build-configuration/hbase-archetypes/dependencies.html
+++ b/hbase-build-configuration/hbase-archetypes/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetypes  Project Dependencies
 
@@ -330,7 +330,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-18
+  Last Published: 
2018-01-19
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/hbase-build-configuration/hbase-archetypes/dependency-convergence.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/dependency-convergence.html 
b/hbase-build-configuration/hbase-archetypes/dependency-convergence.html
index aaef905..1705535 100644
--- a/hbase-build-configuration/hbase-archetypes/dependency-convergence.html
+++ b/hbase-build-configuration/hbase-archetypes/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetypes  Reactor Dependency 
Convergence
 
@@ -912,7 +912,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-18
+  Last Published: 
2018-01-19
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/hbase-build-configuration/hbase-archetypes/dependency-info.html
--
diff --git a/hbase-build-configuration/hbase-archetypes/dependency-info.html 
b/hbase-build-configuration/hbase-archetypes/dependency-info.html
index 6d507f5..e4f1353 100644
--- a/hbase-build-configuration/hbase-archetypes/dependency-info.html
+++ b/hbase-build-configuration/hbase-archetypes/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetypes  Dependency Information
 
@@ -148,7 +148,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-18
+  Last Published: 
2018-01-19
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/hbase-build-configuration/hbase-archetypes/dependency-management.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/dependency-management.html 
b/hbase-build-configuration/hbase-archetypes/dependency-management.html
index 1634e57..677246d 100644
--- a/hbase-build-configuration/hbase-archetypes/dependency-management.html
+++ b/hbase-build-configuration/hbase-archetypes/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetypes  Project Dependency 
Management
 
@@ -810,7 +810,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-18
+  Last Published: 
2018-01-19
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependencies.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependencies.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependencies.html
index deb4aca..b7173c1 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependencies.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetype builder  Project 
Dependencies
 
@@ -330,7 +330,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-18
+  Last Published: 
2018-01-19
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-convergence.html

[22/51] [partial] hbase-site git commit: Published site at .

2018-01-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/AbstractTestCITimeout.SleepAndFailFirstTime.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/AbstractTestCITimeout.SleepAndFailFirstTime.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/AbstractTestCITimeout.SleepAndFailFirstTime.html
new file mode 100644
index 000..085cfc6
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/AbstractTestCITimeout.SleepAndFailFirstTime.html
@@ -0,0 +1,534 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+
+
+
+
+
+AbstractTestCITimeout.SleepAndFailFirstTime (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+
+
+
+
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.client
+Class AbstractTestCITimeout.SleepAndFailFirstTime
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.client.AbstractTestCITimeout.SleepAndFailFirstTime
+
+
+
+
+
+
+
+All Implemented Interfaces:
+org.apache.hadoop.hbase.Coprocessor, org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, org.apache.hadoop.hbase.coprocessor.RegionObserver
+
+
+Enclosing class:
+AbstractTestCITimeout
+
+
+
+public static class AbstractTestCITimeout.SleepAndFailFirstTime
+extends java.lang.Object
+implements org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, org.apache.hadoop.hbase.coprocessor.RegionObserver
+This coprocessor sleeps for 20 seconds. The first call fails; the second time, it works.
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from 
interfaceorg.apache.hadoop.hbase.Coprocessor
+org.apache.hadoop.hbase.Coprocessor.State
+
+
+
+
+
+Nested classes/interfaces inherited from 
interfaceorg.apache.hadoop.hbase.coprocessor.RegionObserver
+org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+(package private) static AtomicLong
+ct
+
+
+(package private) static long
+DEFAULT_SLEEP_TIME
+
+
+(package private) static String
+SLEEP_TIME_CONF_KEY
+
+
+(package private) static AtomicLong
+sleepTime
+
+
+
+
+
+
+Fields inherited from 
interfaceorg.apache.hadoop.hbase.Coprocessor
+PRIORITY_HIGHEST, PRIORITY_LOWEST, PRIORITY_SYSTEM, PRIORITY_USER, 
VERSION
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+SleepAndFailFirstTime()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+Optional<org.apache.hadoop.hbase.coprocessor.RegionObserver>
+getRegionObserver()
+
+
+void
+postOpen(org.apache.hadoop.hbase.coprocessor.ObserverContext<org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment> c)
+
+
+void
+preDelete(org.apache.hadoop.hbase.coprocessor.ObserverContext<org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment> e,
+ org.apache.hadoop.hbase.client.Delete delete,
+ org.apache.hadoop.hbase.wal.WALEdit edit,
+ org.apache.hadoop.hbase.client.Durability durability)
+
+
+void

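A hedged reconstruction of the pattern this test class embodies; the real source differs in detail, and only the names in the summary tables above (ct, sleepTime, the hook signatures) are taken from it:

import java.io.IOException;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.wal.WALEdit;

public class SleepAndFailFirstTimeSketch implements RegionCoprocessor, RegionObserver {
  static final AtomicLong ct = new AtomicLong(0);
  static final AtomicLong sleepTime = new AtomicLong(20_000);

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this); // expose the observer hooks below
  }

  @Override
  public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete,
      WALEdit edit, Durability durability) throws IOException {
    try {
      Thread.sleep(sleepTime.get()); // hold the RPC long enough to trip client timeouts
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
    }
    if (ct.incrementAndGet() == 1) {
      throw new IOException("first call fails"); // later calls succeed
    }
  }
}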
[22/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
index fadf667..14b2b69 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
@@ -7,269 +7,269 @@
 
 
 001/**
-002 *
-003 * Licensed to the Apache Software Foundation (ASF) under one
-004 * or more contributor license agreements.  See the NOTICE file
-005 * distributed with this work for additional information
-006 * regarding copyright ownership.  The ASF licenses this file
-007 * to you under the Apache License, Version 2.0 (the
-008 * "License"); you may not use this file except in compliance
-009 * with the License.  You may obtain a copy of the License at
-010 *
-011 * http://www.apache.org/licenses/LICENSE-2.0
-012 *
-013 * Unless required by applicable law or agreed to in writing, software
-014 * distributed under the License is distributed on an "AS IS" BASIS,
-015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-016 * See the License for the specific language governing permissions and
-017 * limitations under the License.
-018 */
+002 * Licensed to the Apache Software Foundation (ASF) under one
+003 * or more contributor license agreements.  See the NOTICE file
+004 * distributed with this work for additional information
+005 * regarding copyright ownership.  The ASF licenses this file
+006 * to you under the Apache License, Version 2.0 (the
+007 * "License"); you may not use this file except in compliance
+008 * with the License.  You may obtain a copy of the License at
+009 *
+010 * http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or agreed to in writing, software
+013 * distributed under the License is distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+015 * See the License for the specific language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase.wal;
 019
-020
-021package org.apache.hadoop.hbase.wal;
-022
-023import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-024
-025import java.io.IOException;
-026import java.io.InterruptedIOException;
-027import java.util.Collections;
-028import java.util.List;
-029import java.util.OptionalLong;
-030import java.util.concurrent.atomic.AtomicReference;
-031
-032import org.apache.hadoop.conf.Configuration;
-033import org.apache.hadoop.fs.FileSystem;
-034import org.apache.hadoop.fs.Path;
-035import org.apache.yetus.audience.InterfaceAudience;
-036import org.slf4j.Logger;
-037import org.slf4j.LoggerFactory;
-038// imports for things that haven't moved from regionserver.wal yet.
-039import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
-040import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader;
-041import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-042import org.apache.hadoop.hbase.replication.regionserver.WALFileLengthProvider;
-043import org.apache.hadoop.hbase.util.CancelableProgressable;
-044import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-045import org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
-046import org.apache.hadoop.hbase.wal.WAL.Reader;
-047import org.apache.hadoop.hbase.wal.WALProvider.Writer;
-048
-049/**
-050 * Entry point for users of the Write Ahead Log.
-051 * Acts as the shim between internal use and the particular WALProvider we use to handle wal
-052 * requests.
-053 *
-054 * Configure which provider gets used with the configuration setting "hbase.wal.provider". Available
-055 * implementations:
-056 * <ul>
-057 *   <li><em>defaultProvider</em> : whatever provider is standard for the hbase version. Currently
-058 *       "filesystem"</li>
-059 *   <li><em>filesystem</em> : a provider that will run on top of an implementation of the Hadoop
-060 *       FileSystem interface, normally HDFS.</li>
-061 *   <li><em>multiwal</em> : a provider that will use multiple "filesystem" wal instances per region
-062 *       server.</li>
-063 * </ul>
-064 *
-065 * Alternatively, you may provide a custom implementation of {@link WALProvider} by class name.
-066 */
-067@InterfaceAudience.Private
-068public class WALFactory implements WALFileLengthProvider {
-069
-070  private static final Logger LOG = LoggerFactory.getLogger(WALFactory.class);
-071
-072  /**
-073   * Maps between configuration names for providers and implementation classes.
-074   */
-075  static enum Providers {
-076    defaultProvider(AsyncFSWALProvider.class),
-077
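
The provider names in that javadoc are plain configuration values; a minimal sketch of selecting one (the property key is taken from the javadoc above, everything else is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Pick one of the documented short names: "defaultProvider",
        // "filesystem", or "multiwal" (several filesystem WALs per region server).
        conf.set("hbase.wal.provider", "multiwal");
        // Per the javadoc, a custom WALProvider can be named by class instead:
        // conf.set("hbase.wal.provider", "com.example.MyWALProvider"); // hypothetical class
        System.out.println(conf.get("hbase.wal.provider"));
      }
    }
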

[22/51] [partial] hbase-site git commit: Published site at .

2018-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
index aedcf26..5e14bfd 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-class RawAsyncHBaseAdmin
+class RawAsyncHBaseAdmin
 extends java.lang.Object
 implements AsyncAdmin
 The implementation of AsyncAdmin.
@@ -219,17 +219,21 @@ implements 
 private class
-RawAsyncHBaseAdmin.SplitTableRegionProcedureBiConsumer
+RawAsyncHBaseAdmin.ReplicationProcedureBiConsumer
 
 
+private class
+RawAsyncHBaseAdmin.SplitTableRegionProcedureBiConsumer
+
+
 private static interface
 RawAsyncHBaseAdmin.TableOperator
 
-
+
 private class
 RawAsyncHBaseAdmin.TableProcedureBiConsumer
 
-
+
 private class
 RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer
 
@@ -424,26 +428,37 @@ implements 
+private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 

[22/51] [partial] hbase-site git commit: Published site at .

2018-01-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
index ad79e6d..8bee2e4 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
@@ -287,7 +287,7 @@ implements MasterObserver
-postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, postBalance,
 postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, postCloneSnapshot,
 postCompletedCreateTableAction, postCompletedDeleteTableAction, postCompletedDisableTableAction,
 postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyTableAction,
 postCompletedSplitRegionAction, postCompletedTruncateTableAction, postCreateNamespace,
 postDecommissionRegionServers, postDeleteNamespace, postDeleteSnapshot, postDeleteTable,
 postDisableReplicationPeer, postDisableTable, postEnableReplicationPeer, postEnableTable,
 postGetClusterStatus, postGetLocks, postGetNamespaceDescriptor, postGetProcedures,
 postGetReplicationPeerConfig, postGetTableDescriptors, postGetTableNames,
 postListDecommissionedRegionServers, postListNamespaceDescriptors, postListReplicationPeers,
 postListSnapshot, postLockHeartbeat, postMergeRegions, postMergeRegionsCommitAction,
 postModifyNamespace, postModifyTable, postMove, postMoveServers, postMoveServersAndTables,
 postMoveTables, postRecommissionRegionServer, postRegionOffline, postRemoveReplicationPeer,
 postRemoveRSGroup, postRemoveServers, postRequestLock, postRestoreSnapshot,
 postRollBackMergeRegionsAction, postRollBackSplitRegionAction, postSetNamespaceQuota,
 postSetSplitOrMergeEnabled, postSetTableQuota, postSetUserQuota, postSetUserQuota,
 postSetUserQuota, postSnapshot, postStartMaster, postTableFlush, postTruncateTable,
 postUnassign, postUpdateReplicationPeerConfig, preAbortProcedure, preAddReplicationPeer,
 preAddRSGroup, preAssign, preBalance, preBalanceRSGroup, preBalanceSwitch, preClearDeadServers,
 preCloneSnapshot, preCreateNamespace, preCreateTableAction, preDecommissionRegionServers,
 preDeleteNamespace, preDeleteSnapshot, preDeleteTable, preDeleteTableAction,
 preDisableReplicationPeer, preDisableTableAction, preEnableReplicationPeer, preEnableTable,
 preEnableTableAction, preGetClusterStatus, preGetLocks, preGetNamespaceDescriptor,
 preGetProcedures, preGetReplicationPeerConfig, preGetTableDescriptors, preGetTableNames,
 preListDecommissionedRegionServers, preListNamespaceDescriptors, preListReplicationPeers,
 preListSnapshot, preLockHeartbeat, preMasterInitialization, preMergeRegions,
 preMergeRegionsAction, preMergeRegionsCommitAction, preModifyNamespace, preModifyTable,
 preModifyTableAction, preMove, preMoveServers, preMoveServersAndTables, preMoveTables,
 preRecommissionRegionServer, preRegionOffline, preRemoveReplicationPeer, preRemoveRSGroup,
 preRemoveServers, preRequestLock, preRestoreSnapshot, preSetNamespaceQuota,
 preSetSplitOrMergeEnabled, preSetTableQuota, preSetUserQuota, preSetUserQuota, preSetUserQuota,
 preShutdown, preSnapshot, preSplitRegion, preSplitRegionAction, preSplitRegionAfterMETAAction,
 preSplitRegionBeforeMETAAction, preStopMaster, preTableFlush, preTruncateTable,
 preTruncateTableAction, preUnassign, preUpdateReplicationPeerConfig
+postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, postBalance,
 postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, postCloneSnapshot,
 postCompletedCreateTableAction, postCompletedDeleteTableAction, postCompletedDisableTableAction,
 postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyTableAction,
 postCompletedSplitRegionAction, postCompletedTruncateTableAction, postCreateNamespace,
 postDecommissionRegionServers, postDeleteNamespace, postDeleteSnapshot, postDeleteTable,
 postDisableReplicationPeer, postDisableTable, postEnableReplicationPeer, postEnableTable,
 postGetClusterMetrics, postGetLocks, postGetNamespaceDescriptor, postGetProcedures,
 postGetReplicationPeerConfig, postGetTableDescriptors, postGetTableNames,
 postListDecommissionedRegionServers, postListNamespaceDescriptors, postListReplicationPeers,
 postListSnapshot, postLockHeartbeat, postMergeRegions, postMergeRegionsCommitAction,
 postModifyNamespace, postModifyTable, postMove, postMoveServers, postMoveServersAndTables,
 postMoveTables, postRecommissionRegionServer, postRegionOffline, postRemoveReplicationPeer,
 postRemoveRSGroup,
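
The only rename visible between the two lists is postGetClusterStatus becoming postGetClusterMetrics; a hedged sketch of overriding the renamed hook, assuming the HBase 2.x MasterObserver API:

    import java.io.IOException;
    import java.util.Optional;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.MasterObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;

    public class ClusterMetricsObserverSketch implements MasterCoprocessor, MasterObserver {
      @Override
      public Optional<MasterObserver> getMasterObserver() {
        return Optional.of(this);
      }

      // Renamed hook: previously postGetClusterStatus(ObserverContext, ClusterStatus).
      @Override
      public void postGetClusterMetrics(ObserverContext<MasterCoprocessorEnvironment> ctx,
          ClusterMetrics metrics) throws IOException {
        // e.g. record how often cluster metrics are fetched; the example
        // coprocessor in this diff publishes timing metrics from hooks like this.
      }
    }
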
 

[22/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyTableProcedureBiConsumer.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyTableProcedureBiConsumer.html
@@ -97,3307 +97,3304 @@
 089import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;

[22/51] [partial] hbase-site git commit: Published site at .

2018-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/codec/CellCodec.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/codec/CellCodec.html 
b/devapidocs/org/apache/hadoop/hbase/codec/CellCodec.html
index b271665..0b69c39 100644
--- a/devapidocs/org/apache/hadoop/hbase/codec/CellCodec.html
+++ b/devapidocs/org/apache/hadoop/hbase/codec/CellCodec.html
@@ -344,6 +344,6 @@ implements 
 
 
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/codec/CellCodecWithTags.CellDecoder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/codec/CellCodecWithTags.CellDecoder.html 
b/devapidocs/org/apache/hadoop/hbase/codec/CellCodecWithTags.CellDecoder.html
index 78b7b48..51aa84f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/codec/CellCodecWithTags.CellDecoder.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/codec/CellCodecWithTags.CellDecoder.html
@@ -379,6 +379,6 @@ extends 
 
 
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/codec/CellCodecWithTags.CellEncoder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/codec/CellCodecWithTags.CellEncoder.html 
b/devapidocs/org/apache/hadoop/hbase/codec/CellCodecWithTags.CellEncoder.html
index f6aeec0..342a8a2 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/codec/CellCodecWithTags.CellEncoder.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/codec/CellCodecWithTags.CellEncoder.html
@@ -348,6 +348,6 @@ extends 
 
 
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/codec/CellCodecWithTags.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/codec/CellCodecWithTags.html 
b/devapidocs/org/apache/hadoop/hbase/codec/CellCodecWithTags.html
index fe3e1bd..d831610 100644
--- a/devapidocs/org/apache/hadoop/hbase/codec/CellCodecWithTags.html
+++ b/devapidocs/org/apache/hadoop/hbase/codec/CellCodecWithTags.html
@@ -344,6 +344,6 @@ implements 
 
 
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/codec/Codec.Decoder.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/codec/Codec.Decoder.html 
b/devapidocs/org/apache/hadoop/hbase/codec/Codec.Decoder.html
index 0498c19..a78db86 100644
--- a/devapidocs/org/apache/hadoop/hbase/codec/Codec.Decoder.html
+++ b/devapidocs/org/apache/hadoop/hbase/codec/Codec.Decoder.html
@@ -200,6 +200,6 @@ extends 
 
 
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/codec/Codec.Encoder.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/codec/Codec.Encoder.html 
b/devapidocs/org/apache/hadoop/hbase/codec/Codec.Encoder.html
index 169080c..fe0e3e4 100644
--- a/devapidocs/org/apache/hadoop/hbase/codec/Codec.Encoder.html
+++ b/devapidocs/org/apache/hadoop/hbase/codec/Codec.Encoder.html
@@ -199,6 +199,6 @@ extends
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/codec/Codec.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/codec/Codec.html 
b/devapidocs/org/apache/hadoop/hbase/codec/Codec.html
index 324c051..3619789 100644
--- a/devapidocs/org/apache/hadoop/hbase/codec/Codec.html
+++ 

[22/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
index fb847e9..bbba19f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
@@ -26,74 +26,69 @@
 018 */
 019package org.apache.hadoop.hbase.regionserver;
 020
-021
-022import org.apache.yetus.audience.InterfaceAudience;
-023import org.apache.hadoop.hbase.util.ClassSize;
-024import org.apache.hadoop.hbase.CellComparator;
-025import org.apache.hadoop.hbase.io.TimeRange;
+021import java.util.Collections;
+022import java.util.List;
+023import org.apache.hadoop.hbase.CellComparator;
+024import org.apache.hadoop.hbase.util.ClassSize;
+025import org.apache.yetus.audience.InterfaceAudience;
 026
-027import java.util.ArrayList;
-028import java.util.Arrays;
-029import java.util.List;
-030
-031/**
-032 * ImmutableSegment is an abstract class that extends the API supported by a {@link Segment},
-033 * and is not needed for a {@link MutableSegment}.
-034 */
-035@InterfaceAudience.Private
-036public abstract class ImmutableSegment extends Segment {
-037
-038  public static final long DEEP_OVERHEAD = Segment.DEEP_OVERHEAD + ClassSize.NON_SYNC_TIMERANGE_TRACKER;
-039
-040  // each sub-type of immutable segment knows whether it is flat or not
-041  protected abstract boolean canBeFlattened();
+027/**
+028 * ImmutableSegment is an abstract class that extends the API supported by a {@link Segment},
+029 * and is not needed for a {@link MutableSegment}.
+030 */
+031@InterfaceAudience.Private
+032public abstract class ImmutableSegment extends Segment {
+033
+034  public static final long DEEP_OVERHEAD = Segment.DEEP_OVERHEAD + ClassSize.NON_SYNC_TIMERANGE_TRACKER;
+035
+036  // each sub-type of immutable segment knows whether it is flat or not
+037  protected abstract boolean canBeFlattened();
+038
+039  public int getNumUniqueKeys() {
+040    return getCellSet().getNumUniqueKeys();
+041  }
 042
-043  public int getNumUniqueKeys() {
-044    return getCellSet().getNumUniqueKeys();
-045  }
-046
-047  /////  CONSTRUCTORS  /////
-048  /**
-049   * Empty C-tor to be used only for CompositeImmutableSegment
-050   */
-051  protected ImmutableSegment(CellComparator comparator) {
-052    super(comparator, TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC));
-053  }
-054
-055  /**
-056   * C-tor to be used to build the derived classes
-057   */
-058  protected ImmutableSegment(CellSet cs, CellComparator comparator, MemStoreLAB memStoreLAB) {
-059    super(cs, comparator, memStoreLAB, TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC));
-060  }
-061
-062  /**
-063   * Copy C-tor to be used when new CSLMImmutableSegment (derived) is being built from a Mutable one.
-064   * This C-tor should be used when active MutableSegment is pushed into the compaction
-065   * pipeline and becomes an ImmutableSegment.
-066   */
-067  protected ImmutableSegment(Segment segment) {
-068    super(segment);
-069  }
-070
-071  /////  PUBLIC METHODS  /////
+043  /////  CONSTRUCTORS  /////
+044  /**
+045   * Empty C-tor to be used only for CompositeImmutableSegment
+046   */
+047  protected ImmutableSegment(CellComparator comparator) {
+048    super(comparator, TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC));
+049  }
+050
+051  /**
+052   * C-tor to be used to build the derived classes
+053   */
+054  protected ImmutableSegment(CellSet cs, CellComparator comparator, MemStoreLAB memStoreLAB) {
+055    super(cs, comparator, memStoreLAB, TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC));
+056  }
+057
+058  /**
+059   * Copy C-tor to be used when new CSLMImmutableSegment (derived) is being built from a Mutable one.
+060   * This C-tor should be used when active MutableSegment is pushed into the compaction
+061   * pipeline and becomes an ImmutableSegment.
+062   */
+063  protected ImmutableSegment(Segment segment) {
+064    super(segment);
+065  }
+066
+067  /////  PUBLIC METHODS  /////
+068
+069  public int getNumOfSegments() {
[22/51] [partial] hbase-site git commit: Published site at .

2017-12-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslNegotiateHandler.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslNegotiateHandler.html
 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslNegotiateHandler.html
index fa49855..a2205b3 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslNegotiateHandler.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslNegotiateHandler.html
@@ -100,13 +100,13 @@ var activeTableTab = "activeTableTab";
 java.lang.Object
 
 
-org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerAdapter
+org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerAdapter
 
 
-org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandlerAdapter
+org.apache.hbase.thirdparty.io.netty.channel.ChannelInboundHandlerAdapter
 
 
-org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelDuplexHandler
+org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler
 
 
 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.SaslNegotiateHandler
@@ -124,7 +124,7 @@ var activeTableTab = "activeTableTab";
 
 
 All Implemented Interfaces:
-org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler, 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandler, 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOutboundHandler
+org.apache.hbase.thirdparty.io.netty.channel.ChannelHandler, 
org.apache.hbase.thirdparty.io.netty.channel.ChannelInboundHandler, 
org.apache.hbase.thirdparty.io.netty.channel.ChannelOutboundHandler
 
 
 Enclosing class:
@@ -133,7 +133,7 @@ var activeTableTab = "activeTableTab";
 
 
 private static final class FanOutOneBlockAsyncDFSOutputSaslHelper.SaslNegotiateHandler
-extends 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelDuplexHandler
+extends org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler
 
 
 
@@ -147,11 +147,11 @@ extends org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelDuplexHandler
 Nested Class Summary
 
-Nested classes/interfaces inherited from interface org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler
-org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler.Sharable
+Nested classes/interfaces inherited from interface org.apache.hbase.thirdparty.io.netty.channel.ChannelHandler
+org.apache.hbase.thirdparty.io.netty.channel.ChannelHandler.Sharable
 
 
 
@@ -172,7 +172,7 @@ extends org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelDuplexHandler
 conf
 
-private org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise<Void>
+private org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise<Void>
 promise
 
 
@@ -206,12 +206,12 @@ extends org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelDuplexHandler
 Constructor and Description
 
-SaslNegotiateHandler(org.apache.hadoop.conf.Configuration conf,
+SaslNegotiateHandler(org.apache.hadoop.conf.Configuration conf,
 String username,
 char[] password,
 Map<String,String> saslProps,
 int timeoutMs,
-org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise<Void> promise)
+org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise<Void> promise)
 
 
 
@@ -230,11 +230,11 @@ extends org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelDuplexHandler
 
 void
-channelInactive(org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext ctx)
+channelInactive(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext ctx)
 
 void
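
The whole hunk is a package relocation: HBase's bundled Netty moved from org.apache.hadoop.hbase.shaded.io.netty to org.apache.hbase.thirdparty.io.netty. A minimal handler against the relocated packages, with stock Netty 4.x override signatures assumed:

    import org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler;
    import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;

    public class RelocatedNettyHandlerSketch extends ChannelDuplexHandler {
      @Override
      public void channelInactive(ChannelHandlerContext ctx) throws Exception {
        // Clean up on connection close, as SaslNegotiateHandler does, then
        // propagate the event down the pipeline.
        ctx.fireChannelInactive();
      }
    }
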

[22/51] [partial] hbase-site git commit: Published site at .

2017-12-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/class-use/Cell.DataType.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Cell.DataType.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Cell.DataType.html
deleted file mode 100644
index 7373038..000
--- a/devapidocs/org/apache/hadoop/hbase/class-use/Cell.DataType.html
+++ /dev/null
@@ -1,272 +0,0 @@
-<!DOCTYPE html>
-
-Uses of Class org.apache.hadoop.hbase.Cell.DataType (Apache HBase 3.0.0-SNAPSHOT API)
-
-Uses of Class org.apache.hadoop.hbase.Cell.DataType
-
-Packages that use Cell.DataType:
-  org.apache.hadoop.hbase
-  org.apache.hadoop.hbase.filter (Provides row-level filters applied to HRegion scan results during calls to ResultScanner.next().)
-
-Uses of Cell.DataType in org.apache.hadoop.hbase
-
-Methods in org.apache.hadoop.hbase that return Cell.DataType:
-  Cell.DataType ByteBufferKeyOnlyKeyValue.getType()
-  Cell.DataType Cell.getType(): Returns the type of cell in a human readable format using Cell.DataType
-  Cell.DataType PrivateCellUtil.FirstOnRowCell.getType()
-  Cell.DataType PrivateCellUtil.FirstOnRowByteBufferCell.getType()
-  Cell.DataType PrivateCellUtil.LastOnRowByteBufferCell.getType()
-  Cell.DataType PrivateCellUtil.LastOnRowCell.getType()
-  Cell.DataType PrivateCellUtil.FirstOnRowDeleteFamilyCell.getType()
-  default Cell.DataType ExtendedCell.getType(): Returns the type of cell in a human readable format using Cell.DataType
-  static Cell.DataType PrivateCellUtil.toDataType(byte type)
-  static Cell.DataType Cell.DataType.valueOf(String name): Returns the enum constant of this type with the specified name.
-  static Cell.DataType[] Cell.DataType.values(): Returns an array containing the constants of this enum type, in the order they are declared.
-
-Methods in org.apache.hadoop.hbase with parameters of type Cell.DataType:
-  CellBuilder CellBuilder.setType(Cell.DataType type)
-  RawCellBuilder RawCellBuilder.setType(Cell.DataType type)
-  ExtendedCellBuilder ExtendedCellBuilder.setType(Cell.DataType type)
-  ExtendedCellBuilder ExtendedCellBuilderImpl.setType(Cell.DataType type)
-  static KeyValue.Type PrivateCellUtil.toTypeByte(Cell.DataType type)
-
-Uses of Cell.DataType in org.apache.hadoop.hbase.filter
-
-Methods in org.apache.hadoop.hbase.filter that return Cell.DataType:
-  Cell.DataType KeyOnlyFilter.KeyOnlyCell.getType()
-  Cell.DataType KeyOnlyFilter.KeyOnlyByteBufferCell.getType()
-
-
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
-

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/class-use/Cell.Type.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Cell.Type.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Cell.Type.html
new file mode 100644
index 000..5019eca
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/Cell.Type.html
@@ -0,0 +1,272 @@
+<!DOCTYPE html>
+
+Uses of Class org.apache.hadoop.hbase.Cell.Type (Apache HBase 3.0.0-SNAPSHOT API)
+
-
-
-org.apache.hadoop.hbase.replication
-Class 
TableBasedReplicationQueuesClientImpl
-
-
-
-java.lang.Object
-  org.apache.hadoop.hbase.replication.ReplicationTableBase
-    org.apache.hadoop.hbase.replication.TableBasedReplicationQueuesClientImpl
-
-All Implemented Interfaces: ReplicationQueuesClient
-
-
-@InterfaceAudience.Private
-public class TableBasedReplicationQueuesClientImpl
-extends ReplicationTableBase
-implements ReplicationQueuesClient
-Implements the ReplicationQueuesClient interface on top of the Replication Table. It utilizes the ReplicationTableBase to access the Replication Table.
-
-
-
-
-
-
-
-
-
-
-
-Field Summary
-
-Fields inherited from class org.apache.hadoop.hbase.replication.ReplicationTableBase:
-abortable, CF_QUEUE, COL_QUEUE_OWNER, COL_QUEUE_OWNER_HISTORY, conf, QUEUE_HISTORY_DELIMITER, REPLICATION_TABLE_NAME, ROW_KEY_DELIMITER
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors
-
-Constructor and Description
-
-
-TableBasedReplicationQueuesClientImpl(org.apache.hadoop.conf.Configuration conf, Abortable abortable)
-
-
-TableBasedReplicationQueuesClientImpl(ReplicationQueuesClientArgumentsargs)
-
-
-
-
-
-
-
-
-
-Method Summary
-
-All Methods | Instance Methods | Concrete Methods
-
-List<String> getAllPeersFromHFileRefsQueue()
-    Get list of all peers from hfile reference queue.
-
-List<String> getAllQueues(String serverName)
-    Get a list of all queues for the specified region server.
-
-Set<String> getAllWALs()
-    Load all wals in all replication queues from ZK.
-
-int getHFileRefsNodeChangeVersion()
-    Get the change version number of replication hfile references node.
-
-List<String> getListOfReplicators()
-    Get a list of all region servers that have outstanding replication queues.
-
-List<
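
Earlier in this post the class-use page for Cell.DataType is deleted and recreated as Cell.Type, tracking the rename of the enum. A sketch of building a cell against the renamed type, assuming the HBase 2.x CellBuilder API:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellBuilderFactory;
    import org.apache.hadoop.hbase.CellBuilderType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CellTypeSketch {
      public static void main(String[] args) {
        // setType(Cell.Type) is the renamed form of the removed setType(Cell.DataType).
        Cell cell = CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
            .setRow(Bytes.toBytes("row1"))
            .setFamily(Bytes.toBytes("cf"))
            .setQualifier(Bytes.toBytes("q"))
            .setTimestamp(System.currentTimeMillis())
            .setType(Cell.Type.Put)
            .setValue(Bytes.toBytes("v"))
            .build();
        System.out.println(cell.getType()); // Put
      }
    }
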

[22/51] [partial] hbase-site git commit: Published site at .

2017-12-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
index 6fecbc9..2accda0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
@@ -34,4140 +34,4141 @@
 026import java.nio.charset.StandardCharsets;
 027import java.util.ArrayList;
 028import java.util.Arrays;
-029import java.util.Collection;
-030import java.util.EnumSet;
-031import java.util.HashMap;
-032import java.util.Iterator;
-033import java.util.LinkedList;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Set;
-037import java.util.concurrent.Callable;
-038import java.util.concurrent.ExecutionException;
-039import java.util.concurrent.Future;
-040import java.util.concurrent.TimeUnit;
-041import java.util.concurrent.TimeoutException;
-042import java.util.concurrent.atomic.AtomicInteger;
-043import java.util.concurrent.atomic.AtomicReference;
-044import java.util.regex.Pattern;
-045import java.util.stream.Collectors;
-046import java.util.stream.Stream;
-047import org.apache.hadoop.conf.Configuration;
-048import org.apache.hadoop.hbase.Abortable;
-049import org.apache.hadoop.hbase.CacheEvictionStats;
-050import org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
-051import org.apache.hadoop.hbase.ClusterMetrics.Option;
-052import org.apache.hadoop.hbase.ClusterStatus;
-053import org.apache.hadoop.hbase.DoNotRetryIOException;
-054import org.apache.hadoop.hbase.HBaseConfiguration;
-055import org.apache.hadoop.hbase.HConstants;
-056import org.apache.hadoop.hbase.HRegionInfo;
-057import org.apache.hadoop.hbase.HRegionLocation;
-058import org.apache.hadoop.hbase.HTableDescriptor;
-059import org.apache.hadoop.hbase.MasterNotRunningException;
-060import org.apache.hadoop.hbase.MetaTableAccessor;
-061import org.apache.hadoop.hbase.NamespaceDescriptor;
-062import org.apache.hadoop.hbase.NamespaceNotFoundException;
-063import org.apache.hadoop.hbase.NotServingRegionException;
-064import org.apache.hadoop.hbase.RegionLoad;
-065import org.apache.hadoop.hbase.RegionLocations;
-066import org.apache.hadoop.hbase.ServerName;
-067import org.apache.hadoop.hbase.TableExistsException;
-068import org.apache.hadoop.hbase.TableName;
-069import org.apache.hadoop.hbase.TableNotDisabledException;
-070import org.apache.hadoop.hbase.TableNotFoundException;
-071import org.apache.hadoop.hbase.UnknownRegionException;
-072import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-073import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-074import org.apache.hadoop.hbase.client.replication.TableCFs;
-075import org.apache.hadoop.hbase.client.security.SecurityCapability;
-076import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-077import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-078import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-079import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-080import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-081import org.apache.hadoop.hbase.quotas.QuotaFilter;
-082import org.apache.hadoop.hbase.quotas.QuotaRetriever;
-083import org.apache.hadoop.hbase.quotas.QuotaSettings;
-084import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-085import org.apache.hadoop.hbase.replication.ReplicationException;
-086import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-087import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-088import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-089import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-090import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-091import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-092import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-093import org.apache.hadoop.hbase.util.Addressing;
-094import org.apache.hadoop.hbase.util.Bytes;
-095import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-096import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-097import org.apache.hadoop.hbase.util.Pair;
-098import org.apache.hadoop.ipc.RemoteException;
-099import org.apache.hadoop.util.StringUtils;
-100import org.apache.yetus.audience.InterfaceAudience;
-101import org.apache.yetus.audience.InterfaceStability;
-102import org.slf4j.Logger;
-103import org.slf4j.LoggerFactory;
-104
-105import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-106import
[22/51] [partial] hbase-site git commit: Published site at .

2017-12-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/apidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
index 9c710d6..38865a3 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
@@ -36,695 +36,736 @@
 028import java.util.NavigableMap;
 029import java.util.TreeMap;
 030import java.util.UUID;
-031import org.apache.hadoop.hbase.Cell;
-032import org.apache.hadoop.hbase.CellScannable;
-033import org.apache.hadoop.hbase.CellScanner;
-034import org.apache.hadoop.hbase.CellUtil;
-035import org.apache.hadoop.hbase.HConstants;
-036import org.apache.hadoop.hbase.KeyValue;
-037import org.apache.hadoop.hbase.PrivateCellUtil;
-038import org.apache.hadoop.hbase.Tag;
-039import org.apache.hadoop.hbase.exceptions.DeserializationException;
-040import org.apache.hadoop.hbase.io.HeapSize;
-041import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-042import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-043import org.apache.hadoop.hbase.security.access.AccessControlConstants;
-044import org.apache.hadoop.hbase.security.access.AccessControlUtil;
-045import org.apache.hadoop.hbase.security.access.Permission;
-046import org.apache.hadoop.hbase.security.visibility.CellVisibility;
-047import org.apache.hadoop.hbase.security.visibility.VisibilityConstants;
-048import org.apache.hadoop.hbase.util.Bytes;
-049import org.apache.hadoop.hbase.util.ClassSize;
-050import org.apache.yetus.audience.InterfaceAudience;
-051
-052import org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
-053import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap;
-054import org.apache.hadoop.hbase.shaded.com.google.common.io.ByteArrayDataInput;
-055import org.apache.hadoop.hbase.shaded.com.google.common.io.ByteArrayDataOutput;
-056import org.apache.hadoop.hbase.shaded.com.google.common.io.ByteStreams;
-057
-058@InterfaceAudience.Public
-059public abstract class Mutation extends OperationWithAttributes implements Row, CellScannable,
-060    HeapSize {
-061  public static final long MUTATION_OVERHEAD = ClassSize.align(
-062      // This
-063      ClassSize.OBJECT +
-064      // row + OperationWithAttributes.attributes
-065      2 * ClassSize.REFERENCE +
-066      // Timestamp
-067      1 * Bytes.SIZEOF_LONG +
-068      // durability
-069      ClassSize.REFERENCE +
-070      // familyMap
+031import java.util.stream.Collectors;
+032import org.apache.hadoop.hbase.Cell;
+033import org.apache.hadoop.hbase.CellScannable;
+034import org.apache.hadoop.hbase.CellScanner;
+035import org.apache.hadoop.hbase.CellUtil;
+036import org.apache.hadoop.hbase.HConstants;
+037import org.apache.hadoop.hbase.KeyValue;
+038import org.apache.hadoop.hbase.PrivateCellUtil;
+039import org.apache.hadoop.hbase.Tag;
+040import org.apache.hadoop.hbase.exceptions.DeserializationException;
+041import org.apache.hadoop.hbase.io.HeapSize;
+042import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+043import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+044import org.apache.hadoop.hbase.security.access.AccessControlConstants;
+045import org.apache.hadoop.hbase.security.access.AccessControlUtil;
+046import org.apache.hadoop.hbase.security.access.Permission;
+047import org.apache.hadoop.hbase.security.visibility.CellVisibility;
+048import org.apache.hadoop.hbase.security.visibility.VisibilityConstants;
+049import org.apache.hadoop.hbase.util.Bytes;
+050import org.apache.hadoop.hbase.util.ClassSize;
+051import org.apache.yetus.audience.InterfaceAudience;
+052
+053import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
+054import org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
+055import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap;
+056import org.apache.hadoop.hbase.shaded.com.google.common.io.ByteArrayDataInput;
+057import org.apache.hadoop.hbase.shaded.com.google.common.io.ByteArrayDataOutput;
+058import org.apache.hadoop.hbase.shaded.com.google.common.io.ByteStreams;
+059
+060@InterfaceAudience.Public
+061public abstract class Mutation extends OperationWithAttributes implements Row, CellScannable,
+062    HeapSize {
+063  public static final long MUTATION_OVERHEAD = ClassSize.align(
+064      // This
+065      ClassSize.OBJECT +
+066      // row + OperationWithAttributes.attributes
+067      2 * ClassSize.REFERENCE +
+068      // Timestamp
+069      1 * Bytes.SIZEOF_LONG +
+070      // durability
 071      ClassSize.REFERENCE +
 072      // familyMap
-073      ClassSize.TREEMAP +
-074      // priority
-075      ClassSize.INTEGER
-076      );
-077
-078  /**
-079   * The attribute for storing the
[22/51] [partial] hbase-site git commit: Published site at .

2017-12-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.RowEndKeyComparator.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.RowEndKeyComparator.html 
b/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.RowEndKeyComparator.html
index 51d92c2..86fc15e 100644
--- 
a/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.RowEndKeyComparator.html
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.RowEndKeyComparator.html
@@ -44,2578 +44,2580 @@
 036import java.util.Iterator;
 037import java.util.List;
 038
-039import com.google.protobuf.ByteString;
-040import org.apache.commons.logging.Log;
-041import org.apache.commons.logging.LogFactory;
-042import org.apache.hadoop.hbase.Cell;
-043import org.apache.hadoop.hbase.CellComparator;
-044import org.apache.hadoop.hbase.KeyValue;
-045import org.apache.hadoop.io.RawComparator;
-046import org.apache.hadoop.io.WritableComparator;
-047import org.apache.hadoop.io.WritableUtils;
-048import org.apache.yetus.audience.InterfaceAudience;
-049import sun.misc.Unsafe;
-050
-051import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-052import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+039import org.apache.hadoop.hbase.Cell;
+040import org.apache.hadoop.hbase.CellComparator;
+041import org.apache.hadoop.hbase.KeyValue;
+042import org.apache.hadoop.io.RawComparator;
+043import org.apache.hadoop.io.WritableComparator;
+044import org.apache.hadoop.io.WritableUtils;
+045import org.apache.yetus.audience.InterfaceAudience;
+046import org.slf4j.Logger;
+047import org.slf4j.LoggerFactory;
+048
+049import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+050import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+051
+052import com.google.protobuf.ByteString;
 053
-054/**
-055 * Utility class that handles byte arrays, conversions to/from other types,
-056 * comparisons, hash code generation, manufacturing keys for HashMaps or
-057 * HashSets, and can be used as key in maps or trees.
-058 */
-059@SuppressWarnings("restriction")
-060@InterfaceAudience.Public
-061@edu.umd.cs.findbugs.annotations.SuppressWarnings(
-062    value="EQ_CHECK_FOR_OPERAND_NOT_COMPATIBLE_WITH_THIS",
-063    justification="It has been like this forever")
-064public class Bytes implements Comparable<Bytes> {
-065
-066  // Using the charset canonical name for String/byte[] conversions is much
-067  // more efficient due to use of cached encoders/decoders.
-068  private static final String UTF8_CSN = StandardCharsets.UTF_8.name();
-069
-070  //HConstants.EMPTY_BYTE_ARRAY should be updated if this changed
-071  private static final byte [] EMPTY_BYTE_ARRAY = new byte [0];
-072
-073  private static final Log LOG = LogFactory.getLog(Bytes.class);
+054import sun.misc.Unsafe;
+055
+056/**
+057 * Utility class that handles byte arrays, conversions to/from other types,
+058 * comparisons, hash code generation, manufacturing keys for HashMaps or
+059 * HashSets, and can be used as key in maps or trees.
+060 */
+061@SuppressWarnings("restriction")
+062@InterfaceAudience.Public
+063@edu.umd.cs.findbugs.annotations.SuppressWarnings(
+064    value="EQ_CHECK_FOR_OPERAND_NOT_COMPATIBLE_WITH_THIS",
+065    justification="It has been like this forever")
+066public class Bytes implements Comparable<Bytes> {
+067
+068  // Using the charset canonical name for String/byte[] conversions is much
+069  // more efficient due to use of cached encoders/decoders.
+070  private static final String UTF8_CSN = StandardCharsets.UTF_8.name();
+071
+072  //HConstants.EMPTY_BYTE_ARRAY should be updated if this changed
+073  private static final byte [] EMPTY_BYTE_ARRAY = new byte [0];
 074
-075  /**
-076   * Size of boolean in bytes
-077   */
-078  public static final int SIZEOF_BOOLEAN = Byte.SIZE / Byte.SIZE;
-079
-080  /**
-081   * Size of byte in bytes
-082   */
-083  public static final int SIZEOF_BYTE = SIZEOF_BOOLEAN;
-084
-085  /**
-086   * Size of char in bytes
-087   */
-088  public static final int SIZEOF_CHAR = Character.SIZE / Byte.SIZE;
-089
-090  /**
-091   * Size of double in bytes
-092   */
-093  public static final int SIZEOF_DOUBLE = Double.SIZE / Byte.SIZE;
-094
-095  /**
-096   * Size of float in bytes
-097   */
-098  public static final int SIZEOF_FLOAT = Float.SIZE / Byte.SIZE;
-099
-100  /**
-101   * Size of int in bytes
-102   */
-103  public static final int SIZEOF_INT = Integer.SIZE / Byte.SIZE;
-104
-105  /**
-106   * Size of long in bytes
-107   */
-108  public static final int SIZEOF_LONG = Long.SIZE / Byte.SIZE;
-109
-110  /**
-111   * Size of short in bytes
-112   */
-113  public static final int SIZEOF_SHORT = Short.SIZE / Byte.SIZE;
-114
-115  /**
-116   * Mask to apply to a long to reveal the lower int only. Use like
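
The SIZEOF_* constants in the fragment are all derived the same way, the type's bit width over Byte.SIZE; a small sketch, with the truncated lower-int mask filled in as an assumed value rather than quoted from the source:

    public class BytesSizeofSketch {
      static final int SIZEOF_INT = Integer.SIZE / Byte.SIZE;   // 4
      static final int SIZEOF_LONG = Long.SIZE / Byte.SIZE;     // 8
      // The fragment breaks off at the "mask to reveal the lower int of a long";
      // this value is an assumption, not quoted from the source.
      static final long MASK_FOR_LOWER_INT = 0xFFFFFFFFL;

      public static void main(String[] args) {
        long combined = 0x123456789ABCDEF0L;
        int lower = (int) (combined & MASK_FOR_LOWER_INT);
        System.out.printf("sizeof(int)=%d sizeof(long)=%d lower=0x%08X%n",
            SIZEOF_INT, SIZEOF_LONG, lower);
      }
    }
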

[22/51] [partial] hbase-site git commit: Published site at .

2017-12-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.RowRange.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.RowRange.html
 
b/apidocs/src-html/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.RowRange.html
index af883ab..0ab7c24 100644
--- 
a/apidocs/src-html/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.RowRange.html
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.RowRange.html
@@ -154,372 +154,374 @@
 146  /**
 147   * @return The filter serialized using pb
 148   */
-149  public byte[] toByteArray() {
-150    FilterProtos.MultiRowRangeFilter.Builder builder = FilterProtos.MultiRowRangeFilter
-151        .newBuilder();
-152    for (RowRange range : rangeList) {
-153      if (range != null) {
-154        FilterProtos.RowRange.Builder rangebuilder = FilterProtos.RowRange.newBuilder();
-155        if (range.startRow != null)
-156          rangebuilder.setStartRow(UnsafeByteOperations.unsafeWrap(range.startRow));
-157        rangebuilder.setStartRowInclusive(range.startRowInclusive);
-158        if (range.stopRow != null)
-159          rangebuilder.setStopRow(UnsafeByteOperations.unsafeWrap(range.stopRow));
-160        rangebuilder.setStopRowInclusive(range.stopRowInclusive);
-161        builder.addRowRangeList(rangebuilder.build());
-162      }
-163    }
-164    return builder.build().toByteArray();
-165  }
-166
-167  /**
-168   * @param pbBytes A pb serialized instance
-169   * @return An instance of MultiRowRangeFilter
-170   * @throws org.apache.hadoop.hbase.exceptions.DeserializationException
-171   */
-172  public static MultiRowRangeFilter parseFrom(final byte[] pbBytes)
-173      throws DeserializationException {
-174    FilterProtos.MultiRowRangeFilter proto;
-175    try {
-176      proto = FilterProtos.MultiRowRangeFilter.parseFrom(pbBytes);
-177    } catch (InvalidProtocolBufferException e) {
-178      throw new DeserializationException(e);
-179    }
-180    int length = proto.getRowRangeListCount();
-181    List<FilterProtos.RowRange> rangeProtos = proto.getRowRangeListList();
-182    List<RowRange> rangeList = new ArrayList<>(length);
-183    for (FilterProtos.RowRange rangeProto : rangeProtos) {
-184      RowRange range = new RowRange(rangeProto.hasStartRow() ? rangeProto.getStartRow()
-185          .toByteArray() : null, rangeProto.getStartRowInclusive(), rangeProto.hasStopRow() ?
-186          rangeProto.getStopRow().toByteArray() : null, rangeProto.getStopRowInclusive());
-187      rangeList.add(range);
-188    }
-189    return new MultiRowRangeFilter(rangeList);
-190  }
-191
-192  /**
-193   * @param o the filter to compare
-194   * @return true if and only if the fields of the filter that are serialized are equal to the
-195   *         corresponding fields in other. Used for testing.
-196   */
-197  boolean areSerializedFieldsEqual(Filter o) {
-198    if (o == this)
-199      return true;
-200    if (!(o instanceof MultiRowRangeFilter))
-201      return false;
-202
-203    MultiRowRangeFilter other = (MultiRowRangeFilter) o;
-204    if (this.rangeList.size() != other.rangeList.size())
-205      return false;
-206    for (int i = 0; i < rangeList.size(); ++i) {
-207      RowRange thisRange = this.rangeList.get(i);
-208      RowRange otherRange = other.rangeList.get(i);
-209      if (!(Bytes.equals(thisRange.startRow, otherRange.startRow) && Bytes.equals(
-210          thisRange.stopRow, otherRange.stopRow) && (thisRange.startRowInclusive ==
-211          otherRange.startRowInclusive) && (thisRange.stopRowInclusive ==
-212          otherRange.stopRowInclusive))) {
-213        return false;
-214      }
-215    }
-216    return true;
-217  }
-218
-219  /**
-220   * calculate the position where the row key in the ranges list.
-221   *
-222   * @param rowKey the row key to calculate
-223   * @return index the position of the row key
-224   */
-225  private int getNextRangeIndex(byte[] rowKey) {
-226    RowRange temp = new RowRange(rowKey, true, null, true);
-227    int index = Collections.binarySearch(rangeList, temp);
-228    if (index < 0) {
-229      int insertionPosition = -index - 1;
-230      // check if the row key in the range before the insertion position
-231      if (insertionPosition != 0 && rangeList.get(insertionPosition - 1).contains(rowKey)) {
-232        return insertionPosition - 1;
-233      }
-234      // check if the row key is before the first range
-235      if (insertionPosition == 0 && !rangeList.get(insertionPosition).contains(rowKey)) {
-236        return ROW_BEFORE_FIRST_RANGE;
-237      }
-238      if (!initialized) {
-239        initialized = true;
-240      }
-241      return insertionPosition;
-242    }
-243    // the row key equals one of the start keys, and the the range exclude the start key
-244

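The pair of methods above defines the protobuf round trip: toByteArray() maps each non-null RowRange onto a FilterProtos.RowRange, and parseFrom() rebuilds the filter, translating a protobuf parse failure into DeserializationException. A minimal round-trip sketch against the public API (class and method names as shown above; the row keys are arbitrary):

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.MultiRowRangeFilter;
import org.apache.hadoop.hbase.filter.MultiRowRangeFilter.RowRange;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiRowRangeRoundTrip {
  public static void main(String[] args) throws DeserializationException {
    // Two disjoint [start, stop) row ranges.
    List<RowRange> ranges = Arrays.asList(
        new RowRange(Bytes.toBytes("a"), true, Bytes.toBytes("f"), false),
        new RowRange(Bytes.toBytes("m"), true, Bytes.toBytes("q"), false));
    MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges);

    // Serialize to the pb wire form and parse it back.
    byte[] pb = filter.toByteArray();
    MultiRowRangeFilter copy = MultiRowRangeFilter.parseFrom(pb);
    System.out.println("round-trip bytes: " + copy.toByteArray().length);
  }
}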
[22/51] [partial] hbase-site git commit: Published site at .

2017-12-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
index f1a2443..a469e93 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
@@ -1350,415 +1350,415 @@
 1342    return delete;
 1343  }
 1344
-1345  public static Put makeBarrierPut(byte[] encodedRegionName, long seq, byte[] tableName) {
-1346    byte[] seqBytes = Bytes.toBytes(seq);
-1347    return new Put(encodedRegionName)
-1348        .addImmutable(HConstants.REPLICATION_BARRIER_FAMILY, seqBytes, seqBytes)
-1349        .addImmutable(HConstants.REPLICATION_META_FAMILY, tableNameCq, tableName);
-1350  }
-1351
-1352
-1353  public static Put makeDaughterPut(byte[] encodedRegionName, byte[] value) {
-1354    return new Put(encodedRegionName).addImmutable(HConstants.REPLICATION_META_FAMILY,
-1355        daughterNameCq, value);
-1356  }
-1357
-1358  public static Put makeParentPut(byte[] encodedRegionName, byte[] value) {
-1359    return new Put(encodedRegionName).addImmutable(HConstants.REPLICATION_META_FAMILY,
-1360        parentNameCq, value);
-1361  }
-1362
-1363  /**
-1364   * Adds split daughters to the Put
-1365   */
-1366  public static Put addDaughtersToPut(Put put, RegionInfo splitA, RegionInfo splitB) {
-1367    if (splitA != null) {
-1368      put.addImmutable(
-1369        HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, RegionInfo.toByteArray(splitA));
-1370    }
-1371    if (splitB != null) {
-1372      put.addImmutable(
-1373        HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, RegionInfo.toByteArray(splitB));
-1374    }
-1375    return put;
-1376  }
-1377
-1378  /**
-1379   * Put the passed <code>puts</code> to the <code>hbase:meta</code> table.
-1380   * Non-atomic for multi puts.
-1381   * @param connection connection we're using
-1382   * @param puts Put to add to hbase:meta
-1383   * @throws IOException
-1384   */
-1385  public static void putToMetaTable(final Connection connection, final Put... puts)
-1386    throws IOException {
-1387    put(getMetaHTable(connection), Arrays.asList(puts));
-1388  }
-1389
-1390  /**
-1391   * @param t Table to use (will be closed when done).
-1392   * @param puts puts to make
-1393   * @throws IOException
-1394   */
-1395  private static void put(final Table t, final List<Put> puts) throws IOException {
-1396    try {
-1397      if (METALOG.isDebugEnabled()) {
-1398        METALOG.debug(mutationsToString(puts));
-1399      }
-1400      t.put(puts);
-1401    } finally {
-1402      t.close();
-1403    }
-1404  }
-1405
-1406  /**
-1407   * Put the passed <code>ps</code> to the <code>hbase:meta</code> table.
-1408   * @param connection connection we're using
-1409   * @param ps Put to add to hbase:meta
-1410   * @throws IOException
-1411   */
-1412  public static void putsToMetaTable(final Connection connection, final List<Put> ps)
-1413    throws IOException {
-1414    Table t = getMetaHTable(connection);
-1415    try {
-1416      if (METALOG.isDebugEnabled()) {
-1417        METALOG.debug(mutationsToString(ps));
-1418      }
-1419      t.put(ps);
-1420    } finally {
-1421      t.close();
-1422    }
-1423  }
-1424
-1425  /**
-1426   * Delete the passed <code>d</code> from the <code>hbase:meta</code> table.
-1427   * @param connection connection we're using
-1428   * @param d Delete to add to hbase:meta
-1429   * @throws IOException
-1430   */
-1431  static void deleteFromMetaTable(final Connection connection, final Delete d)
-1432    throws IOException {
-1433    List<Delete> dels = new ArrayList<>(1);
-1434    dels.add(d);
-1435    deleteFromMetaTable(connection, dels);
-1436  }
-1437
-1438  /**
-1439   * Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
-1440   * @param connection connection we're using
-1441   * @param deletes Deletes to add to hbase:meta  This list should support #remove.
-1442   * @throws IOException
-1443   */
-1444  public static void deleteFromMetaTable(final Connection connection, final List<Delete> deletes)
-1445    throws IOException {
-1446    Table t = getMetaHTable(connection);
-1447    try {
-1448      if (METALOG.isDebugEnabled()) {
-1449        METALOG.debug(mutationsToString(deletes));
-1450      }
-1451      t.delete(deletes);
-1452    } finally {
-1453      t.close();
-1454    }
-1455  }
-1456
-1457  /**
-1458   * Deletes some replica columns corresponding to replicas for the passed rows
-1459   * @param metaRows rows in hbase:meta
-1460   * @param replicaIndexToDeleteFrom the replica ID we would start deleting from
-1461   * @param 

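putToMetaTable/putsToMetaTable above are thin wrappers: resolve the hbase:meta Table from the Connection, optionally log the mutations at debug level, apply them, and close the table. They are internal plumbing (normal clients never write hbase:meta directly); the sketch below shows only the call shape, with a hypothetical row and column:

import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaPutsSketch {
  public static void main(String[] args) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Hypothetical mutation; real callers build meta rows from RegionInfo state.
      Put put = new Put(Bytes.toBytes("region-row"))
          .addColumn(Bytes.toBytes("info"), Bytes.toBytes("state"), Bytes.toBytes("OPEN"));
      // Non-atomic across puts, as the javadoc above notes.
      MetaTableAccessor.putsToMetaTable(connection, Arrays.asList(put));
    }
  }
}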
[22/51] [partial] hbase-site git commit: Published site at .

2017-12-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
index 7c59e27..c904c56 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
@@ -119,4048 +119,4054 @@
 111import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 112import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
 113import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
-114import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-115import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-116import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-117import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-118import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-119import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-120import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-121import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-122import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-123import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-124import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-125import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-126import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-127import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-128import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-139import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-140import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-141import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-142import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-143import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-144import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-145import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-146import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-147import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-148import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-149import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-150import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-151import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-152import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-153import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-154import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-155import 

[22/51] [partial] hbase-site git commit: Published site at .

2017-12-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.html b/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.html
index aa20629..60f1ae3 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.html
@@ -49,7 +49,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-Prev Class
+Prev Class
 Next Class
 
 
@@ -710,7 +710,7 @@ public boolean
 
-Prev Class
+Prev Class
 Next Class
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/org/apache/hadoop/hbase/client/Increment.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Increment.html b/devapidocs/org/apache/hadoop/hbase/client/Increment.html
index 50eb67f..c61f148 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Increment.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Increment.html
@@ -555,7 +555,7 @@ implements Comparable
 
 
 setTimeRange
-public Increment setTimeRange(long minStamp,
+public Increment setTimeRange(long minStamp,
                               long maxStamp)
                        throws IOException
 Sets the TimeRange to be used on the Get for this increment.
@@ -564,6 +564,8 @@ implements Comparable
  periods of time (ie. counters that are partitioned by time).  By setting
  the range of valid times for this increment, you can potentially gain
  some performance with a more optimal Get operation.
+ Be careful adding the time range to this class as you will update the old cell if the
+ time range doesn't include the latest cells.
 
  This range is used as [minStamp, maxStamp).
@@ -583,7 +585,7 @@ implements Comparable
 
 
 setTimestamp
-public Increment setTimestamp(long timestamp)
+public Increment setTimestamp(long timestamp)
 Description copied from class: Mutation
 Set the timestamp of the delete.
@@ -598,7 +600,7 @@ implements Comparable
 
 
 setReturnResults
-public Increment setReturnResults(boolean returnResults)
+public Increment setReturnResults(boolean returnResults)
 
 Overrides:
 setReturnResults in class Mutation
@@ -615,7 +617,7 @@ implements Comparable
 
 
 isReturnResults
-public boolean isReturnResults()
+public boolean isReturnResults()
 
 Overrides:
 isReturnResults in class Mutation
@@ -630,7 +632,7 @@ implements Comparable
 
 
 numFamilies
-public int numFamilies()
+public int numFamilies()
 Method for retrieving the number of families to increment from
 
 Overrides:
@@ -646,7 +648,7 @@ implements Comparable
 
 
 hasFamilies
-public boolean hasFamilies()
+public boolean hasFamilies()
 Method for checking if any families have been inserted into this Increment
 
 Returns:
@@ -660,7 +662,7 @@ implements Comparable
 
 
 getFamilyMapOfLongs
-public Map<byte[],NavigableMap<byte[],Long>> getFamilyMapOfLongs()
+public Map<byte[],NavigableMap<byte[],Long>> getFamilyMapOfLongs()
 Before 0.95, when you called Increment#getFamilyMap(), you got back
  a map of families to a list of Longs.  Now, Mutation.getFamilyCellMap() returns
  families by list of Cells.  This method has been added so you can have the
@@ -679,7 +681,7 @@ implements Comparable
 
 
 toString
-public String toString()
+public String toString()
 Description 

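Combined with the [minStamp, maxStamp) contract above, a time range lets a counter that is partitioned by time only read (and therefore only add to) cells inside the partition. A sketch; the table handle and column names are placeholders:

import java.io.IOException;

import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TimeRangedIncrement {
  // Bump an hourly counter cell for the hour starting at hourStartMs.
  static void bumpHourlyCounter(Table table, byte[] row, long hourStartMs) throws IOException {
    Increment inc = new Increment(row);
    inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);
    // Only cells within [hourStart, hourStart + 1h) feed the read half of the
    // read-modify-write; note the caution above about older cells being updated
    // when the range excludes the latest cell.
    inc.setTimeRange(hourStartMs, hourStartMs + 3_600_000L);
    table.increment(inc);
  }
}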
[22/51] [partial] hbase-site git commit: Published site at .

2017-12-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowByteBufferCell.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowByteBufferCell.html b/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowByteBufferCell.html
index 74795bd..e712982 100644
--- a/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowByteBufferCell.html
+++ b/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowByteBufferCell.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 All Implemented Interfaces:
-Cell, SettableSequenceId
+Cloneable, Cell, ExtendedCell, HeapSize, RawCell
 
 
 Direct Known Subclasses:
@@ -131,7 +131,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class PrivateCellUtil.LastOnRowByteBufferCell
+private static class PrivateCellUtil.LastOnRowByteBufferCell
 extends PrivateCellUtil.EmptyByteBufferCell
 
 
@@ -152,18 +152,36 @@ extends PrivateCellUtil.EmptyByteBufferCell
 Field and Description
 
 
+private static int
+FIXED_OVERHEAD
+
+
 private short
 rlength
 
-
+
 private int
 roffset
 
-
+
 private ByteBuffer
 rowBuff
 
 
+
+
+
+
+Fields inherited from interface org.apache.hadoop.hbase.ExtendedCell
+CELL_NOT_BASED_ON_CHUNK
+
+
+
+
+
+Fields inherited from interface org.apache.hadoop.hbase.RawCell
+MAX_TAGS_LENGTH
+
 
 
 
@@ -217,13 +235,17 @@ extends PrivateCellUtil.EmptyByteBufferCell
 byte
 getTypeByte()
 
+
+long
+heapSize()
+
 
 
 
 
 
 Methods inherited from class org.apache.hadoop.hbase.PrivateCellUtil.EmptyByteBufferCell
-getFamilyArray, getFamilyByteBuffer, getFamilyLength, getFamilyOffset, getFamilyPosition, getQualifierArray, getQualifierByteBuffer, getQualifierLength, getQualifierOffset, getQualifierPosition, getRowArray, getRowOffset, getSequenceId, getTagsArray, getTagsByteBuffer, getTagsLength, getTagsOffset, getTagsPosition, getValueArray, getValueByteBuffer, getValueLength, getValueOffset, getValuePosition, setSequenceId
+getFamilyArray, getFamilyByteBuffer, getFamilyLength, getFamilyOffset, getFamilyPosition, getQualifierArray, getQualifierByteBuffer, getQualifierLength, getQualifierOffset, getQualifierPosition, getRowArray, getRowOffset, getSequenceId, getTagsArray, getTagsByteBuffer, getTagsLength, getTagsOffset, getTagsPosition, getValueArray, getValueByteBuffer, getValueLength, getValueOffset, getValuePosition, setSequenceId, setTimestamp, setTimestamp
 
 
 
@@ -232,6 +254,20 @@ extends PrivateCellUtil.EmptyByteBufferCell
 Methods inherited from class java.lang.Object
 clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
 
+
+
+
+
+Methods inherited from interface org.apache.hadoop.hbase.ExtendedCell
+deepClone, getChunkId, getSerializedSize, write, write
+
+
+
+
+
+Methods inherited from 

[22/51] [partial] hbase-site git commit: Published site at .

2017-12-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/devapidocs/src-html/org/apache/hadoop/hbase/fs/HFileSystem.ReorderBlocks.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/fs/HFileSystem.ReorderBlocks.html b/devapidocs/src-html/org/apache/hadoop/hbase/fs/HFileSystem.ReorderBlocks.html
index 1daa9e8..5636600 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/fs/HFileSystem.ReorderBlocks.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/fs/HFileSystem.ReorderBlocks.html
@@ -125,7 +125,7 @@
 117
 118  /**
 119   * Wrap a FileSystem object within a HFileSystem. The noChecksumFs and
-120   * writefs are both set to be the same specified fs. 
+120   * writefs are both set to be the same specified fs.
 121   * Do not verify hbase-checksums while reading data from filesystem.
 122   * @param fs Set the noChecksumFs and writeFs to this specified filesystem.
 123   */

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/devapidocs/src-html/org/apache/hadoop/hbase/fs/HFileSystem.ReorderWALBlocks.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/fs/HFileSystem.ReorderWALBlocks.html b/devapidocs/src-html/org/apache/hadoop/hbase/fs/HFileSystem.ReorderWALBlocks.html
index 1daa9e8..5636600 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/fs/HFileSystem.ReorderWALBlocks.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/fs/HFileSystem.ReorderWALBlocks.html
@@ -125,7 +125,7 @@
 117
 118  /**
 119   * Wrap a FileSystem object within a HFileSystem. The noChecksumFs and
-120   * writefs are both set to be the same specified fs. 
+120   * writefs are both set to be the same specified fs.
 121   * Do not verify hbase-checksums while reading data from filesystem.
 122   * @param fs Set the noChecksumFs and writeFs to this specified filesystem.
 123   */

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/devapidocs/src-html/org/apache/hadoop/hbase/fs/HFileSystem.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/fs/HFileSystem.html b/devapidocs/src-html/org/apache/hadoop/hbase/fs/HFileSystem.html
index 1daa9e8..5636600 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/fs/HFileSystem.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/fs/HFileSystem.html
@@ -125,7 +125,7 @@
 117
 118  /**
 119   * Wrap a FileSystem object within a HFileSystem. The noChecksumFs and
-120   * writefs are both set to be the same specified fs. 
+120   * writefs are both set to be the same specified fs.
 121   * Do not verify hbase-checksums while reading data from filesystem.
 122   * @param fs Set the noChecksumFs and writeFs to this specified filesystem.
 123   */

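Per the javadoc above, this constructor aliases noChecksumFs and writeFs to the one wrapped filesystem and skips hbase-checksum verification on read. A minimal sketch of the wrapping call, assuming a Hadoop Configuration that can resolve a default FileSystem:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.fs.HFileSystem;

public class HFileSystemWrapSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Both the checksum-skipping read path and the write path use 'fs'.
    HFileSystem hfs = new HFileSystem(fs);
    System.out.println(hfs.getUri());
  }
}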
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.html b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.html
index 48e79b7..70ea204 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.html
@@ -103,146 +103,142 @@
 095    }
 096
 097    int frameLengthInt = (int) frameLength;
-098    if (in.readableBytes() < frameLengthInt) {
+098    if (in.readableBytes() < frameLengthInt + FRAME_LENGTH_FIELD_LENGTH) {
 099      return;
 100    }
 101
 102    in.skipBytes(FRAME_LENGTH_FIELD_LENGTH);
 103
 104    // extract frame
-105    int readerIndex = in.readerIndex();
-106    ByteBuf frame = in.retainedSlice(readerIndex, frameLengthInt);
-107    in.readerIndex(readerIndex + frameLengthInt);
-108
-109    out.add(frame);
-110  }
-111
-112  private void handleTooBigRequest(ByteBuf in) throws IOException {
-113    in.markReaderIndex();
-114    int preIndex = in.readerIndex();
-115    int headerSize = readRawVarint32(in);
-116    if (preIndex == in.readerIndex()) {
-117      return;
-118    }
-119    if (headerSize < 0) {
-120      throw new IOException("negative headerSize: " + headerSize);
-121    }
-122
-123    if (in.readableBytes() < headerSize) {
-124      in.resetReaderIndex();
-125      return;
-126    }
-127
-128    RPCProtos.RequestHeader header = getHeader(in, headerSize);
-129
-130    // Notify the client about the offending request
-131    NettyServerCall reqTooBig =
-132      new NettyServerCall(header.getCallId(), connection.service, null, null, null, null,
-133        connection, 0, connection.addr, System.currentTimeMillis(), 0,
-134        connection.rpcServer.reservoir, connection.rpcServer.cellBlockBuilder, null);
-135
-136

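The one-line fix above is about consumption order: before the change, the decoder compared readableBytes() against the frame body alone and then skipped the 4-byte length field, so a buffer holding the length field plus an incomplete body could be sliced short. A self-contained sketch of the corrected check (the peek-then-slice shape mirrors the code above; this is not the real decoder):

import java.util.List;

import io.netty.buffer.ByteBuf;

public class FrameSliceSketch {
  private static final int FRAME_LENGTH_FIELD_LENGTH = 4;

  // Decode at most one frame; consume nothing until the whole frame has arrived.
  static void decodeOnce(ByteBuf in, List<Object> out) {
    if (in.readableBytes() < FRAME_LENGTH_FIELD_LENGTH) {
      return; // not even the length field yet
    }
    int frameLength = in.getInt(in.readerIndex()); // peek; does not move readerIndex
    if (in.readableBytes() < frameLength + FRAME_LENGTH_FIELD_LENGTH) {
      return; // wait until the length field plus body are both readable
    }
    in.skipBytes(FRAME_LENGTH_FIELD_LENGTH);
    int readerIndex = in.readerIndex();
    ByteBuf frame = in.retainedSlice(readerIndex, frameLength);
    in.readerIndex(readerIndex + frameLength);
    out.add(frame);
  }
}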
[22/51] [partial] hbase-site git commit: Published site at .

2017-12-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 3edfbef..9707b2c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -2459,5936 +2459,5935 @@
 2451      }
 2452
 2453      for (HStore s : storesToFlush) {
-2454        MemStoreSize flushableSize = s.getFlushableSize();
-2455        totalSizeOfFlushableStores.incMemStoreSize(flushableSize);
-2456        storeFlushCtxs.put(s.getColumnFamilyDescriptor().getName(),
-2457          s.createFlushContext(flushOpSeqId, tracker));
-2458        // for writing stores to WAL
-2459        committedFiles.put(s.getColumnFamilyDescriptor().getName(), null);
-2460        storeFlushableSize.put(s.getColumnFamilyDescriptor().getName(), flushableSize);
-2461      }
-2462
-2463      // write the snapshot start to WAL
-2464      if (wal != null && !writestate.readOnly) {
-2465        FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH,
-2466            getRegionInfo(), flushOpSeqId, committedFiles);
-2467        // No sync. Sync is below where no updates lock and we do FlushAction.COMMIT_FLUSH
-2468        WALUtil.writeFlushMarker(wal, this.getReplicationScope(), getRegionInfo(), desc, false,
-2469            mvcc);
-2470      }
-2471
-2472      // Prepare flush (take a snapshot)
-2473      for (StoreFlushContext flush : storeFlushCtxs.values()) {
-2474        flush.prepare();
-2475      }
-2476    } catch (IOException ex) {
-2477      doAbortFlushToWAL(wal, flushOpSeqId, committedFiles);
-2478      throw ex;
-2479    } finally {
-2480      this.updatesLock.writeLock().unlock();
-2481    }
-2482    String s = "Finished memstore snapshotting " + this + ", syncing WAL and waiting on mvcc, " +
-2483        "flushsize=" + totalSizeOfFlushableStores;
-2484    status.setStatus(s);
-2485    doSyncOfUnflushedWALChanges(wal, getRegionInfo());
-2486    return new PrepareFlushResult(storeFlushCtxs, committedFiles, storeFlushableSize, startTime,
-2487        flushOpSeqId, flushedSeqId, totalSizeOfFlushableStores);
-2488  }
-2489
-2490  /**
-2491   * Utility method broken out of internalPrepareFlushCache so that method is smaller.
-2492   */
-2493  private void logFatLineOnFlush(Collection<HStore> storesToFlush, long sequenceId) {
-2494    if (!LOG.isInfoEnabled()) {
-2495      return;
-2496    }
-2497    // Log a fat line detailing what is being flushed.
-2498    StringBuilder perCfExtras = null;
-2499    if (!isAllFamilies(storesToFlush)) {
-2500      perCfExtras = new StringBuilder();
-2501      for (HStore store: storesToFlush) {
-2502        perCfExtras.append("; ").append(store.getColumnFamilyName());
-2503        perCfExtras.append("=")
-2504            .append(StringUtils.byteDesc(store.getFlushableSize().getDataSize()));
-2505      }
-2506    }
-2507    LOG.info("Flushing " + storesToFlush.size() + "/" + stores.size() +
-2508        " column families, memstore=" + StringUtils.byteDesc(this.memstoreDataSize.get()) +
-2509        ((perCfExtras != null && perCfExtras.length() > 0)? perCfExtras.toString(): "") +
-2510        ((wal != null) ? "" : "; WAL is null, using passed sequenceid=" + sequenceId));
-2511  }
-2512
-2513  private void doAbortFlushToWAL(final WAL wal, final long flushOpSeqId,
-2514      final Map<byte[], List<Path>> committedFiles) {
-2515    if (wal == null) return;
-2516    try {
-2517      FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.ABORT_FLUSH,
-2518          getRegionInfo(), flushOpSeqId, committedFiles);
-2519      WALUtil.writeFlushMarker(wal, this.getReplicationScope(), getRegionInfo(), desc, false,
-2520          mvcc);
-2521    } catch (Throwable t) {
-2522      LOG.warn("Received unexpected exception trying to write ABORT_FLUSH marker to WAL:" +
-2523          StringUtils.stringifyException(t));
-2524      // ignore this since we will be aborting the RS with DSE.
-2525    }
-2526    // we have called wal.startCacheFlush(), now we have to abort it
-2527    wal.abortCacheFlush(this.getRegionInfo().getEncodedNameAsBytes());
-2528  }
-2529
-2530  /**
-2531   * Sync unflushed WAL changes. See HBASE-8208 for details
-2532   */
-2533  private static void doSyncOfUnflushedWALChanges(final WAL wal, final RegionInfo hri)
-2534      throws IOException {
-2535    if (wal == null) {
-2536      return;
-2537    }
-2538    try {
-2539      wal.sync(); // ensure that flush marker is sync'ed
-2540    } catch (IOException ioe) {
-2541      

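The ordering above is the crux of the prepare phase: the START_FLUSH marker is written (deliberately not yet synced) and the per-store snapshots are taken while holding the updates write lock; an IOException inside the block writes an ABORT_FLUSH marker before rethrowing; only after the lock is released is the WAL synced (HBASE-8208). A self-contained sketch of that ordering, with the HBase internals reduced to hypothetical interfaces:

import java.io.IOException;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class PrepareFlushSketch {
  interface Wal { void writeMarker(String action); void sync() throws IOException; }
  interface StoreFlush { void prepare() throws IOException; } // memstore snapshot

  private final ReentrantReadWriteLock updatesLock = new ReentrantReadWriteLock();

  void prepareFlush(Wal wal, List<StoreFlush> stores) throws IOException {
    updatesLock.writeLock().lock();
    try {
      wal.writeMarker("START_FLUSH");   // written under the lock, not yet synced
      for (StoreFlush s : stores) {
        s.prepare();                    // snapshot each store
      }
    } catch (IOException ex) {
      wal.writeMarker("ABORT_FLUSH");   // tell replay the flush never happened
      throw ex;
    } finally {
      updatesLock.writeLock().unlock();
    }
    wal.sync();                         // durably sync the start marker afterwards
  }
}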
[22/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index 01c0791..4d26b63 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 008@InterfaceAudience.Private
 009public class Version {
 010  public static final String version = "3.0.0-SNAPSHOT";
-011  public static final String revision = "8b32d3792934507c774997cd82dc061b75410f83";
+011  public static final String revision = "6a6409a30aa634875467683203de0e21e0491986";
 012  public static final String user = "jenkins";
-013  public static final String date = "Wed Nov 29 14:42:11 UTC 2017";
+013  public static final String date = "Thu Nov 30 14:42:34 UTC 2017";
 014  public static final String url = "git://asf920.gq1.ygridcore.net/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015  public static final String srcChecksum = "aea71cf3474c2eecf646181c5e4c0fa3";
+015  public static final String srcChecksum = "252c37b6e2a91e50595f45109cbc77dd";
 016}
 
 

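Version.java is regenerated on every build, which is why each published site flips the revision, date, and source checksum. The constants are plain public fields on an @InterfaceAudience.Private class, so reading them is trivial, though not a supported public API; a sketch:

import org.apache.hadoop.hbase.Version;

public class PrintVersion {
  public static void main(String[] args) {
    System.out.println("version  = " + Version.version);
    System.out.println("revision = " + Version.revision);
    System.out.println("built    = " + Version.date + " by " + Version.user);
  }
}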
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
index 1bddf29..f667b93 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
@@ -124,380 +124,381 @@
 116        HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
 117    // Go big. Multiply by 10. If we can't get to meta after this many retries
 118    // then something is seriously wrong.
-119    int serversideMultiplier = c.getInt("hbase.client.serverside.retries.multiplier", 10);
-120    int retries = hcRetries * serversideMultiplier;
-121    c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
-122    log.info(sn + " server-side Connection retries=" + retries);
-123  }
-124
-125  /**
-126   * A ClusterConnection that will short-circuit RPC making direct invocations against the
-127   * localhost if the invocation target is 'this' server; save on network and protobuf
-128   * invocations.
-129   */
-130  // TODO This has to still do PB marshalling/unmarshalling stuff. Check how/whether we can avoid.
-131  @VisibleForTesting // Class is visible so can assert we are short-circuiting when expected.
-132  public static class ShortCircuitingClusterConnection extends ConnectionImplementation {
-133    private final ServerName serverName;
-134    private final AdminService.BlockingInterface localHostAdmin;
-135    private final ClientService.BlockingInterface localHostClient;
-136
-137    private ShortCircuitingClusterConnection(Configuration conf, ExecutorService pool, User user,
-138        ServerName serverName, AdminService.BlockingInterface admin,
-139        ClientService.BlockingInterface client)
-140        throws IOException {
-141      super(conf, pool, user);
-142      this.serverName = serverName;
-143      this.localHostAdmin = admin;
-144      this.localHostClient = client;
-145    }
-146
-147    @Override
-148    public AdminService.BlockingInterface getAdmin(ServerName sn) throws IOException {
-149      return serverName.equals(sn) ? this.localHostAdmin : super.getAdmin(sn);
-150    }
-151
-152    @Override
-153    public ClientService.BlockingInterface getClient(ServerName sn) throws IOException {
-154      return serverName.equals(sn) ? this.localHostClient : super.getClient(sn);
-155    }
-156
-157    @Override
-158    public MasterKeepAliveConnection getKeepAliveMasterService() throws MasterNotRunningException {
-159      if (this.localHostClient instanceof MasterService.BlockingInterface) {
-160        return new ShortCircuitMasterConnection((MasterService.BlockingInterface)this.localHostClient);
-161      }
-162      return super.getKeepAliveMasterService();
-163    }
-164  }
-165
-166  /**
-167   * Creates a short-circuit connection that can bypass the RPC layer (serialization,
-168   * deserialization, networking, etc..) when talking to a local server.
-169   * @param conf the current configuration
-170   * @param pool the thread pool to use for batch operations
-171   * @param user the user the connection is for
-172   * @param serverName the local server name
-173   * @param admin the admin interface of the local server
-174   * @param client the client interface of the local server
-175   * 

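The retry bump above is plain arithmetic: the configured client retry count times hbase.client.serverside.retries.multiplier (default 10), written back under HConstants.HBASE_CLIENT_RETRIES_NUMBER so server-side connections try much harder to reach meta. The same computation, standalone:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class ServerSideRetriesSketch {
  public static void main(String[] args) {
    Configuration c = HBaseConfiguration.create();
    int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
        HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    // Multiply the client-side retry count by the server-side multiplier.
    int multiplier = c.getInt("hbase.client.serverside.retries.multiplier", 10);
    c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, hcRetries * multiplier);
    System.out.println("server-side retries = "
        + c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, -1));
  }
}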
[22/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
index cb363f9..d7f2c1a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
@@ -880,1508 +880,1508 @@
 872        throws IOException;
 873
 874  /**
-875   * Major compact a table. Asynchronous operation in that this method requests
-876   * that a Compaction run and then it returns. It does not wait on the completion of Compaction
+875   * Compact a table.  Asynchronous operation in that this method requests that a
+876   * Compaction run and then it returns. It does not wait on the completion of Compaction
 877   * (it can take a while).
 878   *
-879   * @param tableName table to major compact
-880   * @throws IOException if a remote or network exception occurs
-881   */
-882  void majorCompact(TableName tableName) throws IOException;
-883
-884  /**
-885   * Major compact a table or an individual region. Asynchronous operation in that this method requests
-886   * that a Compaction run and then it returns. It does not wait on the completion of Compaction
-887   * (it can take a while).
-888   *
-889   * @param regionName region to major compact
-890   * @throws IOException if a remote or network exception occurs
-891   */
-892  void majorCompactRegion(byte[] regionName) throws IOException;
-893
-894  /**
-895   * Major compact a column family within a table. Asynchronous operation in that this method requests
-896   * that a Compaction run and then it returns. It does not wait on the completion of Compaction
-897   * (it can take a while).
-898   *
-899   * @param tableName table to major compact
-900   * @param columnFamily column family within a table
-901   * @throws IOException if a remote or network exception occurs
-902   */
-903  void majorCompact(TableName tableName, byte[] columnFamily)
-904    throws IOException;
-905
-906  /**
-907   * Major compact a column family within region. Asynchronous operation in that this method requests
-908   * that a Compaction run and then it returns. It does not wait on the completion of Compaction
-909   * (it can take a while).
-910   *
-911   * @param regionName region to major compact
-912   * @param columnFamily column family within a region
-913   * @throws IOException if a remote or network exception occurs
-914   */
-915  void majorCompactRegion(byte[] regionName, byte[] columnFamily)
-916    throws IOException;
-917
-918  /**
-919   * Compact all regions on the region server. Asynchronous operation in that this method requests
-920   * that a Compaction run and then it returns. It does not wait on the completion of Compaction (it
-921   * can take a while).
-922   * @param sn the region server name
-923   * @param major if it's major compaction
-924   * @throws IOException if a remote or network exception occurs
-925   * @throws InterruptedException
-926   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
-927   *   {@link #compactRegionServer(ServerName)} or
-928   *   {@link #majorCompactRegionServer(ServerName)}.
+879   * @param tableName table to compact
+880   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
+881   * @throws IOException if a remote or network exception occurs
+882   * @throws InterruptedException
+883   */
+884  void compact(TableName tableName, CompactType compactType)
+885    throws IOException, InterruptedException;
+886
+887  /**
+888   * Compact a column family within a table.  Asynchronous operation in that this method
+889   * requests that a Compaction run and then it returns. It does not wait on the
+890   * completion of Compaction (it can take a while).
+891   *
+892   * @param tableName table to compact
+893   * @param columnFamily column family within a table
+894   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
+895   * @throws IOException if not a mob column family or if a remote or network exception occurs
+896   * @throws InterruptedException
+897   */
+898  void compact(TableName tableName, byte[] columnFamily, CompactType compactType)
+899    throws IOException, InterruptedException;
+900
+901  /**
+902   * Major compact a table. Asynchronous operation in that this method requests
+903   * that a Compaction run and then it returns. It does not wait on the completion of Compaction
+904   * (it can take a while).
+905   *
+906   * @param tableName table to major compact
+907   * @throws IOException if a remote or network exception occurs
+908   */
+909  void majorCompact(TableName tableName) throws IOException;
+910
+911  /**
+912   * Major compact a table or 

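All of these compaction calls just enqueue work and return. A hedged usage sketch against the signatures above (table and family names are placeholders; CompactType.NORMAL is assumed to select the ordinary, non-MOB path):

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactType;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionRequests {
  public static void main(String[] args) throws IOException, InterruptedException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("demo");   // hypothetical table
      admin.majorCompact(table);                     // whole table, returns immediately
      admin.compact(table, Bytes.toBytes("cf"), CompactType.NORMAL); // one family
    }
  }
}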
[22/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index 40cd159..2da0903 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -260,7 +260,7 @@
 252
 253    // Update meta events (for testing)
 254    if (hasProcExecutor) {
-255      getProcedureScheduler().suspendEvent(metaLoadEvent);
+255      metaLoadEvent.suspend();
 256      setFailoverCleanupDone(false);
 257      for (RegionInfo hri: getMetaRegionSet()) {
 258        setMetaInitialized(hri, false);
@@ -421,1455 +421,1454 @@
 413  }
 414
 415  public boolean waitMetaInitialized(final Procedure proc, final RegionInfo regionInfo) {
-416    return getProcedureScheduler().waitEvent(
-417      getMetaInitializedEvent(getMetaForRegion(regionInfo)), proc);
-418  }
-419
-420  private void setMetaInitialized(final RegionInfo metaRegionInfo, final boolean isInitialized) {
-421    assert isMetaRegion(metaRegionInfo) : "unexpected non-meta region " + metaRegionInfo;
-422    final ProcedureEvent metaInitEvent = getMetaInitializedEvent(metaRegionInfo);
-423    if (isInitialized) {
-424      getProcedureScheduler().wakeEvent(metaInitEvent);
-425    } else {
-426      getProcedureScheduler().suspendEvent(metaInitEvent);
-427    }
-428  }
-429
-430  private ProcedureEvent getMetaInitializedEvent(final RegionInfo metaRegionInfo) {
-431    assert isMetaRegion(metaRegionInfo) : "unexpected non-meta region " + metaRegionInfo;
-432    // TODO: handle multiple meta.
-433    return metaInitializedEvent;
-434  }
-435
-436  public boolean waitMetaLoaded(final Procedure proc) {
-437    return getProcedureScheduler().waitEvent(metaLoadEvent, proc);
-438  }
-439
-440  protected void wakeMetaLoadedEvent() {
-441    getProcedureScheduler().wakeEvent(metaLoadEvent);
-442    assert isMetaLoaded() : "expected meta to be loaded";
-443  }
-444
-445  public boolean isMetaLoaded() {
-446    return metaLoadEvent.isReady();
-447  }
-448
-449  // ============================================================================================
-450  //  TODO: Sync helpers
-451  // ============================================================================================
-452  public void assignMeta(final RegionInfo metaRegionInfo) throws IOException {
-453    assignMeta(metaRegionInfo, null);
-454  }
-455
-456  public void assignMeta(final RegionInfo metaRegionInfo, final ServerName serverName)
-457      throws IOException {
-458    assert isMetaRegion(metaRegionInfo) : "unexpected non-meta region " + metaRegionInfo;
-459    AssignProcedure proc;
-460    if (serverName != null) {
-461      LOG.debug("Try assigning Meta " + metaRegionInfo + " to " + serverName);
-462      proc = createAssignProcedure(metaRegionInfo, serverName);
-463    } else {
-464      LOG.debug("Assigning " + metaRegionInfo.getRegionNameAsString());
-465      proc = createAssignProcedure(metaRegionInfo, false);
-466    }
-467    ProcedureSyncWait.submitAndWaitProcedure(master.getMasterProcedureExecutor(), proc);
-468  }
-469
-470  /**
-471   * Start a new thread to check if there are region servers whose versions are higher than others.
-472   * If so, move all system table regions to RS with the highest version to keep compatibility.
-473   * The reason is, RS in new version may not be able to access RS in old version when there are
-474   * some incompatible changes.
-475   */
-476  public void checkIfShouldMoveSystemRegionAsync() {
-477    new Thread(() -> {
-478      try {
-479        synchronized (checkIfShouldMoveSystemRegionLock) {
-480          List<RegionPlan> plans = new ArrayList<>();
-481          for (ServerName server : getExcludedServersForSystemTable()) {
-482            if (master.getServerManager().isServerDead(server)) {
-483              // TODO: See HBASE-18494 and HBASE-18495. Though getExcludedServersForSystemTable()
-484              // considers only online servers, the server could be queued for dead server
-485              // processing. As region assignments for crashed server is handled by
-486              // ServerCrashProcedure, do NOT handle them here. The goal is to handle this through
-487              // regular flow of LoadBalancer as a favored node and not to have this special
-488              // handling.
-489              continue;
-490            }
-491            List<RegionInfo> regionsShouldMove = getCarryingSystemTables(server);
-492            if (!regionsShouldMove.isEmpty()) {
-493              for (RegionInfo regionInfo : regionsShouldMove) {
-494                

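The refactor visible in the first hunk moves suspend onto the event itself (metaLoadEvent.suspend()) instead of going through the scheduler, but the gating pattern is unchanged: suspend at startup, park callers of waitMetaLoaded(), wake once meta is loaded, and let isMetaLoaded() read the ready bit. A self-contained sketch of that pattern using plain threads (the real ProcedureEvent parks Procedures on the scheduler, not threads):

public class EventGateSketch {
  static final class Gate {
    private boolean ready;
    synchronized void suspend() { ready = false; }
    synchronized void wake() { ready = true; notifyAll(); }
    synchronized boolean isReady() { return ready; }
    synchronized void await() throws InterruptedException {
      while (!ready) {
        wait();
      }
    }
  }

  public static void main(String[] args) throws InterruptedException {
    Gate metaLoaded = new Gate();
    metaLoaded.suspend();                  // startup: meta not yet loaded
    new Thread(metaLoaded::wake).start();  // meta-load completion wakes waiters
    metaLoaded.await();
    System.out.println("meta loaded: " + metaLoaded.isReady());
  }
}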
[22/51] [partial] hbase-site git commit: Published site at .

2017-11-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.TableCoprocessorAttribute.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.TableCoprocessorAttribute.html b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.TableCoprocessorAttribute.html
index 3c079dc..4dbd4f5 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.TableCoprocessorAttribute.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.TableCoprocessorAttribute.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class RegionCoprocessorHost.TableCoprocessorAttribute
+static class RegionCoprocessorHost.TableCoprocessorAttribute
 extends Object
 
 
@@ -228,7 +228,7 @@ extends Object
 
 
 path
-private org.apache.hadoop.fs.Path path
+private org.apache.hadoop.fs.Path path
 
 
 
@@ -237,7 +237,7 @@ extends Object
 
 
 className
-private String className
+private String className
 
 
 
@@ -246,7 +246,7 @@ extends Object
 
 
 priority
-private int priority
+private int priority
 
 
 
@@ -255,7 +255,7 @@ extends Object
 
 
 conf
-private org.apache.hadoop.conf.Configuration conf
+private org.apache.hadoop.conf.Configuration conf
 
 
 
@@ -272,7 +272,7 @@ extends Object
 
 
 TableCoprocessorAttribute
-public TableCoprocessorAttribute(org.apache.hadoop.fs.Path path,
+public TableCoprocessorAttribute(org.apache.hadoop.fs.Path path,
                                  String className,
                                  int priority,
                                  org.apache.hadoop.conf.Configuration conf)
@@ -292,7 +292,7 @@ extends Object
 
 
 getPath
-public org.apache.hadoop.fs.Path getPath()
+public org.apache.hadoop.fs.Path getPath()
 
 
 
@@ -301,7 +301,7 @@ extends Object
 
 
 getClassName
-public String getClassName()
+public String getClassName()
 
 
 
@@ -310,7 +310,7 @@ extends Object
 
 
 getPriority
-public int getPriority()
+public int getPriority()
 
 
 
@@ -319,7 +319,7 @@ extends Object
 
 
 getConf
-public org.apache.hadoop.conf.Configuration getConf()
+public org.apache.hadoop.conf.Configuration getConf()
 
 
 



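The four fields above (path, className, priority, conf) correspond to the four segments of the pipe-delimited coprocessor table attribute that HBase parses; per the coprocessor loading docs the jar path, priority, and arguments are optional. An illustrative spec string only (jar path and class name are hypothetical):

public class TableCoprocessorSpec {
  public static void main(String[] args) {
    // [jar path] | class name | [priority] | [key=value,...]
    String spec = "hdfs:///user/hbase/coprocessors/demo.jar" // hypothetical jar
        + "|com.example.DemoRegionObserver"                  // hypothetical class
        + "|1001"                                            // chaining priority
        + "|arg1=1,arg2=2";                                  // per-coprocessor conf
    System.out.println(spec);
  }
}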
[22/51] [partial] hbase-site git commit: Published site at .

2017-11-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9118853f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.TableCoprocessorAttribute.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.TableCoprocessorAttribute.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.TableCoprocessorAttribute.html
index a9e8ff7..c670860 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.TableCoprocessorAttribute.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.TableCoprocessorAttribute.html
@@ -65,1723 +65,1733 @@
 057import org.apache.hadoop.hbase.client.TableDescriptor;
 058import org.apache.hadoop.hbase.coprocessor.BaseEnvironment;
 059import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver;
-060import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-061import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
-062import org.apache.hadoop.hbase.coprocessor.CoprocessorServiceBackwardCompatiblity;
-063import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
-064import org.apache.hadoop.hbase.coprocessor.EndpointObserver;
-065import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
-066import org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor;
-067import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-068import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
-069import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-070import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-071import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-072import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-073import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-074import org.apache.hadoop.hbase.io.Reference;
-075import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-076import org.apache.hadoop.hbase.metrics.MetricRegistry;
-077import org.apache.hadoop.hbase.regionserver.Region.Operation;
-078import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-079import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-080import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker;
-081import org.apache.hadoop.hbase.security.User;
-082import org.apache.hadoop.hbase.util.Bytes;
-083import org.apache.hadoop.hbase.util.CoprocessorClassLoader;
-084import org.apache.hadoop.hbase.util.Pair;
-085import org.apache.hadoop.hbase.wal.WALEdit;
-086import org.apache.hadoop.hbase.wal.WALKey;
-087import org.apache.yetus.audience.InterfaceAudience;
-088
-089import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-090
-091/**
-092 * Implements the coprocessor environment and runtime support for coprocessors
-093 * loaded within a {@link Region}.
-094 */
-095@InterfaceAudience.Private
-096public class RegionCoprocessorHost
-097    extends CoprocessorHost<RegionCoprocessor, RegionCoprocessorEnvironment> {
-098
-099  private static final Log LOG = LogFactory.getLog(RegionCoprocessorHost.class);
-100  // The shared data map
-101  private static final ReferenceMap<String, ConcurrentMap<String, Object>> SHARED_DATA_MAP =
-102      new ReferenceMap<>(AbstractReferenceMap.ReferenceStrength.HARD,
-103          AbstractReferenceMap.ReferenceStrength.WEAK);
-104
-105  // optimization: no need to call postScannerFilterRow, if no coprocessor implements it
-106  private final boolean hasCustomPostScannerFilterRow;
-107
-108  /**
-109   *
-110   * Encapsulation of the environment of each coprocessor
-111   */
-112  private static class RegionEnvironment extends BaseEnvironment<RegionCoprocessor>
-113      implements RegionCoprocessorEnvironment {
-114    private Region region;
-115    ConcurrentMap<String, Object> sharedData;
-116    private final MetricRegistry metricRegistry;
-117    private final RegionServerServices services;
-118
-119    /**
-120     * Constructor
-121     * @param impl the coprocessor instance
-122     * @param priority chaining priority
-123     */
-124    public RegionEnvironment(final RegionCoprocessor impl, final int priority,
-125        final int seq, final Configuration conf, final Region region,
-126        final RegionServerServices services, final ConcurrentMap<String, Object> sharedData) {
-127      super(impl, priority, seq, conf);
-128      this.region = region;
-129      this.sharedData = sharedData;
-130      this.services = services;
-131      this.metricRegistry =
-132          MetricsCoprocessor.createRegistryForRegionCoprocessor(impl.getClass().getName());
-133    }
-134
-135    /** @return the region */
-136    @Override
-137    public Region getRegion() {
-138      return region;
-139    }
-140
-141    public OnlineRegions getOnlineRegions() {
-142      return 

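For reference, the kind of coprocessor this host loads: in the HBase 2 coprocessor API a RegionCoprocessor publishes its observers through Optional getters, so a minimal do-nothing observer looks like the sketch below (the class name is hypothetical; both interfaces appear in the import list above):

import java.util.Optional;

import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

public class NoopRegionCoprocessor implements RegionCoprocessor, RegionObserver {
  @Override
  public Optional<RegionObserver> getRegionObserver() {
    // Hand the host this object as the (empty) RegionObserver implementation.
    return Optional.of(this);
  }
}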