[14/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.html
index 1876095..b4aeccc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.html
@@ -104,40 +104,56 @@
 096    return hasExpiredStores(storeFiles);
 097  }
 098
-099  private boolean hasExpiredStores(Collection<HStoreFile> files) {
-100    long currentTime = EnvironmentEdgeManager.currentTime();
-101    for(HStoreFile sf: files){
-102      // Check MIN_VERSIONS is in HStore removeUnneededFiles
-103      long maxTs = sf.getReader().getMaxTimestamp();
-104      long maxTtl = storeConfigInfo.getStoreFileTtl();
-105      if (maxTtl == Long.MAX_VALUE
-106          || (currentTime - maxTtl < maxTs)){
-107        continue;
-108      } else{
-109        return true;
-110      }
-111    }
-112    return false;
-113  }
-114
-115  private Collection<HStoreFile> getExpiredStores(Collection<HStoreFile> files,
-116      Collection<HStoreFile> filesCompacting) {
-117    long currentTime = EnvironmentEdgeManager.currentTime();
-118    Collection<HStoreFile> expiredStores = new ArrayList<>();
-119    for(HStoreFile sf: files){
-120      // Check MIN_VERSIONS is in HStore removeUnneededFiles
-121      long maxTs = sf.getReader().getMaxTimestamp();
-122      long maxTtl = storeConfigInfo.getStoreFileTtl();
-123      if (maxTtl == Long.MAX_VALUE
-124          || (currentTime - maxTtl < maxTs)){
-125        continue;
-126      } else if(filesCompacting == null || !filesCompacting.contains(sf)){
-127        expiredStores.add(sf);
-128      }
-129    }
-130    return expiredStores;
-131  }
-132}
+099  /**
+100   * The FIFOCompactionPolicy only choose those TTL expired HFiles as the compaction candidates. So
+101   * if all HFiles are TTL expired, then the compaction will generate a new empty HFile. While its
+102   * max timestamp will be Long.MAX_VALUE. If not considered separately, the HFile will never be
+103   * archived because its TTL will be never expired. So we'll check the empty store file separately.
+104   * (See HBASE-21504)
+105   */
+106  private boolean isEmptyStoreFile(HStoreFile sf) {
+107    return sf.getReader().getEntries() == 0;
+108  }
+109
+110  private boolean hasExpiredStores(Collection<HStoreFile> files) {
+111    long currentTime = EnvironmentEdgeManager.currentTime();
+112    for (HStoreFile sf : files) {
+113      if (isEmptyStoreFile(sf)) {
+114        return true;
+115      }
+116      // Check MIN_VERSIONS is in HStore removeUnneededFiles
+117      long maxTs = sf.getReader().getMaxTimestamp();
+118      long maxTtl = storeConfigInfo.getStoreFileTtl();
+119      if (maxTtl == Long.MAX_VALUE || (currentTime - maxTtl < maxTs)) {
+120        continue;
+121      } else {
+122        return true;
+123      }
+124    }
+125    return false;
+126  }
+127
+128  private Collection<HStoreFile> getExpiredStores(Collection<HStoreFile> files,
+129      Collection<HStoreFile> filesCompacting) {
+130    long currentTime = EnvironmentEdgeManager.currentTime();
+131    Collection<HStoreFile> expiredStores = new ArrayList<>();
+132    for (HStoreFile sf : files) {
+133      if (isEmptyStoreFile(sf)) {
+134        expiredStores.add(sf);
+135        continue;
+136      }
+137      // Check MIN_VERSIONS is in HStore removeUnneededFiles
+138      long maxTs = sf.getReader().getMaxTimestamp();
+139      long maxTtl = storeConfigInfo.getStoreFileTtl();
+140      if (maxTtl == Long.MAX_VALUE || (currentTime - maxTtl < maxTs)) {
+141        continue;
+142      } else if (filesCompacting == null || !filesCompacting.contains(sf)) {
+143        expiredStores.add(sf);
+144      }
+145    }
+146    return expiredStores;
+147  }
+148}
 
 
 

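Editor's note on the hunk above: it is the HBASE-21504 change, where FIFOCompactionPolicy starts treating empty (zero-entry) store files as compaction candidates so they can still be archived even though their max timestamp is Long.MAX_VALUE. As a hedged illustration only, not part of this patch, this is roughly how a table opts into the FIFO policy; the table name, family name and one-day TTL are invented, "connection" is an existing org.apache.hadoop.hbase.client.Connection, and the compaction-policy attribute key is taken from the HBase reference guide.

// Hedged sketch (not from the patch): opt a table into FIFO compaction.
try (Admin admin = connection.getAdmin()) {
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("event_log"))
      .setValue("hbase.hstore.defaultengine.compactionpolicy.class",
          "org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy")
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("e"))
          .setTimeToLive(24 * 60 * 60) // seconds; TTL-expired (and now empty) files get dropped
          .build())
      .build();
  admin.createTable(desc);
}
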
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/src-html/org/apache/hadoop/hbase/util/JVMClusterUtil.MasterThread.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/JVMClusterUtil.MasterThread.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/JVMClusterUtil.MasterThread.html
index 67f0fc6..9870370 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/JVMClusterUtil.MasterThread.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/JVMClusterUtil.MasterThread.html
@@ -257,99 +257,109 @@
 249      // Do backups first.
 250      JVMClusterUtil.MasterThread activeMaster = null;
 251      for (JVMClusterUtil.MasterThread t : masters) {
-252        if 

[14/26] hbase-site git commit: Published site at 6f15cecaed2f1f76bfe1880b7c578ed369daa5d5.

2018-11-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dccdd274/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
index 6369c27..ea05301 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
@@ -603,3251 +603,3256 @@
 595  // init superusers and add the 
server principal (if using security)
 596  // or process owner as default 
super user.
 597  Superusers.initialize(conf);
-598
-599  regionServerAccounting = new 
RegionServerAccounting(conf);
+598  regionServerAccounting = new 
RegionServerAccounting(conf);
+599
 600  boolean isMasterNotCarryTable =
 601  this instanceof HMaster 
 !LoadBalancer.isTablesOnMaster(conf);
-602  cacheConfig = new CacheConfig(conf, 
!isMasterNotCarryTable);
-603  mobCacheConfig = new 
MobCacheConfig(conf, !isMasterNotCarryTable);
-604  uncaughtExceptionHandler = new 
UncaughtExceptionHandler() {
-605@Override
-606public void 
uncaughtException(Thread t, Throwable e) {
-607  abort("Uncaught exception in 
executorService thread " + t.getName(), e);
-608}
-609  };
-610
-611  initializeFileSystem();
-612  spanReceiverHost = 
SpanReceiverHost.getInstance(getConfiguration());
-613
-614  this.configurationManager = new 
ConfigurationManager();
-615  setupWindows(getConfiguration(), 
getConfigurationManager());
-616
-617  // Some unit tests don't need a 
cluster, so no zookeeper at all
-618  if 
(!conf.getBoolean("hbase.testing.nocluster", false)) {
-619// Open connection to zookeeper 
and set primary watcher
-620zooKeeper = new ZKWatcher(conf, 
getProcessName() + ":" +
-621  rpcServices.isa.getPort(), 
this, canCreateBaseZNode());
-622// If no master in cluster, skip 
trying to track one or look for a cluster status.
-623if (!this.masterless) {
-624  this.csm = new 
ZkCoordinatedStateManager(this);
-625
-626  masterAddressTracker = new 
MasterAddressTracker(getZooKeeper(), this);
-627  masterAddressTracker.start();
-628
-629  clusterStatusTracker = new 
ClusterStatusTracker(zooKeeper, this);
-630  clusterStatusTracker.start();
-631} else {
-632  masterAddressTracker = null;
-633  clusterStatusTracker = null;
-634}
-635  } else {
-636zooKeeper = null;
-637masterAddressTracker = null;
-638clusterStatusTracker = null;
-639  }
-640  
this.rpcServices.start(zooKeeper);
-641  // This violates 'no starting stuff 
in Constructor' but Master depends on the below chore
-642  // and executor being created and 
takes a different startup route. Lots of overlap between HRS
-643  // and M (An M IS A HRS now). Need 
to refactor so less duplication between M and its super
-644  // Master expects Constructor to 
put up web servers. Ugh.
-645  // class HRS. TODO.
-646  this.choreService = new 
ChoreService(getName(), true);
-647  this.executorService = new 
ExecutorService(getName());
-648  putUpWebUI();
-649} catch (Throwable t) {
-650  // Make sure we log the exception. 
HRegionServer is often started via reflection and the
-651  // cause of failed startup is 
lost.
-652  LOG.error("Failed construction 
RegionServer", t);
-653  throw t;
-654}
-655  }
-656
-657  // HMaster should override this method 
to load the specific config for master
-658  protected String 
getUseThisHostnameInstead(Configuration conf) throws IOException {
-659String hostname = 
conf.get(RS_HOSTNAME_KEY);
-660if 
(conf.getBoolean(RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, false)) {
-661  if (!StringUtils.isBlank(hostname)) 
{
-662String msg = 
RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY + " and " + RS_HOSTNAME_KEY +
-663  " are mutually exclusive. Do 
not set " + RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY +
-664  " to true while " + 
RS_HOSTNAME_KEY + " is used";
-665throw new IOException(msg);
-666  } else {
-667return 
rpcServices.isa.getHostName();
-668  }
-669} else {
-670  return hostname;
-671}
-672  }
-673
-674  /**
-675   * If running on Windows, do 
windows-specific setup.
-676   */
-677  private static void setupWindows(final 
Configuration conf, ConfigurationManager cm) {
-678if (!SystemUtils.IS_OS_WINDOWS) {
-679  Signal.handle(new Signal("HUP"), 
new SignalHandler() {
-680@Override
-681public void handle(Signal signal) 
{
-682 

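Editor's note on the excerpt above: the quoted getUseThisHostnameInstead method rejects configurations that set both a fixed region-server hostname and the flag that disables reverse-DNS lookup on the master. A hedged sketch of what that means for configuration follows; the key strings are assumed from the HBase reference guide (they correspond to RS_HOSTNAME_KEY and RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY in the source) and the hostname value is invented.

// Hedged sketch: the two settings checked above are mutually exclusive.
Configuration conf = HBaseConfiguration.create();
conf.set("hbase.regionserver.hostname", "rs1.example.com");
// Also enabling the reverse-DNS-disable flag would make the region server
// constructor above fail with an IOException:
// conf.setBoolean("hbase.regionserver.hostname.disable.master.reversedns", true);
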
[14/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.MetaComparator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.MetaComparator.html b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.MetaComparator.html
index 6428b67..c7efa9f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.MetaComparator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.MetaComparator.html
@@ -260,2307 +260,2316 @@
 252}
 253
 254/**
-255 * Cannot rely on enum ordinals . 
They change if item is removed or moved.
-256 * Do our own codes.
-257 * @param b
-258 * @return Type associated with 
passed code.
-259 */
-260public static Type codeToType(final 
byte b) {
-261  Type t = codeArray[b  0xff];
-262  if (t != null) {
-263return t;
-264  }
-265  throw new RuntimeException("Unknown 
code " + b);
-266}
-267  }
-268
-269  /**
-270   * Lowest possible key.
-271   * Makes a Key with highest possible 
Timestamp, empty row and column.  No
-272   * key can be equal or lower than this 
one in memstore or in store file.
-273   */
-274  public static final KeyValue LOWESTKEY 
=
-275new 
KeyValue(HConstants.EMPTY_BYTE_ARRAY, HConstants.LATEST_TIMESTAMP);
-276
-277  
-278  // KeyValue core instance fields.
-279  protected byte [] bytes = null;  // an 
immutable byte array that contains the KV
-280  protected int offset = 0;  // offset 
into bytes buffer KV starts at
-281  protected int length = 0;  // length of 
the KV starting from offset.
-282
-283  /** Here be dragons **/
-284
-285  /**
-286   * used to achieve atomic operations in 
the memstore.
-287   */
-288  @Override
-289  public long getSequenceId() {
-290return seqId;
-291  }
-292
-293  @Override
-294  public void setSequenceId(long seqId) 
{
-295this.seqId = seqId;
-296  }
-297
-298  // multi-version concurrency control 
version.  default value is 0, aka do not care.
-299  private long seqId = 0;
-300
-301  /** Dragon time over, return to normal 
business */
-302
-303
-304  /** Writable Constructor -- DO NOT USE 
*/
-305  public KeyValue() {}
+255 * True to indicate that the byte b 
is a valid type.
+256 * @param b byte to check
+257 * @return true or false
+258 */
+259static boolean isValidType(byte b) 
{
+260  return codeArray[b  0xff] != 
null;
+261}
+262
+263/**
+264 * Cannot rely on enum ordinals . 
They change if item is removed or moved.
+265 * Do our own codes.
+266 * @param b
+267 * @return Type associated with 
passed code.
+268 */
+269public static Type codeToType(final 
byte b) {
+270  Type t = codeArray[b  0xff];
+271  if (t != null) {
+272return t;
+273  }
+274  throw new RuntimeException("Unknown 
code " + b);
+275}
+276  }
+277
+278  /**
+279   * Lowest possible key.
+280   * Makes a Key with highest possible 
Timestamp, empty row and column.  No
+281   * key can be equal or lower than this 
one in memstore or in store file.
+282   */
+283  public static final KeyValue LOWESTKEY 
=
+284new 
KeyValue(HConstants.EMPTY_BYTE_ARRAY, HConstants.LATEST_TIMESTAMP);
+285
+286  
+287  // KeyValue core instance fields.
+288  protected byte [] bytes = null;  // an 
immutable byte array that contains the KV
+289  protected int offset = 0;  // offset 
into bytes buffer KV starts at
+290  protected int length = 0;  // length of 
the KV starting from offset.
+291
+292  /** Here be dragons **/
+293
+294  /**
+295   * used to achieve atomic operations in 
the memstore.
+296   */
+297  @Override
+298  public long getSequenceId() {
+299return seqId;
+300  }
+301
+302  @Override
+303  public void setSequenceId(long seqId) 
{
+304this.seqId = seqId;
+305  }
 306
-307  /**
-308   * Creates a KeyValue from the start of 
the specified byte array.
-309   * Presumes 
codebytes/code content is formatted as a KeyValue blob.
-310   * @param bytes byte array
-311   */
-312  public KeyValue(final byte [] bytes) 
{
-313this(bytes, 0);
-314  }
+307  // multi-version concurrency control 
version.  default value is 0, aka do not care.
+308  private long seqId = 0;
+309
+310  /** Dragon time over, return to normal 
business */
+311
+312
+313  /** Writable Constructor -- DO NOT USE 
*/
+314  public KeyValue() {}
 315
 316  /**
-317   * Creates a KeyValue from the 
specified byte array and offset.
-318   * Presumes 
codebytes/code content starting at 
codeoffset/code is
-319   * formatted as a KeyValue blob.
-320   * @param bytes byte array
-321   * @param offset offset to start of 
KeyValue
-322   */
-323  public KeyValue(final byte [] bytes, 
final int offset) {
-324this(bytes, offset, getLength(bytes, 
offset));
-325  }
-326
-327  /**
-328   * Creates a KeyValue from the 
specified byte array, starting at offset, and
-329   * for 

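Editor's note on the KeyValue hunk above: it introduces Type.isValidType(byte) alongside the existing codeToType(byte); both look the type up in codeArray rather than trusting enum ordinals. A hedged sketch of the difference in behaviour follows; "cell" stands for any org.apache.hadoop.hbase.Cell, and isValidType is package-private in the patch, so code like this would only compile inside org.apache.hadoop.hbase.

// Hedged sketch: probe with isValidType instead of catching the RuntimeException
// that codeToType throws for unknown codes.
byte code = cell.getTypeByte();
if (KeyValue.Type.isValidType(code)) {
  KeyValue.Type type = KeyValue.Type.codeToType(code);
  // ... use type ...
} else {
  // corrupt or unknown type byte; handle it without exception-driven control flow
}
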
[14/26] hbase-site git commit: Published site at 7464e2ef9d420d5d8c559600a15d69ed1f3fd41a.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd306e04/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
index 93a57cb..f8c8b32 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
@@ -33,4324 +33,4323 @@
 025import java.io.InterruptedIOException;
 026import java.util.ArrayList;
 027import java.util.Arrays;
-028import java.util.EnumSet;
-029import java.util.HashMap;
-030import java.util.Iterator;
-031import java.util.LinkedList;
-032import java.util.List;
-033import java.util.Map;
-034import java.util.Set;
-035import java.util.concurrent.Callable;
-036import 
java.util.concurrent.ExecutionException;
-037import java.util.concurrent.Future;
-038import java.util.concurrent.TimeUnit;
-039import 
java.util.concurrent.TimeoutException;
-040import 
java.util.concurrent.atomic.AtomicInteger;
-041import 
java.util.concurrent.atomic.AtomicReference;
-042import java.util.function.Supplier;
-043import java.util.regex.Pattern;
-044import java.util.stream.Collectors;
-045import java.util.stream.Stream;
-046import 
org.apache.hadoop.conf.Configuration;
-047import 
org.apache.hadoop.hbase.Abortable;
-048import 
org.apache.hadoop.hbase.CacheEvictionStats;
-049import 
org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
-050import 
org.apache.hadoop.hbase.ClusterMetrics;
-051import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-052import 
org.apache.hadoop.hbase.ClusterMetricsBuilder;
-053import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-054import 
org.apache.hadoop.hbase.HBaseConfiguration;
-055import 
org.apache.hadoop.hbase.HConstants;
-056import 
org.apache.hadoop.hbase.HRegionInfo;
-057import 
org.apache.hadoop.hbase.HRegionLocation;
-058import 
org.apache.hadoop.hbase.HTableDescriptor;
-059import 
org.apache.hadoop.hbase.MasterNotRunningException;
-060import 
org.apache.hadoop.hbase.MetaTableAccessor;
-061import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-062import 
org.apache.hadoop.hbase.NamespaceNotFoundException;
-063import 
org.apache.hadoop.hbase.NotServingRegionException;
-064import 
org.apache.hadoop.hbase.RegionLocations;
-065import 
org.apache.hadoop.hbase.RegionMetrics;
-066import 
org.apache.hadoop.hbase.RegionMetricsBuilder;
-067import 
org.apache.hadoop.hbase.ServerName;
-068import 
org.apache.hadoop.hbase.TableExistsException;
-069import 
org.apache.hadoop.hbase.TableName;
-070import 
org.apache.hadoop.hbase.TableNotDisabledException;
-071import 
org.apache.hadoop.hbase.TableNotFoundException;
-072import 
org.apache.hadoop.hbase.UnknownRegionException;
-073import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-074import 
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-075import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-076import 
org.apache.hadoop.hbase.client.security.SecurityCapability;
-077import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-078import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-079import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-080import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-081import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-082import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-083import 
org.apache.hadoop.hbase.quotas.QuotaRetriever;
-084import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-085import 
org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-086import 
org.apache.hadoop.hbase.replication.ReplicationException;
-087import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-088import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-089import 
org.apache.hadoop.hbase.replication.SyncReplicationState;
-090import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-091import 
org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-092import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-093import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-094import 
org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-095import 
org.apache.hadoop.hbase.util.Addressing;
-096import 
org.apache.hadoop.hbase.util.Bytes;
-097import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-098import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-099import 
org.apache.hadoop.hbase.util.Pair;
-100import 
org.apache.hadoop.ipc.RemoteException;
-101import 
org.apache.hadoop.util.StringUtils;
-102import 
org.apache.yetus.audience.InterfaceAudience;
-103import 
org.apache.yetus.audience.InterfaceStability;
-104import 

[14/26] hbase-site git commit: Published site at 42aa3dd463c0d30a9b940d296b87316b5c67e1f5.

2018-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37b8a04a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
index e984063..083ab07 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
@@ -53,1074 +53,1082 @@
 045import 
org.apache.hadoop.conf.Configuration;
 046import org.apache.hadoop.fs.FileSystem;
 047import org.apache.hadoop.fs.Path;
-048import 
org.apache.hadoop.hbase.HConstants;
-049import org.apache.hadoop.hbase.Server;
-050import 
org.apache.hadoop.hbase.ServerName;
-051import 
org.apache.hadoop.hbase.TableName;
-052import 
org.apache.hadoop.hbase.replication.ReplicationException;
-053import 
org.apache.hadoop.hbase.replication.ReplicationListener;
-054import 
org.apache.hadoop.hbase.replication.ReplicationPeer;
-055import 
org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
-056import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-057import 
org.apache.hadoop.hbase.replication.ReplicationPeerImpl;
-058import 
org.apache.hadoop.hbase.replication.ReplicationPeers;
-059import 
org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
-060import 
org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
-061import 
org.apache.hadoop.hbase.replication.ReplicationTracker;
-062import 
org.apache.hadoop.hbase.replication.ReplicationUtils;
-063import 
org.apache.hadoop.hbase.replication.SyncReplicationState;
-064import 
org.apache.hadoop.hbase.util.Pair;
-065import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-066import 
org.apache.hadoop.hbase.wal.SyncReplicationWALProvider;
-067import 
org.apache.yetus.audience.InterfaceAudience;
-068import 
org.apache.zookeeper.KeeperException;
-069import org.slf4j.Logger;
-070import org.slf4j.LoggerFactory;
-071
-072import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-073import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-074
-075/**
-076 * This class is responsible to manage all the replication sources. There are two classes of
-077 * sources:
-078 * <ul>
-079 * <li>Normal sources are persistent and one per peer cluster</li>
-080 * <li>Old sources are recovered from a failed region server and our only goal is to finish
-081 * replicating the WAL queue it had</li>
-082 * </ul>
-083 * <p>
-084 * When a region server dies, this class uses a watcher to get notified and it tries to grab a lock
-085 * in order to transfer all the queues in a local old source.
-086 * <p>
-087 * Synchronization specification:
-088 * <ul>
-089 * <li>No need synchronized on {@link #sources}. {@link #sources} is a ConcurrentHashMap and there
-090 * is a Lock for peer id in {@link PeerProcedureHandlerImpl}. So there is no race for peer
-091 * operations.</li>
-092 * <li>Need synchronized on {@link #walsById}. There are four methods which modify it,
-093 * {@link #addPeer(String)}, {@link #removePeer(String)},
-094 * {@link #cleanOldLogs(String, boolean, ReplicationSourceInterface)} and {@link #preLogRoll(Path)}.
-095 * {@link #walsById} is a ConcurrentHashMap and there is a Lock for peer id in
-096 * {@link PeerProcedureHandlerImpl}. So there is no race between {@link #addPeer(String)} and
-097 * {@link #removePeer(String)}. {@link #cleanOldLogs(String, boolean, ReplicationSourceInterface)}
-098 * is called by {@link ReplicationSourceInterface}. So no race with {@link #addPeer(String)}.
-099 * {@link #removePeer(String)} will terminate the {@link ReplicationSourceInterface} firstly, then
-100 * remove the wals from {@link #walsById}. So no race with {@link #removePeer(String)}. The only
-101 * case need synchronized is {@link #cleanOldLogs(String, boolean, ReplicationSourceInterface)} and
-102 * {@link #preLogRoll(Path)}.</li>
-103 * <li>No need synchronized on {@link #walsByIdRecoveredQueues}. There are three methods which
-104 * modify it, {@link #removePeer(String)},
-105 * {@link #cleanOldLogs(String, boolean, ReplicationSourceInterface)} and
-106 * {@link ReplicationSourceManager.NodeFailoverWorker#run()}.
-107 * {@link #cleanOldLogs(String, boolean, ReplicationSourceInterface)} is called by
-108 * {@link ReplicationSourceInterface}. {@link #removePeer(String)} will terminate the
-109 * {@link ReplicationSourceInterface} firstly, then remove the wals from
-110 * {@link #walsByIdRecoveredQueues}. And {@link ReplicationSourceManager.NodeFailoverWorker#run()}
-111 * will add the wals to {@link #walsByIdRecoveredQueues} firstly, then start up a
-112 * {@link ReplicationSourceInterface}. 

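Editor's note on the class comment in the removed lines above: it spells out which maps need external synchronization; sources is safe because it is a ConcurrentHashMap plus a per-peer lock, while walsById additionally needs synchronized blocks around the few methods that mutate it. A minimal, generic sketch of that pattern follows, assuming only java.util and java.util.concurrent; it is not HBase code and the names are invented.

// Generic sketch of the documented pattern: a concurrent map for plain lookups, plus a
// coarse lock around the few compound mutations that must not interleave with each other.
class WalBookkeeping {
  private final ConcurrentMap<String, Deque<String>> walsById = new ConcurrentHashMap<>();

  void recordWal(String peerId, String walName) {
    synchronized (walsById) {                 // the "need synchronized on walsById" case
      walsById.computeIfAbsent(peerId, p -> new ArrayDeque<>()).add(walName);
    }
  }

  Set<String> peerIds() {
    return walsById.keySet();                 // plain reads need no extra locking
  }
}
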
[14/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

2018-04-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
index 12d10e1..97ceefd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
@@ -1740,384 +1740,380 @@
 1732  // and will save us having to seek 
the stream backwards to reread the header we
 1733  // read the last time through 
here.
 1734  ByteBuffer headerBuf = 
getCachedHeader(offset);
-1735  if (LOG.isTraceEnabled()) {
-1736LOG.trace("Reading " + 
this.fileContext.getHFileName() + " at offset=" + offset +
-1737  ", pread=" + pread + ", 
verifyChecksum=" + verifyChecksum + ", cachedHeader=" +
-1738  headerBuf + ", 
onDiskSizeWithHeader=" + onDiskSizeWithHeader);
-1739  }
-1740  // This is NOT same as 
verifyChecksum. This latter is whether to do hbase
-1741  // checksums. Can change with 
circumstances. The below flag is whether the
-1742  // file has support for checksums 
(version 2+).
-1743  boolean checksumSupport = 
this.fileContext.isUseHBaseChecksum();
-1744  long startTime = 
System.currentTimeMillis();
-1745  if (onDiskSizeWithHeader = 0) 
{
-1746// We were not passed the block 
size. Need to get it from the header. If header was
-1747// not cached (see 
getCachedHeader above), need to seek to pull it in. This is costly
-1748// and should happen very 
rarely. Currently happens on open of a hfile reader where we
-1749// read the trailer blocks to 
pull in the indices. Otherwise, we are reading block sizes
-1750// out of the hfile index. To 
check, enable TRACE in this file and you'll get an exception
-1751// in a LOG every time we seek. 
See HBASE-17072 for more detail.
-1752if (headerBuf == null) {
-1753  if (LOG.isTraceEnabled()) {
-1754LOG.trace("Extra see to get 
block size!", new RuntimeException());
-1755  }
-1756  headerBuf = 
ByteBuffer.allocate(hdrSize);
-1757  readAtOffset(is, 
headerBuf.array(), headerBuf.arrayOffset(), hdrSize, false,
-1758  offset, pread);
-1759}
-1760onDiskSizeWithHeader = 
getOnDiskSizeWithHeader(headerBuf, checksumSupport);
-1761  }
-1762  int preReadHeaderSize = headerBuf 
== null? 0 : hdrSize;
-1763  // Allocate enough space to fit 
the next block's header too; saves a seek next time through.
-1764  // onDiskBlock is whole block + 
header + checksums then extra hdrSize to read next header;
-1765  // onDiskSizeWithHeader is header, 
body, and any checksums if present. preReadHeaderSize
-1766  // says where to start reading. If 
we have the header cached, then we don't need to read
-1767  // it again and we can likely read 
from last place we left off w/o need to backup and reread
-1768  // the header we read last time 
through here.
-1769  // TODO: Make this 
ByteBuffer-based. Will make it easier to go to HDFS with BBPool (offheap).
-1770  byte [] onDiskBlock = new 
byte[onDiskSizeWithHeader + hdrSize];
-1771  int nextBlockOnDiskSize = 
readAtOffset(is, onDiskBlock, preReadHeaderSize,
-1772  onDiskSizeWithHeader - 
preReadHeaderSize, true, offset + preReadHeaderSize, pread);
-1773  if (headerBuf != null) {
-1774// The header has been read when 
reading the previous block OR in a distinct header-only
-1775// read. Copy to this block's 
header.
-1776
System.arraycopy(headerBuf.array(), headerBuf.arrayOffset(), onDiskBlock, 0, 
hdrSize);
-1777  } else {
-1778headerBuf = 
ByteBuffer.wrap(onDiskBlock, 0, hdrSize);
-1779  }
-1780  // Do a few checks before we go 
instantiate HFileBlock.
-1781  assert onDiskSizeWithHeader  
this.hdrSize;
-1782  
verifyOnDiskSizeMatchesHeader(onDiskSizeWithHeader, headerBuf, offset, 
checksumSupport);
-1783  ByteBuffer onDiskBlockByteBuffer = 
ByteBuffer.wrap(onDiskBlock, 0, onDiskSizeWithHeader);
-1784  // Verify checksum of the data 
before using it for building HFileBlock.
-1785  if (verifyChecksum 
-1786  !validateChecksum(offset, 
onDiskBlockByteBuffer, hdrSize)) {
-1787return null;
-1788  }
-1789  long duration = 
System.currentTimeMillis() - startTime;
-1790  if (updateMetrics) {
-1791
HFile.updateReadLatency(duration, pread);
-1792  }
-1793  // The onDiskBlock will become the 
headerAndDataBuffer for this block.
-1794  // If 
nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already
-1795  // contains the header of next 
block, so no 

[14/26] hbase-site git commit: Published site at 1e56938757d2958631ac1ea07387eaa61997d84a.

2018-04-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b707139a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html
index 6007f27..4ce6735 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html
@@ -65,716 +65,716 @@
 057import 
org.apache.hadoop.hbase.client.Table;
 058import 
org.apache.hadoop.hbase.client.TableState;
 059import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-060import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-061import 
org.apache.hadoop.hbase.master.RegionState;
-062import 
org.apache.hadoop.hbase.master.RegionState.State;
-063import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-064import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-065import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
-066import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-067import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
-068import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
-069import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse;
-070import 
org.apache.hadoop.hbase.util.Bytes;
-071import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-072import 
org.apache.hadoop.hbase.util.ExceptionUtil;
-073import 
org.apache.hadoop.hbase.util.Pair;
-074import 
org.apache.hadoop.hbase.util.PairOfSameType;
-075import 
org.apache.yetus.audience.InterfaceAudience;
-076import org.slf4j.Logger;
-077import org.slf4j.LoggerFactory;
-078
-079import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+060import 
org.apache.hadoop.hbase.filter.Filter;
+061import 
org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
+062import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+063import 
org.apache.hadoop.hbase.master.RegionState;
+064import 
org.apache.hadoop.hbase.master.RegionState.State;
+065import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+066import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+067import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
+068import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+069import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
+070import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
+071import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse;
+072import 
org.apache.hadoop.hbase.util.Bytes;
+073import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+074import 
org.apache.hadoop.hbase.util.ExceptionUtil;
+075import 
org.apache.hadoop.hbase.util.Pair;
+076import 
org.apache.hadoop.hbase.util.PairOfSameType;
+077import 
org.apache.yetus.audience.InterfaceAudience;
+078import org.slf4j.Logger;
+079import org.slf4j.LoggerFactory;
 080
-081/**
-082 * <p>
-083 * Read/write operations on region and assignment information store in <code>hbase:meta</code>.
-084 * </p>
-085 * <p>
-086 * Some of the methods of this class take ZooKeeperWatcher as a param. The only reason for this is
-087 * because when used on client-side (like from HBaseAdmin), we want to use short-living connection
-088 * (opened before each operation, closed right after), while when used on HM or HRS (like in
-089 * AssignmentManager) we want permanent connection.
-090 * </p>
-091 * <p>
-092 * HBASE-10070 adds a replicaId to HRI, meaning more than one HRI can be defined for the same table
-093 * range (table, startKey, endKey). For every range, there will be at least one HRI defined which is
-094 * called default replica.
-095 * </p>
-096 * <p>
-097 * Meta layout (as of 0.98 + HBASE-10070) is like:
-098 *
-099 * <pre>
-100 * For each table there is single row in column family 'table' formatted:
-101 * <tableName> including namespace and columns are:
-102 * table: state             => contains table state
-103 *
-104 * For each table range, there is a single row, formatted like:
-105 * <tableName>,<startKey>,<regionId>,<encodedRegionName>.
-106 * This row corresponds to the regionName of the default region replica.
-107 * Columns are:
-108 * info:regioninfo          => contains serialized HRI for the default region replica
-109 * info:server              => contains hostname:port (in string form) for the server hosting
-110 *                             the default regionInfo replica
-111 * info:server_<replicaId>  => contains hostname:port (in string form) for the 

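Editor's note on the removed class comment above: it documents the hbase:meta layout, one row per table range with info:regioninfo and info:server columns for the default replica. As a hedged illustration only, not part of this patch, a client can inspect those rows directly; "conf" stands for an ordinary HBase Configuration, and the printed row keys follow the tableName,startKey,regionId format described in the comment.

// Hedged sketch: list hbase:meta row keys and the info:server locations described above.
try (Connection conn = ConnectionFactory.createConnection(conf);
     Table meta = conn.getTable(TableName.META_TABLE_NAME);
     ResultScanner scanner = meta.getScanner(new Scan().addFamily(Bytes.toBytes("info")))) {
  for (Result r : scanner) {
    byte[] server = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("server"));
    System.out.println(Bytes.toString(r.getRow())
        + " -> " + (server == null ? "(unassigned)" : Bytes.toString(server)));
  }
}
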
[14/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.

2018-03-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html
index eccc4a3..ebbde54 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html
@@ -1744,1869 +1744,1872 @@
 1736  HRegion region = 
getRegion(request.getRegion());
 1737  RegionInfo info = 
region.getRegionInfo();
 1738  byte[] bestSplitRow = null;
-1739  if (request.hasBestSplitRow() 
 request.getBestSplitRow()) {
-1740HRegion r = region;
-1741
region.startRegionOperation(Operation.SPLIT_REGION);
-1742r.forceSplit(null);
-1743bestSplitRow = r.checkSplit();
-1744// when all table data are in 
memstore, bestSplitRow = null
-1745// try to flush region first
-1746if(bestSplitRow == null) {
-1747  r.flush(true);
-1748  bestSplitRow = 
r.checkSplit();
-1749}
-1750r.clearSplit();
-1751  }
-1752  GetRegionInfoResponse.Builder 
builder = GetRegionInfoResponse.newBuilder();
-1753  
builder.setRegionInfo(ProtobufUtil.toRegionInfo(info));
-1754  if (request.hasCompactionState() 
 request.getCompactionState()) {
-1755
builder.setCompactionState(ProtobufUtil.createCompactionState(region.getCompactionState()));
-1756  }
-1757  
builder.setSplittable(region.isSplittable());
-1758  
builder.setMergeable(region.isMergeable());
-1759  if (request.hasBestSplitRow() 
 request.getBestSplitRow()  bestSplitRow != null) {
-1760
builder.setBestSplitRow(UnsafeByteOperations.unsafeWrap(bestSplitRow));
-1761  }
-1762  return builder.build();
-1763} catch (IOException ie) {
-1764  throw new ServiceException(ie);
-1765}
-1766  }
-1767
-1768  @Override
-1769  
@QosPriority(priority=HConstants.ADMIN_QOS)
-1770  public GetRegionLoadResponse 
getRegionLoad(RpcController controller,
-1771  GetRegionLoadRequest request) 
throws ServiceException {
-1772
-1773ListHRegion regions;
-1774if (request.hasTableName()) {
-1775  TableName tableName = 
ProtobufUtil.toTableName(request.getTableName());
-1776  regions = 
regionServer.getRegions(tableName);
-1777} else {
-1778  regions = 
regionServer.getRegions();
-1779}
-1780ListRegionLoad rLoads = new 
ArrayList(regions.size());
-1781RegionLoad.Builder regionLoadBuilder 
= ClusterStatusProtos.RegionLoad.newBuilder();
-1782RegionSpecifier.Builder 
regionSpecifier = RegionSpecifier.newBuilder();
-1783
-1784try {
-1785  for (HRegion region : regions) {
-1786
rLoads.add(regionServer.createRegionLoad(region, regionLoadBuilder, 
regionSpecifier));
-1787  }
-1788} catch (IOException e) {
-1789  throw new ServiceException(e);
-1790}
-1791GetRegionLoadResponse.Builder 
builder = GetRegionLoadResponse.newBuilder();
-1792builder.addAllRegionLoads(rLoads);
-1793return builder.build();
-1794  }
-1795
-1796  @Override
-1797  
@QosPriority(priority=HConstants.ADMIN_QOS)
-1798  public ClearCompactionQueuesResponse 
clearCompactionQueues(RpcController controller,
-1799ClearCompactionQueuesRequest 
request) throws ServiceException {
-1800LOG.debug("Client=" + 
RpcServer.getRequestUserName().orElse(null) + "/"
-1801+ 
RpcServer.getRemoteAddress().orElse(null) + " clear compactions queue");
-1802
ClearCompactionQueuesResponse.Builder respBuilder = 
ClearCompactionQueuesResponse.newBuilder();
-1803requestCount.increment();
-1804if 
(clearCompactionQueues.compareAndSet(false,true)) {
-1805  try {
-1806checkOpen();
-1807
regionServer.getRegionServerCoprocessorHost().preClearCompactionQueues();
-1808for (String queueName : 
request.getQueueNameList()) {
-1809  LOG.debug("clear " + queueName 
+ " compaction queue");
-1810  switch (queueName) {
-1811case "long":
-1812  
regionServer.compactSplitThread.clearLongCompactionsQueue();
-1813  break;
-1814case "short":
-1815  
regionServer.compactSplitThread.clearShortCompactionsQueue();
+1739  boolean shouldSplit = true;
+1740  if (request.hasBestSplitRow() 
 request.getBestSplitRow()) {
+1741HRegion r = region;
+1742
region.startRegionOperation(Operation.SPLIT_REGION);
+1743r.forceSplit(null);
+1744// Even after setting force 
split if split policy says no to split then we should not split.
+1745shouldSplit = 

[14/26] hbase-site git commit: Published site at 67f013430c9ba051385c45d72ee680c44eb88470.

2018-03-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd35fe02/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.WorkerState.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.WorkerState.html b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.WorkerState.html
index 1d94c77..484b604 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.WorkerState.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.WorkerState.html
@@ -27,50 +27,50 @@
 019
 020import java.io.IOException;
 021import java.util.List;
-022import java.util.Map;
-023import 
java.util.concurrent.PriorityBlockingQueue;
-024import 
org.apache.hadoop.conf.Configuration;
-025import org.apache.hadoop.fs.Path;
-026import org.apache.hadoop.hbase.Cell;
-027import 
org.apache.hadoop.hbase.CellUtil;
-028import 
org.apache.hadoop.hbase.replication.ReplicationEndpoint;
-029import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-030import 
org.apache.hadoop.hbase.util.Threads;
-031import 
org.apache.hadoop.hbase.wal.WAL.Entry;
-032import 
org.apache.hadoop.hbase.wal.WALEdit;
-033import 
org.apache.yetus.audience.InterfaceAudience;
-034import org.slf4j.Logger;
-035import org.slf4j.LoggerFactory;
-036
-037import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor;
-038import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
-039
-040/**
-041 * This thread reads entries from a queue 
and ships them. Entries are placed onto the queue by
-042 * ReplicationSourceWALReaderThread
-043 */
-044@InterfaceAudience.Private
-045public class ReplicationSourceShipper 
extends Thread {
-046  private static final Logger LOG = 
LoggerFactory.getLogger(ReplicationSourceShipper.class);
-047
-048  // Hold the state of a replication 
worker thread
-049  public enum WorkerState {
-050RUNNING,
-051STOPPED,
-052FINISHED,  // The worker is done 
processing a recovered queue
-053  }
-054
-055  protected final Configuration conf;
-056  protected final String walGroupId;
-057  protected final 
PriorityBlockingQueuePath queue;
-058  protected final 
ReplicationSourceInterface source;
-059
-060  // Last position in the log that we 
sent to ZooKeeper
-061  protected long lastLoggedPosition = 
-1;
+022import 
java.util.concurrent.PriorityBlockingQueue;
+023import 
org.apache.hadoop.conf.Configuration;
+024import org.apache.hadoop.fs.Path;
+025import org.apache.hadoop.hbase.Cell;
+026import 
org.apache.hadoop.hbase.CellUtil;
+027import 
org.apache.hadoop.hbase.replication.ReplicationEndpoint;
+028import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+029import 
org.apache.hadoop.hbase.util.Threads;
+030import 
org.apache.hadoop.hbase.wal.WAL.Entry;
+031import 
org.apache.hadoop.hbase.wal.WALEdit;
+032import 
org.apache.yetus.audience.InterfaceAudience;
+033import org.slf4j.Logger;
+034import org.slf4j.LoggerFactory;
+035
+036import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor;
+037import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
+038
+039/**
+040 * This thread reads entries from a queue 
and ships them. Entries are placed onto the queue by
+041 * ReplicationSourceWALReaderThread
+042 */
+043@InterfaceAudience.Private
+044public class ReplicationSourceShipper 
extends Thread {
+045  private static final Logger LOG = 
LoggerFactory.getLogger(ReplicationSourceShipper.class);
+046
+047  // Hold the state of a replication 
worker thread
+048  public enum WorkerState {
+049RUNNING,
+050STOPPED,
+051FINISHED,  // The worker is done 
processing a recovered queue
+052  }
+053
+054  private final Configuration conf;
+055  protected final String walGroupId;
+056  protected final 
PriorityBlockingQueuePath queue;
+057  private final 
ReplicationSourceInterface source;
+058
+059  // Last position in the log that we 
sent to ZooKeeper
+060  // It will be accessed by the stats 
thread so make it volatile
+061  private volatile long currentPosition = 
-1;
 062  // Path of the current log
-063  protected volatile Path currentPath;
+063  private Path currentPath;
 064  // Current state of the worker thread
-065  private WorkerState state;
+065  private volatile WorkerState state;
 066  protected ReplicationSourceWALReader 
entryReader;
 067
 068  // How long should we sleep for each 
retry
@@ -105,204 +105,212 @@
 097  }
 098  try {
 099WALEntryBatch entryBatch = 
entryReader.take();
-100shipEdits(entryBatch);
-101postShipEdits(entryBatch);
-102  } catch (InterruptedException e) 
{
-103LOG.trace("Interrupted while 
waiting for next replication entry batch", e);
-104

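Editor's note on the shipper hunk above: lastLoggedPosition becomes the volatile field currentPosition (and the worker state also becomes volatile) because a separate stats thread reads these fields while the shipper thread writes them. A hedged, generic illustration of the visibility issue follows; it is not HBase code.

// Generic sketch: volatile guarantees the reader thread sees the latest write.
// It does not make compound read-modify-write operations atomic.
class PositionTracker {
  private volatile long currentPosition = -1;   // written by the shipping thread

  void advanceTo(long newPosition) {
    currentPosition = newPosition;
  }

  long snapshotForStats() {                     // read by a separate stats thread
    return currentPosition;
  }
}
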
[14/26] hbase-site git commit: Published site at .

2018-02-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ac54a6a8/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git a/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html b/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
index 553749a..ccbd641 100644
--- a/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -908,7 +908,7 @@
-HRegionServer
+void
 expireRegionServerSession(int index)
 Expire a region server's session

@@ -4504,8 +4504,8 @@
 expireRegionServerSession
-public HRegionServer expireRegionServerSession(int index)
-                                        throws Exception
+public void expireRegionServerSession(int index)
+                                throws Exception
 Expire a region server's session
 
 Parameters:
@@ -4521,7 +4521,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 expireSession
-publicvoidexpireSession(ZKWatchernodeZK)
+publicvoidexpireSession(ZKWatchernodeZK)
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -4535,7 +4535,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 expireSession
-publicvoidexpireSession(ZKWatchernodeZK,
+publicvoidexpireSession(ZKWatchernodeZK,
   booleancheckStatus)
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 Expire a ZooKeeper session as recommended in ZooKeeper 
documentation
@@ -4559,7 +4559,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 getHBaseCluster
-publicMiniHBaseClustergetHBaseCluster()
+publicMiniHBaseClustergetHBaseCluster()
 Get the Mini HBase cluster.
 
 Returns:
@@ -4575,7 +4575,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 getHBaseClusterInterface
-publicHBaseClustergetHBaseClusterInterface()
+publicHBaseClustergetHBaseClusterInterface()
 Returns the HBaseCluster instance.
  Returned object can be any of the subclasses of HBaseCluster, and the
  tests referring this should not assume that the cluster is a mini cluster or a
@@ -4590,7 +4590,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 getConnection
-publicConnectiongetConnection()
+publicConnectiongetConnection()
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Get a Connection to the cluster.
  Not thread-safe (This class needs a lot of work to make it thread-safe).
@@ -4609,7 +4609,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 getHBaseAdmin
 http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true;
 title="class or interface in java.lang">@Deprecated
-publicHBaseAdmingetHBaseAdmin()
+publicHBaseAdmingetHBaseAdmin()
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Deprecated.Since 2.0. Will be removed in 3.0. Use getAdmin()
 instead.
 Returns a Admin instance.
@@ -4631,7 +4631,7 @@ public
 
 getAdmin
-publicAdmingetAdmin()
+publicAdmingetAdmin()
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Returns an Admin instance which is shared between 
HBaseTestingUtility instance users.
  Closing it has no effect, it will be closed automatically when the cluster 
shutdowns
@@ -4647,7 +4647,7 @@ public
 
 unassignRegion
-publicvoidunassignRegion(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringregionName)
+publicvoidunassignRegion(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringregionName)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Unassign the named region.
 
@@ -4664,7 +4664,7 @@ public
 
 unassignRegion
-publicvoidunassignRegion(byte[]regionName)
+publicvoidunassignRegion(byte[]regionName)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 

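Editor's note on the Javadoc diff above: it records an API change in HBaseTestingUtility, where expireRegionServerSession(int) now returns void instead of the HRegionServer. A hedged usage sketch follows; the server index is an example and a real test would wrap this in proper setup and teardown.

// Hedged sketch: expire one region server's ZooKeeper session in a test.
HBaseTestingUtility util = new HBaseTestingUtility();
util.startMiniCluster();
util.expireRegionServerSession(0);               // no longer returns the HRegionServer
// If the server handle is still needed, fetch it separately, e.g.:
// HRegionServer rs = util.getMiniHBaseCluster().getRegionServer(0);
util.shutdownMiniCluster();
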
[14/26] hbase-site git commit: Published site at .

2017-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7d90d02f/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
index 07b6ae0..21f2337 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
@@ -47,1510 +47,1527 @@
 039import 
org.apache.commons.logging.LogFactory;
 040import 
org.apache.hadoop.conf.Configuration;
 041import 
org.apache.hadoop.hbase.ClusterStatus;
-042import 
org.apache.hadoop.hbase.HBaseIOException;
-043import 
org.apache.hadoop.hbase.HConstants;
-044import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-045import 
org.apache.hadoop.hbase.HRegionInfo;
-046import 
org.apache.hadoop.hbase.ServerLoad;
-047import 
org.apache.hadoop.hbase.ServerName;
-048import 
org.apache.hadoop.hbase.TableName;
-049import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-050import 
org.apache.hadoop.hbase.master.LoadBalancer;
-051import 
org.apache.hadoop.hbase.master.MasterServices;
-052import 
org.apache.hadoop.hbase.master.RackManager;
-053import 
org.apache.hadoop.hbase.master.RegionPlan;
-054import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
-055
-056import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-057import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Joiner;
-058import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
-059import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-060import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-061
-062/**
-063 * The base class for load balancers. It 
provides the the functions used to by
-064 * {@link 
org.apache.hadoop.hbase.master.assignment.AssignmentManager} to assign 
regions
-065 * in the edge cases. It doesn't provide 
an implementation of the
-066 * actual balancing algorithm.
-067 *
-068 */
-069public abstract class BaseLoadBalancer 
implements LoadBalancer {
-070  protected static final int 
MIN_SERVER_BALANCE = 2;
-071  private volatile boolean stopped = 
false;
-072
-073  private static final 
ListHRegionInfo EMPTY_REGION_LIST = new ArrayList(0);
-074
-075  static final 
PredicateServerLoad IDLE_SERVER_PREDICATOR
-076= load - 
load.getNumberOfRegions() == 0;
-077
-078  protected final RegionLocationFinder 
regionFinder = new RegionLocationFinder();
-079
-080  private static class DefaultRackManager 
extends RackManager {
-081@Override
-082public String getRack(ServerName 
server) {
-083  return UNKNOWN_RACK;
-084}
-085  }
-086
-087  /**
-088   * The constructor that uses the basic 
MetricsBalancer
-089   */
-090  protected BaseLoadBalancer() {
-091metricsBalancer = new 
MetricsBalancer();
-092  }
-093
-094  /**
-095   * This Constructor accepts an instance 
of MetricsBalancer,
-096   * which will be used instead of 
creating a new one
-097   */
-098  protected 
BaseLoadBalancer(MetricsBalancer metricsBalancer) {
-099this.metricsBalancer = 
(metricsBalancer != null) ? metricsBalancer : new MetricsBalancer();
-100  }
-101
-102  /**
-103   * An efficient array based 
implementation similar to ClusterState for keeping
-104   * the status of the cluster in terms 
of region assignment and distribution.
-105   * LoadBalancers, such as 
StochasticLoadBalancer uses this Cluster object because of
-106   * hundreds of thousands of hashmap 
manipulations are very costly, which is why this
-107   * class uses mostly indexes and 
arrays.
-108   *
-109   * Cluster tracks a list of unassigned 
regions, region assignments, and the server
-110   * topology in terms of server names, 
hostnames and racks.
-111   */
-112  protected static class Cluster {
-113ServerName[] servers;
-114String[] hosts; // ServerName 
uniquely identifies a region server. multiple RS can run on the same host
-115String[] racks;
-116boolean multiServersPerHost = false; 
// whether or not any host has more than one server
-117
-118ArrayListString tables;
-119HRegionInfo[] regions;
-120DequeBalancerRegionLoad[] 
regionLoads;
-121private RegionLocationFinder 
regionFinder;
-122
-123int[][] regionLocations; 
//regionIndex - list of serverIndex sorted by locality
-124
-125int[]   serverIndexToHostIndex;  
//serverIndex - host index
-126int[]   serverIndexToRackIndex;  
//serverIndex - rack index
-127
-128int[][] regionsPerServer;
//serverIndex - region list
-129int[][] regionsPerHost;  
//hostIndex - list of regions
-130int[][] 

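Editor's note on the BaseLoadBalancer.Cluster comment above: it explains that the balancer models the cluster with int indexes and primitive arrays (regionsPerServer, serverIndexToHostIndex, and so on) because hundreds of thousands of hashmap manipulations would be too costly. A hedged, generic sketch of that idea follows, with invented field names that only loosely mirror the real ones.

// Generic sketch (invented names): index/array cluster state, so a region move is a
// few array writes instead of hashmap updates.
class ArrayClusterState {
  int[] regionToServer;        // regionIndex -> serverIndex currently hosting it
  int[] serverToHost;          // serverIndex -> hostIndex
  int[] regionsPerServerCount; // serverIndex -> number of regions hosted

  void move(int region, int toServer) {
    int fromServer = regionToServer[region];
    regionToServer[region] = toServer;
    regionsPerServerCount[fromServer]--;
    regionsPerServerCount[toServer]++;
  }
}
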
[14/26] hbase-site git commit: Published site at .

2017-09-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/539471a7/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
index a81731d..bdeba84 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
@@ -604,13 +604,13 @@
 596   * @param row
 597   * @param family
 598   * @param qualifier
-599   * @param compareOp
+599   * @param op
 600   * @param comparator @throws 
IOException
 601   */
 602  private boolean checkAndRowMutate(final 
Region region, final ListClientProtos.Action actions,
-603final 
CellScanner cellScanner, byte[] row, byte[] family, byte[] qualifier,
-604
CompareOperator op, ByteArrayComparable comparator, RegionActionResult.Builder 
builder,
-605
ActivePolicyEnforcement spaceQuotaEnforcement) throws IOException {
+603  final CellScanner cellScanner, 
byte[] row, byte[] family, byte[] qualifier,
+604  CompareOperator op, 
ByteArrayComparable comparator, RegionActionResult.Builder builder,
+605  ActivePolicyEnforcement 
spaceQuotaEnforcement) throws IOException {
 606if 
(!region.getRegionInfo().isMetaTable()) {
 607  
regionServer.cacheFlusher.reclaimMemStoreMemory();
 608}
@@ -655,2847 +655,2843 @@
 647
 648  /**
 649   * Execute an append mutation.
-650   *
-651   * @param region
-652   * @param m
-653   * @param cellScanner
-654   * @return result to return to client if default operation should be
-655   * bypassed as indicated by RegionObserver, null otherwise
-656   * @throws IOException
-657   */
-658  private Result append(final Region region, final OperationQuota quota,
-659  final MutationProto mutation, final CellScanner cellScanner, long nonceGroup,
-660  ActivePolicyEnforcement spaceQuota)
-661  throws IOException {
-662long before = EnvironmentEdgeManager.currentTime();
-663Append append = ProtobufUtil.toAppend(mutation, cellScanner);
-664checkCellSizeLimit(region, append);
-665spaceQuota.getPolicyEnforcement(region).check(append);
-666quota.addMutation(append);
-667Result r = null;
-668if (region.getCoprocessorHost() != null) {
-669  r = region.getCoprocessorHost().preAppend(append);
-670}
-671if (r == null) {
-672  boolean canProceed = startNonceOperation(mutation, nonceGroup);
-673  boolean success = false;
-674  try {
-675long nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE;
-676if (canProceed) {
-677  r = region.append(append, nonceGroup, nonce);
-678} else {
-679  // convert duplicate append to get
-680  List<Cell> results = region.get(ProtobufUtil.toGet(mutation, cellScanner), false,
-681nonceGroup, nonce);
-682  r = Result.create(results);
-683}
-684success = true;
-685  } finally {
-686if (canProceed) {
-687  endNonceOperation(mutation, nonceGroup, success);
-688}
-689  }
-690  if (region.getCoprocessorHost() != null) {
-691r = region.getCoprocessorHost().postAppend(append, r);
-692  }
+650   * @return result to return to client if default operation should be
+651   * bypassed as indicated by RegionObserver, null otherwise
+652   * @throws IOException
+653   */
+654  private Result append(final Region region, final OperationQuota quota,
+655  final MutationProto mutation, final CellScanner cellScanner, long nonceGroup,
+656  ActivePolicyEnforcement spaceQuota)
+657  throws IOException {
+658long before = EnvironmentEdgeManager.currentTime();
+659Append append = ProtobufUtil.toAppend(mutation, cellScanner);
+660checkCellSizeLimit(region, append);
+661spaceQuota.getPolicyEnforcement(region).check(append);
+662quota.addMutation(append);
+663Result r = null;
+664if (region.getCoprocessorHost() != null) {
+665  r = region.getCoprocessorHost().preAppend(append);
+666}
+667if (r == null) {
+668  boolean canProceed = startNonceOperation(mutation, nonceGroup);
+669  boolean success = false;
+670  try {
+671long nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE;
+672if (canProceed) {
+673  r = region.append(append, nonceGroup, nonce);
+674} else {
+675  // convert duplicate append to get
+676  List<Cell> results = region.get(ProtobufUtil.toGet(mutation, cellScanner), false,
+677nonceGroup, 
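In the quoted implementation, startNonceOperation/endNonceOperation bracket the region append so that a retried RPC carrying the same nonce group and nonce is not applied twice; the duplicate is answered by converting the append into a Get of the affected cells. A minimal client-side sketch of issuing an append follows, assuming a hypothetical table name; nonce generation for appends is handled by the client connection rather than by application code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("demo"))) {  // hypothetical table
      // If this RPC is retried with the same nonce, the server path quoted above
      // answers from a Get instead of applying the append a second time.
      Append append = new Append(Bytes.toBytes("row-1"))
          .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("-suffix"));
      Result r = table.append(append);
      System.out.println(r);
    }
  }
}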

[14/26] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fdcfc8d5/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.UserQuotasVisitor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.UserQuotasVisitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.UserQuotasVisitor.html
index 3c6f9b8..7d3deb8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.UserQuotasVisitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.UserQuotasVisitor.html
@@ -39,731 +39,772 @@
 031import org.apache.commons.logging.Log;
 032import org.apache.commons.logging.LogFactory;
 033import org.apache.hadoop.hbase.Cell;
-034import org.apache.hadoop.hbase.NamespaceDescriptor;
-035import org.apache.hadoop.hbase.ServerName;
-036import org.apache.hadoop.hbase.TableName;
-037import org.apache.hadoop.hbase.classification.InterfaceAudience;
-038import org.apache.hadoop.hbase.classification.InterfaceStability;
-039import org.apache.hadoop.hbase.client.ClusterConnection;
-040import org.apache.hadoop.hbase.client.Connection;
-041import org.apache.hadoop.hbase.client.Get;
-042import org.apache.hadoop.hbase.client.Put;
-043import org.apache.hadoop.hbase.client.QuotaStatusCalls;
-044import org.apache.hadoop.hbase.client.Result;
-045import org.apache.hadoop.hbase.client.ResultScanner;
-046import org.apache.hadoop.hbase.client.Scan;
-047import org.apache.hadoop.hbase.client.Table;
-048import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
-049import org.apache.hadoop.hbase.filter.CompareFilter;
-050import org.apache.hadoop.hbase.filter.Filter;
-051import org.apache.hadoop.hbase.filter.FilterList;
-052import org.apache.hadoop.hbase.filter.QualifierFilter;
-053import org.apache.hadoop.hbase.filter.RegexStringComparator;
-054import org.apache.hadoop.hbase.filter.RowFilter;
-055import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-056import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-057import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
-058import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-059import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-060import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-061import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
-062import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
-063import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
-064import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
-065import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.TableQuotaSnapshot;
-066import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
-067import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-068import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
-069import org.apache.hadoop.hbase.util.Bytes;
-070import org.apache.hadoop.hbase.util.Strings;
-071
-072/**
-073 * Helper class to interact with the quota table.
-074 * <table>
-075 *   <tr><th>ROW-KEY</th><th>FAM/QUAL</th><th>DATA</th></tr>
-076 *   <tr><td>n.&lt;namespace&gt;</td><td>q:s</td><td>&lt;global-quotas&gt;</td></tr>
-077 *   <tr><td>n.&lt;namespace&gt;</td><td>u:p</td><td>&lt;namespace-quota policy&gt;</td></tr>
-078 *   <tr><td>n.&lt;namespace&gt;</td><td>u:s</td><td>&lt;SpaceQuotaSnapshot&gt;</td></tr>
-079 *   <tr><td>t.&lt;table&gt;</td><td>q:s</td><td>&lt;global-quotas&gt;</td></tr>
-080 *   <tr><td>t.&lt;table&gt;</td><td>u:p</td><td>&lt;table-quota policy&gt;</td></tr>
-081 *   <tr><td>t.&lt;table&gt;</td><td>u:ss.&lt;snapshot name&gt;</td><td>&lt;SpaceQuotaSnapshot&gt;</td></tr>
-082 *   <tr><td>u.&lt;user&gt;</td><td>q:s</td><td>&lt;global-quotas&gt;</td></tr>
-083 *   <tr><td>u.&lt;user&gt;</td><td>q:s.&lt;table&gt;</td><td>&lt;table-quotas&gt;</td></tr>
-084 *   <tr><td>u.&lt;user&gt;</td><td>q:s.&lt;ns&gt;</td><td>&lt;namespace-quotas&gt;</td></tr>
-085 * </table>
-086 */
-087@InterfaceAudience.Private
-088@InterfaceStability.Evolving
-089public class QuotaTableUtil {
-090  private static final Log LOG = LogFactory.getLog(QuotaTableUtil.class);
-091
-092  /** System table for quotas */
-093  public static final TableName QUOTA_TABLE_NAME =
-094  TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "quota");
-095
-096  protected static final byte[] QUOTA_FAMILY_INFO = Bytes.toBytes("q");
-097  protected static final byte[] QUOTA_FAMILY_USAGE = Bytes.toBytes("u");
-098  protected static final byte[] QUOTA_QUALIFIER_SETTINGS = Bytes.toBytes("s");
-099  protected static final byte[] QUOTA_QUALIFIER_SETTINGS_PREFIX = Bytes.toBytes("s.");
-100  protected static final byte[] QUOTA_QUALIFIER_POLICY = Bytes.toBytes("p");
-101  protected static final byte[] 
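The Javadoc above spells out the row-key layout of the system quota table (hbase:quota, per QUOTA_TABLE_NAME): namespace rows are prefixed "n.", table rows "t.", and user rows "u.", with quota settings in family "q" (qualifier "s") and usage/policy data in family "u". A minimal sketch that reads a user's global quota cell directly using only that layout follows; the user name is hypothetical, and the supported way to read quotas from client code is the quota API (for example QuotaRetriever) rather than raw Gets.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class QuotaRowSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table quotaTable = conn.getTable(TableName.valueOf("hbase:quota"))) {
      // Per the layout above: user rows are keyed "u.<user>", and global quota
      // settings live in family "q", qualifier "s".
      Get get = new Get(Bytes.toBytes("u." + "alice"));            // "alice" is a hypothetical user
      get.addColumn(Bytes.toBytes("q"), Bytes.toBytes("s"));
      Result result = quotaTable.get(get);
      byte[] serialized = result.getValue(Bytes.toBytes("q"), Bytes.toBytes("s"));
      // The cell value is a protobuf-serialized Quotas message (see QuotaProtos.Quotas).
      System.out.println(serialized == null ? "no quota set" : "quota bytes: " + serialized.length);
    }
  }
}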

[14/26] hbase-site git commit: Published site at e916b79db58bb9be806a833b2c0e675f1136c15a.

2017-04-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b75efae/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/class-use/PluginProtos.CodeGeneratorRequest.Builder.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/class-use/PluginProtos.CodeGeneratorRequest.Builder.html b/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/class-use/PluginProtos.CodeGeneratorRequest.Builder.html
index a9be101..4babfa1 100644
--- a/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/class-use/PluginProtos.CodeGeneratorRequest.Builder.html
+++ b/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/class-use/PluginProtos.CodeGeneratorRequest.Builder.html
@@ -176,27 +176,34 @@
 
 
 PluginProtos.CodeGeneratorRequest.Builder
-PluginProtos.CodeGeneratorRequest.Builder.clearField(org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field)
+PluginProtos.CodeGeneratorRequest.Builder.clearCompilerVersion()
+
+ The version number of protocol compiler.
+
 
 
 PluginProtos.CodeGeneratorRequest.Builder
+PluginProtos.CodeGeneratorRequest.Builder.clearField(org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field)
+
+
+PluginProtos.CodeGeneratorRequest.Builder
 PluginProtos.CodeGeneratorRequest.Builder.clearFileToGenerate()
 
  The .proto files that were explicitly listed on the command-line.
 
 
-
+
 PluginProtos.CodeGeneratorRequest.Builder
PluginProtos.CodeGeneratorRequest.Builder.clearOneof(org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof)
 
-
+
 PluginProtos.CodeGeneratorRequest.Builder
 PluginProtos.CodeGeneratorRequest.Builder.clearParameter()
 
  The generator parameter passed on the command-line.
 
 
-
+
 PluginProtos.CodeGeneratorRequest.Builder
 PluginProtos.CodeGeneratorRequest.Builder.clearProtoFile()
 
@@ -204,10 +211,17 @@
  they import.
 
 
-
+
 PluginProtos.CodeGeneratorRequest.Builder
 PluginProtos.CodeGeneratorRequest.Builder.clone()
 
+
+PluginProtos.CodeGeneratorRequest.Builder
+PluginProtos.CodeGeneratorRequest.Builder.mergeCompilerVersion(PluginProtos.Version value)
+
+ The version number of protocol compiler.
+
+
 
 PluginProtos.CodeGeneratorRequest.Builder
PluginProtos.CodeGeneratorRequest.Builder.mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
@@ -251,6 +265,20 @@
 
 
 PluginProtos.CodeGeneratorRequest.Builder
+PluginProtos.CodeGeneratorRequest.Builder.setCompilerVersion(PluginProtos.Version.Builder builderForValue)
+
+ The version number of protocol compiler.
+
+
+
+PluginProtos.CodeGeneratorRequest.Builder
+PluginProtos.CodeGeneratorRequest.Builder.setCompilerVersion(PluginProtos.Version value)
+
+ The version number of protocol compiler.
+
+
+
+PluginProtos.CodeGeneratorRequest.Builder
 PluginProtos.CodeGeneratorRequest.Builder.setField(org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value)
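Obj">
The added rows above record the new compilerVersion accessors on the shaded PluginProtos.CodeGeneratorRequest.Builder. Below is a minimal sketch of assembling such a request with the builder; the version numbers and file name are illustrative, and the setMajor/setMinor/setPatch setters are assumed from the standard plugin.proto Version message.

import org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos;

public class CodeGeneratorRequestSketch {
  public static void main(String[] args) {
    // Version of the protocol compiler issuing the request (illustrative numbers).
    PluginProtos.Version version = PluginProtos.Version.newBuilder()
        .setMajor(3)
        .setMinor(2)
        .setPatch(0)
        .build();
    PluginProtos.CodeGeneratorRequest request = PluginProtos.CodeGeneratorRequest.newBuilder()
        .setCompilerVersion(version)            // new setter listed in the table above
        .addFileToGenerate("example.proto")     // hypothetical .proto file
        .setParameter("")                       // generator parameter from the command line
        .build();
    System.out.println(request.getCompilerVersion().getMajor());
  }
}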
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b75efae/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/package-frame.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/package-frame.html b/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/package-frame.html
index fe12821..1de0518 100644
--- a/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/package-frame.html
+++ b/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/package-frame.html
@@ -16,6 +16,7 @@
 PluginProtos.CodeGeneratorRequestOrBuilder
 PluginProtos.CodeGeneratorResponse.FileOrBuilder
 PluginProtos.CodeGeneratorResponseOrBuilder
+PluginProtos.VersionOrBuilder
 
 Classes
 
@@ -26,6 +27,8 @@
 PluginProtos.CodeGeneratorResponse.Builder
 PluginProtos.CodeGeneratorResponse.File
 PluginProtos.CodeGeneratorResponse.File.Builder
+PluginProtos.Version
+PluginProtos.Version.Builder
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b75efae/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/package-summary.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/package-summary.html b/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/package-summary.html
index 2ec28d0..3aa5878 100644
--- a/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/package-summary.html
+++ b/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/package-summary.html
@@ -94,6 +94,10 @@
 

[14/26] hbase-site git commit: Published site at 7c54525c89bbbe0c66401813433bfb957e461eac.

2016-03-01 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c115ab43/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
--
diff --git a/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html b/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
new file mode 100644
index 000..ecceeb3
--- /dev/null
+++ b/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
@@ -0,0 +1,1501 @@
+Apache HBase - Exemplar for hbase-shaded-client archetype - Reactor Dependency Convergence
+Last Published: 2016-03-01 | Version: 2.0.0-SNAPSHOT
+[Maven-generated site page head, navigation menu, and "Built by Maven" footer]
+Reactor Dependency Convergence
+
+  Legend:
+All projects share one version of the dependency.
+At least one project has a differing version of the dependency.
+
+  Statistics:
+Number of sub-projects: 28
+Number of dependencies (NOD): 83
+Number of unique artifacts (NOA): 83
+Number of SNAPSHOT artifacts (NOS): 0
+Convergence (NOD/NOA): 100%
+Ready for Release (100% Convergence and no SNAPSHOTS): Success
+
+Dependencies used in sub-projects
+
+com.github.stephenc.findbugs:findbugs-annotations
+1.3.9-1
+org.apache.hbase:hbase-annotations
+org.apache.hbase:hbase-archetype-builder
+org.apache.hbase:hbase-archetypes
+org.apache.hbase:hbase-assembly
+org.apache.hbase:hbase-checkstyle
+org.apache.hbase:hbase-client-project
+org.apache.hbase:hbase-client
+org.apache.hbase:hbase-common