[14/30] hbase-site git commit: Published site at 931156f66b1decc19d89f8bb3ce9e5f355fb4fb2.

2018-10-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b8b907f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
index 5b9d987..b595018 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
@@ -310,2495 +310,2488 @@
 302    }
 303
 304    this.storeEngine = createStoreEngine(this, this.conf, this.comparator);
-305    this.storeEngine.getStoreFileManager().loadFiles(loadStoreFiles());
-306
-307    // Initialize checksum type from name. The names are CRC32, CRC32C, etc.
-308    this.checksumType = getChecksumType(conf);
-309    // Initialize bytes per checksum
-310    this.bytesPerChecksum = getBytesPerChecksum(conf);
-311    flushRetriesNumber = conf.getInt(
-312        "hbase.hstore.flush.retries.number", DEFAULT_FLUSH_RETRIES_NUMBER);
-313    pauseTime = conf.getInt(HConstants.HBASE_SERVER_PAUSE, HConstants.DEFAULT_HBASE_SERVER_PAUSE);
-314    if (flushRetriesNumber <= 0) {
-315      throw new IllegalArgumentException(
-316          "hbase.hstore.flush.retries.number must be > 0, not "
-317              + flushRetriesNumber);
-318    }
-319    cryptoContext = EncryptionUtil.createEncryptionContext(conf, family);
-320
-321    int confPrintThreshold =
-322        this.conf.getInt("hbase.region.store.parallel.put.print.threshold", 50);
-323    if (confPrintThreshold < 10) {
-324      confPrintThreshold = 10;
+305    List<HStoreFile> hStoreFiles = loadStoreFiles();
+306    // Move the storeSize calculation out of loadStoreFiles() method, because the secondary read
+307    // replica's refreshStoreFiles() will also use loadStoreFiles() to refresh its store files and
+308    // update the storeSize in the completeCompaction(..) finally (just like compaction), so
+309    // no need to calculate the storeSize twice.
+310    this.storeSize.addAndGet(getStorefilesSize(hStoreFiles, sf -> true));
+311    this.totalUncompressedBytes.addAndGet(getTotalUmcompressedBytes(hStoreFiles));
+312    this.storeEngine.getStoreFileManager().loadFiles(hStoreFiles);
+313
+314    // Initialize checksum type from name. The names are CRC32, CRC32C, etc.
+315    this.checksumType = getChecksumType(conf);
+316    // Initialize bytes per checksum
+317    this.bytesPerChecksum = getBytesPerChecksum(conf);
+318    flushRetriesNumber = conf.getInt(
+319        "hbase.hstore.flush.retries.number", DEFAULT_FLUSH_RETRIES_NUMBER);
+320    pauseTime = conf.getInt(HConstants.HBASE_SERVER_PAUSE, HConstants.DEFAULT_HBASE_SERVER_PAUSE);
+321    if (flushRetriesNumber <= 0) {
+322      throw new IllegalArgumentException(
+323          "hbase.hstore.flush.retries.number must be > 0, not "
+324              + flushRetriesNumber);
 325    }
-326    this.parallelPutCountPrintThreshold = confPrintThreshold;
-327    LOG.info("Store={},  memstore type={}, storagePolicy={}, verifyBulkLoads={}, "
-328        + "parallelPutCountPrintThreshold={}, encoding={}, compression={}",
-329        getColumnFamilyName(), memstore.getClass().getSimpleName(), policyName, verifyBulkLoads,
-330        parallelPutCountPrintThreshold, family.getDataBlockEncoding(),
-331        family.getCompressionType());
-332  }
-333
-334  /**
-335   * @return MemStore Instance to use in this store.
-336   */
-337  private MemStore getMemstore() {
-338    MemStore ms = null;
-339    // Check if in-memory-compaction configured. Note MemoryCompactionPolicy is an enum!
-340    MemoryCompactionPolicy inMemoryCompaction = null;
-341    if (this.getTableName().isSystemTable()) {
-342      inMemoryCompaction = MemoryCompactionPolicy.valueOf(
-343          conf.get("hbase.systemtables.compacting.memstore.type", "NONE"));
-344    } else {
-345      inMemoryCompaction = family.getInMemoryCompaction();
-346    }
-347    if (inMemoryCompaction == null) {
-348      inMemoryCompaction =
-349          MemoryCompactionPolicy.valueOf(conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-350              CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT).toUpperCase());
-351    }
-352    switch (inMemoryCompaction) {
-353      case NONE:
-354        ms = ReflectionUtils.newInstance(DefaultMemStore.class,
-355            new Object[] { conf, this.comparator,
-356                this.getHRegion().getRegionServicesForStores()});
-357        break;
-358      default:
-359        Class<? extends CompactingMemStore> clz = conf.getClass(MEMSTORE_CLASS_NAME,
-360            CompactingMemStore.class, CompactingMemStore.class);
-361        ms = ReflectionUtils.newInstance(clz, new Object[]{conf, this.comparator, this,
-362

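The +305..+312 hunk above loads the store files once, aggregates both size counters, and only then hands the list to the store-file manager, so a secondary replica's refreshStoreFiles() cannot double-count the store size. A minimal standalone Java sketch of that ordering; StoreFileInfo here is a hypothetical stand-in for HStoreFile, not an HBase type:

    import java.util.List;
    import java.util.concurrent.atomic.AtomicLong;
    import java.util.function.Predicate;

    // Hypothetical stand-in for HStoreFile; just enough to show the pattern.
    record StoreFileInfo(long onDiskBytes, long uncompressedBytes) {}

    class StoreSizeBookkeeping {
        private final AtomicLong storeSize = new AtomicLong();
        private final AtomicLong totalUncompressedBytes = new AtomicLong();

        // Load once, aggregate once, then publish -- mirrors the hunk above,
        // where loadFiles() runs only after both size counters are updated.
        void initialize(List<StoreFileInfo> files) {
            storeSize.addAndGet(storefilesSize(files, sf -> true));
            totalUncompressedBytes.addAndGet(
                files.stream().mapToLong(StoreFileInfo::uncompressedBytes).sum());
            // storeEngine.getStoreFileManager().loadFiles(files) would follow here.
        }

        private static long storefilesSize(List<StoreFileInfo> files, Predicate<StoreFileInfo> p) {
            return files.stream().filter(p).mapToLong(StoreFileInfo::onDiskBytes).sum();
        }
    }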
[14/30] hbase-site git commit: Published site at a8e184dc77470bdf9d62e19c5d36bc1de7cf4c6d.

2018-07-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5fd895c6/devapidocs/org/apache/hadoop/hbase/security/User.SecureHadoopUser.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/security/User.SecureHadoopUser.html b/devapidocs/org/apache/hadoop/hbase/security/User.SecureHadoopUser.html
index 79f9b25..fe18026 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/User.SecureHadoopUser.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/User.SecureHadoopUser.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":9,"i1":10,"i2":10,"i3":9,"i4":9,"i5":10,"i6":10};
+var methods = {"i0":9,"i1":10,"i2":10,"i3":9,"i4":9,"i5":9,"i6":10,"i7":10};
var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public static final class User.SecureHadoopUser
+public static final class User.SecureHadoopUser
 extends User
 Bridges User invocations to underlying calls to UserGroupInformation for secure Hadoop
@@ -247,12 +247,19 @@ extends 
 
 
+static void
+login(String keytabLocation, String principalName)
+Login through configured keytab and principal.
+
+
+
 <T> T
 runAs(PrivilegedAction<T> action)
 Executes the given action within the context of this user.


-
+
 <T> T
 runAs(PrivilegedExceptionAction<T> action)
 Executes the given action within the context of this user.
@@ -264,7 +271,7 @@ extends 
 
 Methods inherited from class org.apache.hadoop.hbase.security.User
-addToken, create, equals, getCurrent, getName, getToken, getTokens, getUGI, hashCode, isHBaseSecurityEnabled, runAsLoginUser, toString
+addToken, create, equals, getCurrent, getName, getToken, getTokens, getUGI, hashCode, isHBaseSecurityEnabled, isLoginFromKeytab, runAsLoginUser, shouldLoginFromKeytab, toString
 
 
 
@@ -293,7 +300,7 @@ extends 
 
 shortName
-private String shortName
+private String shortName


@@ -302,7 +309,7 @@ extends

 cache
-private org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache<String,String[]> cache
+private org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache<String,String[]> cache


@@ -319,7 +326,7 @@ extends

 SecureHadoopUser
-public SecureHadoopUser()
+public SecureHadoopUser()
                  throws IOException

 Throws:
@@ -333,7 +340,7 @@ extends

 SecureHadoopUser
-public SecureHadoopUser(org.apache.hadoop.security.UserGroupInformation ugi)
+public SecureHadoopUser(org.apache.hadoop.security.UserGroupInformation ugi)


@@ -342,7 +349,7 @@ extends

 SecureHadoopUser
-public SecureHadoopUser(org.apache.hadoop.security.UserGroupInformation ugi,
+public SecureHadoopUser(org.apache.hadoop.security.UserGroupInformation ugi,
                         org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache<String,String[]> cache)


@@ -360,7 +367,7 @@ extends

 getShortName
-public String getShortName()
+public String getShortName()

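The diff above adds a static login(keytabLocation, principalName) to SecureHadoopUser, alongside the new isLoginFromKeytab/shouldLoginFromKeytab methods on User. A hedged usage sketch; the keytab path and principal are placeholders, and SecureHadoopUser is @InterfaceAudience.Private, so this is illustrative rather than a supported public entry point:

    import java.io.IOException;
    import org.apache.hadoop.hbase.security.User;

    public class KeytabLoginExample {
        public static void main(String[] args) throws IOException {
            // Placeholder keytab path and principal -- substitute real values.
            String keytab = "/etc/security/keytabs/hbase.service.keytab";
            String principal = "hbase/host.example.com@EXAMPLE.COM";
            // The static login(keytabLocation, principalName) from the method
            // summary above; it bridges to UserGroupInformation keytab login
            // for secure Hadoop, per the SecureHadoopUser class javadoc.
            User.SecureHadoopUser.login(keytab, principal);
            System.out.println("Current user: " + User.getCurrent().getName());
        }
    }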
[14/30] hbase-site git commit: Published site at 59867eeeebd28fcc49f338ef36769fb6a9bff4dc.

2018-07-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67e3bccd/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
index 6cd6a17..85dd23b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
@@ -2119,6526 +2119,6532 @@
 2111      }
 2112
 2113      status = TaskMonitor.get().createStatus("Compacting " + store + " in " + this);
-2114      if (this.closed.get()) {
-2115        String msg = "Skipping compaction on " + this + " because closed";
-2116        LOG.debug(msg);
-2117        status.abort(msg);
-2118        return false;
-2119      }
-2120      boolean wasStateSet = false;
-2121      try {
-2122        synchronized (writestate) {
-2123          if (writestate.writesEnabled) {
-2124            wasStateSet = true;
-2125            writestate.compacting.incrementAndGet();
-2126          } else {
-2127            String msg = "NOT compacting region " + this + ". Writes disabled.";
-2128            LOG.info(msg);
-2129            status.abort(msg);
-2130            return false;
-2131          }
-2132        }
-2133        LOG.info("Starting compaction of {} in {}{}", store, this,
-2134            (compaction.getRequest().isOffPeak() ? " as an off-peak compaction" : ""));
-2135        doRegionCompactionPrep();
-2136        try {
-2137          status.setStatus("Compacting store " + store);
-2138          // We no longer need to cancel the request on the way out of this
-2139          // method because Store#compact will clean up unconditionally
-2140          requestNeedsCancellation = false;
-2141          store.compact(compaction, throughputController, user);
-2142        } catch (InterruptedIOException iioe) {
-2143          String msg = "compaction interrupted";
-2144          LOG.info(msg, iioe);
-2145          status.abort(msg);
-2146          return false;
-2147        }
-2148      } finally {
-2149        if (wasStateSet) {
-2150          synchronized (writestate) {
-2151            writestate.compacting.decrementAndGet();
-2152            if (writestate.compacting.get() <= 0) {
-2153              writestate.notifyAll();
-2154            }
-2155          }
-2156        }
-2157      }
-2158      status.markComplete("Compaction complete");
-2159      return true;
-2160    } finally {
-2161      if (requestNeedsCancellation) store.cancelRequestedCompaction(compaction);
-2162      if (status != null) status.cleanup();
-2163    }
-2164  }
-2165
-2166  /**
-2167   * Flush the cache.
-2168   *
-2169   * <p>When this method is called the cache will be flushed unless:
-2170   * <ol>
-2171   *   <li>the cache is empty</li>
-2172   *   <li>the region is closed.</li>
-2173   *   <li>a flush is already in progress</li>
-2174   *   <li>writes are disabled</li>
-2175   * </ol>
-2176   *
-2177   * <p>This method may block for some time, so it should not be called from a
-2178   * time-sensitive thread.
-2179   * @param force whether we want to force a flush of all stores
-2180   * @return FlushResult indicating whether the flush was successful or not and if
-2181   * the region needs compacting
-2182   *
-2183   * @throws IOException general io exceptions
-2184   * because a snapshot was not properly persisted.
-2185   */
-2186  // TODO HBASE-18905. We might have to expose a requestFlush API for CPs
-2187  public FlushResult flush(boolean force) throws IOException {
-2188    return flushcache(force, false, FlushLifeCycleTracker.DUMMY);
-2189  }
-2190
-2191  public interface FlushResult {
-2192    enum Result {
-2193      FLUSHED_NO_COMPACTION_NEEDED,
-2194      FLUSHED_COMPACTION_NEEDED,
-2195      // Special case where a flush didn't run because there's nothing in the memstores. Used when
-2196      // bulk loading to know when we can still load even if a flush didn't happen.
-2197      CANNOT_FLUSH_MEMSTORE_EMPTY,
-2198      CANNOT_FLUSH
-2199    }
-2200
-2201    /** @return the detailed result code */
-2202    Result getResult();
-2203
-2204    /** @return true if the memstores were flushed, else false */
-2205    boolean isFlushSucceeded();
-2206
-2207    /** @return True if the flush requested a compaction, else false */
-2208    boolean isCompactionNeeded();
-2209  }
+2114      status.enableStatusJournal(false);
+2115      if (this.closed.get()) {
+2116        String msg = "Skipping compaction on " + this + " because closed";
+2117        LOG.debug(msg);
+2118        status.abort(msg);
+2119        return false;
+2120      }
+2121      boolean wasStateSet = false;
+2122      try {
+2123        synchronized (writestate) {
+2124          if

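The flush javadoc and FlushResult interface above spell out when a flush is skipped and how callers can tell the cases apart. A hedged caller-side sketch, assuming an open HRegion is at hand; error handling is elided:

    import java.io.IOException;
    import org.apache.hadoop.hbase.regionserver.HRegion;

    public class FlushResultExample {
        // Returns true when a caller (e.g. a bulk load) may proceed: either the
        // flush ran, or it was skipped only because the memstores were empty --
        // exactly the CANNOT_FLUSH_MEMSTORE_EMPTY case documented above.
        static boolean flushedOrEmpty(HRegion region) throws IOException {
            HRegion.FlushResult result = region.flush(true); // force all stores
            return result.isFlushSucceeded()
                || result.getResult() == HRegion.FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY;
        }
    }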
[14/30] hbase-site git commit: Published site at .

2018-01-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e80e3339/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
index 2bbae5e..9ddbcd4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
@@ -33,32 +33,32 @@
 025import java.net.BindException;
 026import java.net.InetAddress;
 027import java.net.InetSocketAddress;
-028import java.util.ArrayList;
-029import java.util.Collection;
-030import java.util.Collections;
-031import java.util.Comparator;
-032import java.util.HashSet;
-033import java.util.Iterator;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Map.Entry;
-037import java.util.Objects;
-038import java.util.Set;
-039import java.util.SortedMap;
-040import java.util.TreeMap;
-041import java.util.TreeSet;
-042import java.util.concurrent.ConcurrentHashMap;
-043import java.util.concurrent.ConcurrentMap;
-044import java.util.concurrent.ConcurrentSkipListMap;
-045import java.util.concurrent.CountDownLatch;
-046import java.util.concurrent.TimeUnit;
-047import java.util.concurrent.atomic.AtomicBoolean;
-048import java.util.concurrent.locks.ReentrantReadWriteLock;
-049import java.util.function.Function;
-050import javax.management.MalformedObjectNameException;
-051import javax.management.ObjectName;
-052import javax.servlet.http.HttpServlet;
-053import org.apache.commons.lang3.RandomUtils;
+028import java.time.Duration;
+029import java.util.ArrayList;
+030import java.util.Collection;
+031import java.util.Collections;
+032import java.util.Comparator;
+033import java.util.HashSet;
+034import java.util.Iterator;
+035import java.util.List;
+036import java.util.Map;
+037import java.util.Map.Entry;
+038import java.util.Objects;
+039import java.util.Set;
+040import java.util.SortedMap;
+041import java.util.TreeMap;
+042import java.util.TreeSet;
+043import java.util.concurrent.ConcurrentHashMap;
+044import java.util.concurrent.ConcurrentMap;
+045import java.util.concurrent.ConcurrentSkipListMap;
+046import java.util.concurrent.atomic.AtomicBoolean;
+047import java.util.concurrent.locks.ReentrantReadWriteLock;
+048import java.util.function.Function;
+049import javax.management.MalformedObjectNameException;
+050import javax.management.ObjectName;
+051import javax.servlet.http.HttpServlet;
+052import org.apache.commons.lang3.RandomUtils;
+053import org.apache.commons.lang3.StringUtils;
 054import org.apache.commons.lang3.SystemUtils;
 055import org.apache.hadoop.conf.Configuration;
 056import org.apache.hadoop.fs.FileSystem;
@@ -177,16 +177,16 @@
 169import org.apache.hadoop.ipc.RemoteException;
 170import org.apache.hadoop.metrics2.util.MBeans;
 171import org.apache.hadoop.util.ReflectionUtils;
-172import org.apache.hadoop.util.StringUtils;
-173import org.apache.yetus.audience.InterfaceAudience;
-174import org.apache.zookeeper.KeeperException;
-175import org.slf4j.Logger;
-176import org.slf4j.LoggerFactory;
-177import sun.misc.Signal;
-178import sun.misc.SignalHandler;
-179
-180import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-181import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+172import org.apache.yetus.audience.InterfaceAudience;
+173import org.apache.zookeeper.KeeperException;
+174import org.slf4j.Logger;
+175import org.slf4j.LoggerFactory;
+176import sun.misc.Signal;
+177import sun.misc.SignalHandler;
+178
+179import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+180import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+181import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
 182import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 183import org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel;
 184import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
@@ -393,13 +393,13 @@
 385  final AtomicBoolean online = new AtomicBoolean(false);
 386
 387  // zookeeper connection and watcher
-388  protected ZKWatcher zooKeeper;
+388  protected final ZKWatcher zooKeeper;
 389
 390  // master address tracker
-391  private MasterAddressTracker masterAddressTracker;
+391  private final MasterAddressTracker masterAddressTracker;
 392
 393  // Cluster Status Tracker
-394  protected ClusterStatusTracker clusterStatusTracker;
+394  protected final ClusterStatusTracker clusterStatusTracker;
 395
 396  // Log Splitting Worker
 397  private SplitLogWorker splitLogWorker;
@@ -532,3227 +532,3219 @@
 524  private 

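The last hunk above makes the ZooKeeper watcher and both trackers final. A minimal sketch of the motivation, with Object stand-ins for the HBase types: a field assigned exactly once in the constructor can be declared final, and the Java memory model then guarantees it is safely published to any thread that sees the object, with no extra synchronization:

    import java.util.concurrent.atomic.AtomicBoolean;

    // Sketch only; ZKWatcher and ClusterStatusTracker are replaced by Object.
    class ServerState {
        final AtomicBoolean online = new AtomicBoolean(false);
        private final Object zooKeeper;            // stand-in for ZKWatcher
        private final Object clusterStatusTracker; // stand-in for ClusterStatusTracker

        ServerState(Object zooKeeper, Object clusterStatusTracker) {
            this.zooKeeper = zooKeeper;                       // set once, never reassigned
            this.clusterStatusTracker = clusterStatusTracker;
        }

        Object zooKeeper() {
            return zooKeeper;
        }
    }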
[14/30] hbase-site git commit: Published site at .

2017-09-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc5c2985/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
index 6848d28..69caaf6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
@@ -212,7941 +212,7899 @@
 204  public static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY =
 205      "hbase.hregion.scan.loadColumnFamiliesOnDemand";
 206
-207  public static final String HREGION_UNASSIGN_FOR_FNFE = "hbase.hregion.unassign.for.fnfe";
-208  public static final boolean DEFAULT_HREGION_UNASSIGN_FOR_FNFE = true;
+207  public static final String HBASE_MAX_CELL_SIZE_KEY = "hbase.server.keyvalue.maxsize";
+208  public static final int DEFAULT_MAX_CELL_SIZE = 10485760;
 209
-210  public static final String HBASE_MAX_CELL_SIZE_KEY = "hbase.server.keyvalue.maxsize";
-211  public static final int DEFAULT_MAX_CELL_SIZE = 10485760;
-212
-213  /**
-214   * This is the global default value for durability. All tables/mutations not
-215   * defining a durability or using USE_DEFAULT will default to this value.
-216   */
-217  private static final Durability DEFAULT_DURABILITY = Durability.SYNC_WAL;
-218
-219  final AtomicBoolean closed = new AtomicBoolean(false);
-220
-221  /* Closing can take some time; use the closing flag if there is stuff we don't
-222   * want to do while in closing state; e.g. like offer this region up to the
-223   * master as a region to close if the carrying regionserver is overloaded.
-224   * Once set, it is never cleared.
-225   */
-226  final AtomicBoolean closing = new AtomicBoolean(false);
-227
-228  /**
-229   * The max sequence id of flushed data on this region. There is no edit in memory that is
-230   * less than this sequence id.
-231   */
-232  private volatile long maxFlushedSeqId = HConstants.NO_SEQNUM;
-233
-234  /**
-235   * Record the sequence id of last flush operation. Can be in advance of
-236   * {@link #maxFlushedSeqId} when flushing a single column family. In this case,
-237   * {@link #maxFlushedSeqId} will be older than the oldest edit in memory.
-238   */
-239  private volatile long lastFlushOpSeqId = HConstants.NO_SEQNUM;
-240
-241  /**
-242   * The sequence id of the last replayed open region event from the primary region. This is used
-243   * to skip entries before this due to the possibility of replay edits coming out of order from
-244   * replication.
-245   */
-246  protected volatile long lastReplayedOpenRegionSeqId = -1L;
-247  protected volatile long lastReplayedCompactionSeqId = -1L;
-248
-249  //////////////////////////////////////////////////////////////////////////////
-250  // Members
-251  //////////////////////////////////////////////////////////////////////////////
-252
-253  // map from a locked row to the context for that lock including:
-254  // - CountDownLatch for threads waiting on that row
-255  // - the thread that owns the lock (allow reentrancy)
-256  // - reference count of (reentrant) locks held by the thread
-257  // - the row itself
-258  private final ConcurrentHashMap<HashedBytes, RowLockContext> lockedRows =
-259      new ConcurrentHashMap<>();
+210  /**
+211   * This is the global default value for durability. All tables/mutations not
+212   * defining a durability or using USE_DEFAULT will default to this value.
+213   */
+214  private static final Durability DEFAULT_DURABILITY = Durability.SYNC_WAL;
+215
+216  final AtomicBoolean closed = new AtomicBoolean(false);
+217
+218  /* Closing can take some time; use the closing flag if there is stuff we don't
+219   * want to do while in closing state; e.g. like offer this region up to the
+220   * master as a region to close if the carrying regionserver is overloaded.
+221   * Once set, it is never cleared.
+222   */
+223  final AtomicBoolean closing = new AtomicBoolean(false);
+224
+225  /**
+226   * The max sequence id of flushed data on this region. There is no edit in memory that is
+227   * less than this sequence id.
+228   */
+229  private volatile long maxFlushedSeqId = HConstants.NO_SEQNUM;
+230
+231  /**
+232   * Record the sequence id of last flush operation. Can be in advance of
+233   * {@link #maxFlushedSeqId} when flushing a single column family. In this case,
+234   * {@link #maxFlushedSeqId} will be older than the oldest edit in memory.
+235   */
+236  private volatile long lastFlushOpSeqId = HConstants.NO_SEQNUM;
+237
+238  /**
+239   * The sequence id of the last replayed open region event from the primary region. This is used
+240   * to skip entries before this due to the possibility of replay

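After this change, HBASE_MAX_CELL_SIZE_KEY ("hbase.server.keyvalue.maxsize") with DEFAULT_MAX_CELL_SIZE = 10485760 caps individual cell size at 10 MiB by default. A hedged sketch of reading and overriding that limit in configuration; the 16 MiB value is purely illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MaxCellSizeExample {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Same key as HBASE_MAX_CELL_SIZE_KEY above; the default matches
            // DEFAULT_MAX_CELL_SIZE (10485760 bytes = 10 MiB).
            conf.setInt("hbase.server.keyvalue.maxsize", 16 * 1024 * 1024);
            System.out.println(conf.getInt("hbase.server.keyvalue.maxsize", 10485760));
        }
    }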
[14/30] hbase-site git commit: Published site at .

2017-09-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bb8bd01/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/plugins.html
--
diff --git a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/plugins.html b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/plugins.html
index 60246b7..a01f1ae 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/plugins.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/plugins.html
@@ -7,7 +7,7 @@


-
+

 Apache HBase - Exemplar for hbase-shaded-client archetype - Project Plugins

@@ -226,7 +226,7 @@
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

-  Last Published: 2017-09-17
+  Last Published: 2017-09-18
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bb8bd01/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/project-info.html
--
diff --git a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/project-info.html b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/project-info.html
index 1d21aa4..61340d5 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/project-info.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/project-info.html
@@ -7,7 +7,7 @@


-
+

 Apache HBase - Exemplar for hbase-shaded-client archetype - Project Information

@@ -167,7 +167,7 @@
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

-  Last Published: 2017-09-17
+  Last Published: 2017-09-18
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bb8bd01/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/project-reports.html
--
diff --git a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/project-reports.html b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/project-reports.html
index 67ba257..473fbf4 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/project-reports.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/project-reports.html
@@ -7,7 +7,7 @@


-
+

 Apache HBase - Exemplar for hbase-shaded-client archetype - Generated Reports

@@ -128,7 +128,7 @@
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

-  Last Published: 2017-09-17
+  Last Published: 2017-09-18
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bb8bd01/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/project-summary.html
--
diff --git a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/project-summary.html b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/project-summary.html
index c998456..4967fa6 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/project-summary.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/project-summary.html
@@ -7,7 +7,7 @@


-
+

 Apache HBase - Exemplar for hbase-shaded-client archetype - Project Summary

@@ -166,7 +166,7 @@
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

-  Last Published: 2017-09-17
+  Last Published: 2017-09-18
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bb8bd01/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/source-repository.html
--
diff --git a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/source-repository.html b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/source-repository.html
index 4e0361a..e402a6f 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/source-repository.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/source-repository.html
@@ -7,7 +7,7 @@
   
 
   

[14/30] hbase-site git commit: Published site at .

2017-08-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/495ddb86/devapidocs/src-html/org/apache/hadoop/hbase/security/visibility/VisibilityController.DeleteVersionVisibilityExpressionFilter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/security/visibility/VisibilityController.DeleteVersionVisibilityExpressionFilter.html b/devapidocs/src-html/org/apache/hadoop/hbase/security/visibility/VisibilityController.DeleteVersionVisibilityExpressionFilter.html
index a8f5704..7230e95 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/security/visibility/VisibilityController.DeleteVersionVisibilityExpressionFilter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/security/visibility/VisibilityController.DeleteVersionVisibilityExpressionFilter.html
@@ -1082,56 +1082,60 @@
 1074    public ReturnCode filterKeyValue(Cell cell) throws IOException {
 1075      List<Tag> putVisTags = new ArrayList<>();
 1076      Byte putCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(cell, putVisTags);
-1077      boolean matchFound = VisibilityLabelServiceManager
-1078          .getInstance().getVisibilityLabelService()
-1079          .matchVisibility(putVisTags, putCellVisTagsFormat, deleteCellVisTags,
-1080              deleteCellVisTagsFormat);
-1081      return matchFound ? ReturnCode.INCLUDE : ReturnCode.SKIP;
-1082    }
-1083  }
-1084
-1085  /**
-1086   * A RegionServerObserver impl that provides the custom
-1087   * VisibilityReplicationEndpoint. This class should be configured as the
-1088   * 'hbase.coprocessor.regionserver.classes' for the visibility tags to be
-1089   * replicated as string.  The value for the configuration should be
-1090   * 'org.apache.hadoop.hbase.security.visibility.VisibilityController$VisibilityReplication'.
-1091   */
-1092  public static class VisibilityReplication implements RegionServerObserver {
-1093    private Configuration conf;
-1094    private VisibilityLabelService visibilityLabelService;
-1095
-1096    @Override
-1097    public void start(CoprocessorEnvironment env) throws IOException {
-1098      this.conf = env.getConfiguration();
-1099      visibilityLabelService = VisibilityLabelServiceManager.getInstance()
-1100          .getVisibilityLabelService(this.conf);
-1101    }
-1102
-1103    @Override
-1104    public void stop(CoprocessorEnvironment env) throws IOException {
+1077      if (putVisTags.isEmpty() && deleteCellVisTags.isEmpty()) {
+1078        // Early out if there are no tags in the cell
+1079        return ReturnCode.INCLUDE;
+1080      }
+1081      boolean matchFound = VisibilityLabelServiceManager
+1082          .getInstance().getVisibilityLabelService()
+1083          .matchVisibility(putVisTags, putCellVisTagsFormat, deleteCellVisTags,
+1084              deleteCellVisTagsFormat);
+1085      return matchFound ? ReturnCode.INCLUDE : ReturnCode.SKIP;
+1086    }
+1087  }
+1088
+1089  /**
+1090   * A RegionServerObserver impl that provides the custom
+1091   * VisibilityReplicationEndpoint. This class should be configured as the
+1092   * 'hbase.coprocessor.regionserver.classes' for the visibility tags to be
+1093   * replicated as string.  The value for the configuration should be
+1094   * 'org.apache.hadoop.hbase.security.visibility.VisibilityController$VisibilityReplication'.
+1095   */
+1096  public static class VisibilityReplication implements RegionServerObserver {
+1097    private Configuration conf;
+1098    private VisibilityLabelService visibilityLabelService;
+1099
+1100    @Override
+1101    public void start(CoprocessorEnvironment env) throws IOException {
+1102      this.conf = env.getConfiguration();
+1103      visibilityLabelService = VisibilityLabelServiceManager.getInstance()
+1104          .getVisibilityLabelService(this.conf);
 1105    }
 1106
 1107    @Override
-1108    public ReplicationEndpoint postCreateReplicationEndPoint(
-1109        ObserverContext<RegionServerCoprocessorEnvironment> ctx, ReplicationEndpoint endpoint) {
-1110      return new VisibilityReplicationEndpoint(endpoint, visibilityLabelService);
-1111    }
-1112  }
-1113
-1114  /**
-1115   * @param t
-1116   * @return NameValuePair of the exception name to stringified version of exception.
-1117   */
-1118  // Copied from ResponseConverter and made private. Only used in here.
-1119  private static NameBytesPair buildException(final Throwable t) {
-1120    NameBytesPair.Builder parameterBuilder = NameBytesPair.newBuilder();
-1121    parameterBuilder.setName(t.getClass().getName());
-1122    parameterBuilder.setValue(
-1123      ByteString.copyFromUtf8(StringUtils.stringifyException(t)));
-1124    return parameterBuilder.build();
-1125  }
-1126}
+1108    public void stop(CoprocessorEnvironment env) throws IOException {
+1109    }
+1110
+1111    @Override
+1112    public ReplicationEndpoint postCreateReplicationEndPoint(
+1113

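The +1077..+1080 hunk short-circuits filterKeyValue when neither the stored cell nor the delete marker carries visibility tags, avoiding a VisibilityLabelService round trip. A minimal sketch of the same guard with generic stand-in types rather than HBase's Tag and label-service API:

    import java.util.Collections;
    import java.util.List;
    import java.util.function.BiPredicate;

    class VisibilityMatchGuard {
        enum ReturnCode { INCLUDE, SKIP }

        // Mirrors the early out above: with no visibility tags on either side
        // there is nothing to match, so include the cell without consulting
        // the (costlier) label service.
        static ReturnCode filter(List<String> putVisTags, List<String> deleteCellVisTags,
                BiPredicate<List<String>, List<String>> labelService) {
            if (putVisTags.isEmpty() && deleteCellVisTags.isEmpty()) {
                return ReturnCode.INCLUDE; // early out, no label-service call
            }
            return labelService.test(putVisTags, deleteCellVisTags)
                ? ReturnCode.INCLUDE : ReturnCode.SKIP;
        }

        public static void main(String[] args) {
            // Both tag lists empty -> INCLUDE, and the matcher is never invoked.
            System.out.println(filter(Collections.emptyList(), Collections.emptyList(),
                (p, d) -> { throw new AssertionError("matcher should not run"); }));
        }
    }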
[14/30] hbase-site git commit: Published site at .

2017-08-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cef8af03/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
index 7070913..244b038 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
@@ -123,3372 +123,3373 @@
 115import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 116import org.apache.hadoop.hbase.security.Superusers;
 117import org.apache.hadoop.hbase.security.User;
-118import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-119import org.apache.hadoop.hbase.shaded.com.google.common.cache.Cache;
-120import org.apache.hadoop.hbase.shaded.com.google.common.cache.CacheBuilder;
-121import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-122import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
-123import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-124import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-125import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
-126import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-127import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-128import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-129import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-130import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-131import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-132import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-133import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-134import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-135import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-136import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-137import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
-138import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
-139import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-140import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-141import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-142import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-143import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-144import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-145import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-146import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-147import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest;
-148import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
-149import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest;
-150import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse;
-151import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
-152import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
-153import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
-154import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState;
-155import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
-156import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
-157import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-158import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-159import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-160import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-161import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-162import

[14/30] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7f23ee04/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.html b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.html
index 5e9c67d..552ee2f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.html
@@ -69,125 +69,136 @@
 061  }
 062
 063  @Override
-064  protected void tryStartNewShipperThread(String walGroupId, PriorityBlockingQueue<Path> queue) {
-065    final RecoveredReplicationSourceShipperThread worker =
-066        new RecoveredReplicationSourceShipperThread(conf, walGroupId, queue, this,
+064  protected void tryStartNewShipper(String walGroupId, PriorityBlockingQueue<Path> queue) {
+065    final RecoveredReplicationSourceShipper worker =
+066        new RecoveredReplicationSourceShipper(conf, walGroupId, queue, this,
 067            this.replicationQueues);
-068    ReplicationSourceShipperThread extant = workerThreads.putIfAbsent(walGroupId, worker);
+068    ReplicationSourceShipper extant = workerThreads.putIfAbsent(walGroupId, worker);
 069    if (extant != null) {
 070      LOG.debug("Someone has beat us to start a worker thread for wal group " + walGroupId);
 071    } else {
 072      LOG.debug("Starting up worker for wal group " + walGroupId);
 073      worker.startup(getUncaughtExceptionHandler());
 074      worker.setWALReader(
-075          startNewWALReaderThread(worker.getName(), walGroupId, queue, worker.getStartPosition()));
+075          startNewWALReader(worker.getName(), walGroupId, queue, worker.getStartPosition()));
 076      workerThreads.put(walGroupId, worker);
 077    }
 078  }
 079
-080  public void locateRecoveredPaths(PriorityBlockingQueue<Path> queue) throws IOException {
-081    boolean hasPathChanged = false;
-082    PriorityBlockingQueue<Path> newPaths =
-083        new PriorityBlockingQueue<Path>(queueSizePerGroup, new LogsComparator());
-084    pathsLoop: for (Path path : queue) {
-085      if (fs.exists(path)) { // still in same location, don't need to do anything
-086        newPaths.add(path);
-087        continue;
-088      }
-089      // Path changed - try to find the right path.
-090      hasPathChanged = true;
-091      if (stopper instanceof ReplicationSyncUp.DummyServer) {
-092        // In the case of disaster/recovery, HMaster may be shutdown/crashed before flush data
-093        // from .logs to .oldlogs. Loop into .logs folders and check whether a match exists
-094        Path newPath = getReplSyncUpPath(path);
-095        newPaths.add(newPath);
-096        continue;
-097      } else {
-098        // See if Path exists in the dead RS folder (there could be a chain of failures
-099        // to look at)
-100        List<String> deadRegionServers = this.replicationQueueInfo.getDeadRegionServers();
-101        LOG.info("NB dead servers : " + deadRegionServers.size());
-102        final Path walDir = FSUtils.getWALRootDir(conf);
-103        for (String curDeadServerName : deadRegionServers) {
-104          final Path deadRsDirectory =
-105              new Path(walDir, AbstractFSWALProvider.getWALDirectoryName(curDeadServerName));
-106          Path[] locs = new Path[] { new Path(deadRsDirectory, path.getName()), new Path(
-107              deadRsDirectory.suffix(AbstractFSWALProvider.SPLITTING_EXT), path.getName()) };
-108          for (Path possibleLogLocation : locs) {
-109            LOG.info("Possible location " + possibleLogLocation.toUri().toString());
-110            if (manager.getFs().exists(possibleLogLocation)) {
-111              // We found the right new location
-112              LOG.info("Log " + path + " still exists at " + possibleLogLocation);
-113              newPaths.add(possibleLogLocation);
-114              continue pathsLoop;
-115            }
-116          }
-117        }
-118        // didn't find a new location
-119        LOG.error(
-120          String.format("WAL Path %s doesn't exist and couldn't find its new location", path));
-121        newPaths.add(path);
-122      }
-123    }
-124
-125    if (hasPathChanged) {
-126      if (newPaths.size() != queue.size()) { // this shouldn't happen
-127        LOG.error("Recovery queue size is incorrect");
-128        throw new IOException("Recovery queue size error");
-129      }
-130      // put the correct locations in the queue
-131      // since this is a recovered queue with no new incoming logs,
-132      // there shouldn't be any concurrency issues
-133      queue.clear();
-134      for (Path path : newPaths) {
-135        queue.add(path);
-136      }

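locateRecoveredPaths above re-resolves each queued WAL that is no longer at its recorded path by probing the dead server's WAL directory and its "-splitting" variant (AbstractFSWALProvider.SPLITTING_EXT), falling back to the original path when nothing is found. A hedged standalone sketch of that probe order using java.nio instead of Hadoop's FileSystem; the directory layout is illustrative:

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.List;
    import java.util.Optional;

    class RecoveredWalLocator {
        // Probe candidates in the same order as locateRecoveredPaths:
        // <walRoot>/<deadServer>/<log> first, then <walRoot>/<deadServer>-splitting/<log>.
        // Returns empty when the log has moved somewhere we cannot see.
        static Optional<Path> locate(Path walRoot, List<String> deadServers, String logName) {
            for (String server : deadServers) {
                for (Path candidate : List.of(
                        walRoot.resolve(server).resolve(logName),
                        walRoot.resolve(server + "-splitting").resolve(logName))) {
                    if (Files.exists(candidate)) {
                        return Optional.of(candidate); // found the relocated WAL
                    }
                }
            }
            return Optional.empty();
        }
    }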
[14/30] hbase-site git commit: Published site at 28cd48b673ca743d193874b2951bc995699e8e89.

2016-02-24 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/89b638a4/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.html
index 46c00c7..049b2e1 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.html
@@ -33,10 +33,10 @@
 025  requiredArguments = {
 026    @org.jamon.annotations.Argument(name = "regionServer", type = "HRegionServer")},
 027  optionalArguments = {
-028    @org.jamon.annotations.Argument(name = "format", type = "String"),
-029    @org.jamon.annotations.Argument(name = "filter", type = "String"),
-030    @org.jamon.annotations.Argument(name = "bcv", type = "String"),
-031    @org.jamon.annotations.Argument(name = "bcn", type = "String")})
+028    @org.jamon.annotations.Argument(name = "bcv", type = "String"),
+029    @org.jamon.annotations.Argument(name = "bcn", type = "String"),
+030    @org.jamon.annotations.Argument(name = "format", type = "String"),
+031    @org.jamon.annotations.Argument(name = "filter", type = "String")})
 032public class RSStatusTmpl
 033  extends org.jamon.AbstractTemplateProxy
 034{
@@ -77,74 +77,74 @@
 069      return m_regionServer;
 070    }
 071    private HRegionServer m_regionServer;
-072    // 22, 1
-073    public void setFormat(String format)
+072    // 24, 1
+073    public void setBcv(String bcv)
 074    {
-075      // 22, 1
-076      m_format = format;
-077      m_format__IsNotDefault = true;
+075      // 24, 1
+076      m_bcv = bcv;
+077      m_bcv__IsNotDefault = true;
 078    }
-079    public String getFormat()
+079    public String getBcv()
 080    {
-081      return m_format;
+081      return m_bcv;
 082    }
-083    private String m_format;
-084    public boolean getFormat__IsNotDefault()
+083    private String m_bcv;
+084    public boolean getBcv__IsNotDefault()
 085    {
-086      return m_format__IsNotDefault;
+086      return m_bcv__IsNotDefault;
 087    }
-088    private boolean m_format__IsNotDefault;
-089    // 21, 1
-090    public void setFilter(String filter)
+088    private boolean m_bcv__IsNotDefault;
+089    // 23, 1
+090    public void setBcn(String bcn)
 091    {
-092      // 21, 1
-093      m_filter = filter;
-094      m_filter__IsNotDefault = true;
+092      // 23, 1
+093      m_bcn = bcn;
+094      m_bcn__IsNotDefault = true;
 095    }
-096    public String getFilter()
+096    public String getBcn()
 097    {
-098      return m_filter;
+098      return m_bcn;
 099    }
-100    private String m_filter;
-101    public boolean getFilter__IsNotDefault()
+100    private String m_bcn;
+101    public boolean getBcn__IsNotDefault()
 102    {
-103      return m_filter__IsNotDefault;
+103      return m_bcn__IsNotDefault;
 104    }
-105    private boolean m_filter__IsNotDefault;
-106    // 24, 1
-107    public void setBcv(String bcv)
+105    private boolean m_bcn__IsNotDefault;
+106    // 22, 1
+107    public void setFormat(String format)
 108    {
-109      // 24, 1
-110      m_bcv = bcv;
-111      m_bcv__IsNotDefault = true;
+109      // 22, 1
+110      m_format = format;
+111      m_format__IsNotDefault = true;
 112    }
-113    public String getBcv()
+113    public String getFormat()
 114    {
-115      return m_bcv;
+115      return m_format;
 116    }
-117    private String m_bcv;
-118    public boolean getBcv__IsNotDefault()
+117    private String m_format;
+118    public boolean getFormat__IsNotDefault()
 119    {
-120      return m_bcv__IsNotDefault;
+120      return m_format__IsNotDefault;
 121    }
-122    private boolean m_bcv__IsNotDefault;
-123    // 23, 1
-124    public void setBcn(String bcn)
+122    private boolean m_format__IsNotDefault;
+123    // 21, 1
+124    public void setFilter(String filter)
 125    {
-126      // 23, 1
-127      m_bcn = bcn;
-128      m_bcn__IsNotDefault = true;
+126      // 21, 1
+127      m_filter = filter;
+128      m_filter__IsNotDefault = true;
 129    }
-130    public String getBcn()
+130    public String getFilter()
 131    {
-132      return m_bcn;
+132      return m_filter;
 133    }
-134    private String m_bcn;
-135    public boolean getBcn__IsNotDefault()
+134    private String m_filter;
+135    public boolean getFilter__IsNotDefault()
 136    {
-137      return m_bcn__IsNotDefault;
+137      return m_filter__IsNotDefault;
 138    }
-139    private boolean m_bcn__IsNotDefault;
+139    private boolean m_filter__IsNotDefault;
 140  }
 141  @Override
 142  protected org.jamon.AbstractTemplateProxy.ImplData makeImplData()
@@ -156,31 +156,31 @@
 148    return (ImplData) super.getImplData();
 149  }
 150
-151  protected String format;
-152  public final

[14/30] hbase-site git commit: Published site at 845d00a16bc22cced0a2eead3d0ba48989968fb6.

2016-01-27 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6d411951/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
index 2a6a981..9da7c42 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
@@ -67,15 +67,15 @@
 059  requiredArguments = {
 060    @org.jamon.annotations.Argument(name = "master", type = "HMaster")},
 061  optionalArguments = {
-062    @org.jamon.annotations.Argument(name = "frags", type = "Map<String,Integer>"),
-063    @org.jamon.annotations.Argument(name = "metaLocation", type = "ServerName"),
-064    @org.jamon.annotations.Argument(name = "assignmentManager", type = "AssignmentManager"),
+062    @org.jamon.annotations.Argument(name = "deadServers", type = "Set<ServerName>"),
+063    @org.jamon.annotations.Argument(name = "filter", type = "String"),
+064    @org.jamon.annotations.Argument(name = "catalogJanitorEnabled", type = "boolean"),
 065    @org.jamon.annotations.Argument(name = "serverManager", type = "ServerManager"),
-066    @org.jamon.annotations.Argument(name = "catalogJanitorEnabled", type = "boolean"),
-067    @org.jamon.annotations.Argument(name = "format", type = "String"),
-068    @org.jamon.annotations.Argument(name = "filter", type = "String"),
+066    @org.jamon.annotations.Argument(name = "format", type = "String"),
+067    @org.jamon.annotations.Argument(name = "metaLocation", type = "ServerName"),
+068    @org.jamon.annotations.Argument(name = "assignmentManager", type = "AssignmentManager"),
 069    @org.jamon.annotations.Argument(name = "servers", type = "List<ServerName>"),
-070    @org.jamon.annotations.Argument(name = "deadServers", type = "Set<ServerName>")})
+070    @org.jamon.annotations.Argument(name = "frags", type = "Map<String,Integer>")})
 071public class MasterStatusTmpl
 072  extends org.jamon.AbstractTemplateProxy
 073{
@@ -116,57 +116,57 @@
 108      return m_master;
 109    }
 110    private HMaster m_master;
-111    // 21, 1
-112    public void setFrags(Map<String,Integer> frags)
+111    // 24, 1
+112    public void setDeadServers(Set<ServerName> deadServers)
 113    {
-114      // 21, 1
-115      m_frags = frags;
-116      m_frags__IsNotDefault = true;
+114      // 24, 1
+115      m_deadServers = deadServers;
+116      m_deadServers__IsNotDefault = true;
 117    }
-118    public Map<String,Integer> getFrags()
+118    public Set<ServerName> getDeadServers()
 119    {
-120      return m_frags;
+120      return m_deadServers;
 121    }
-122    private Map<String,Integer> m_frags;
-123    public boolean getFrags__IsNotDefault()
+122    private Set<ServerName> m_deadServers;
+123    public boolean getDeadServers__IsNotDefault()
 124    {
-125      return m_frags__IsNotDefault;
+125      return m_deadServers__IsNotDefault;
 126    }
-127    private boolean m_frags__IsNotDefault;
-128    // 22, 1
-129    public void setMetaLocation(ServerName metaLocation)
+127    private boolean m_deadServers__IsNotDefault;
+128    // 26, 1
+129    public void setFilter(String filter)
 130    {
-131      // 22, 1
-132      m_metaLocation = metaLocation;
-133      m_metaLocation__IsNotDefault = true;
+131      // 26, 1
+132      m_filter = filter;
+133      m_filter__IsNotDefault = true;
 134    }
-135    public ServerName getMetaLocation()
+135    public String getFilter()
 136    {
-137      return m_metaLocation;
+137      return m_filter;
 138    }
-139    private ServerName m_metaLocation;
-140    public boolean getMetaLocation__IsNotDefault()
+139    private String m_filter;
+140    public boolean getFilter__IsNotDefault()
 141    {
-142      return m_metaLocation__IsNotDefault;
+142      return m_filter__IsNotDefault;
 143    }
-144    private boolean m_metaLocation__IsNotDefault;
-145    // 29, 1
-146    public void setAssignmentManager(AssignmentManager assignmentManager)
+144    private boolean m_filter__IsNotDefault;
+145    // 25, 1
+146    public void setCatalogJanitorEnabled(boolean catalogJanitorEnabled)
 147    {
-148      // 29, 1
-149      m_assignmentManager = assignmentManager;
-150      m_assignmentManager__IsNotDefault = true;
+148      // 25, 1
+149      m_catalogJanitorEnabled = catalogJanitorEnabled;
+150      m_catalogJanitorEnabled__IsNotDefault = true;
 151    }
-152    public AssignmentManager getAssignmentManager()
+152    public boolean getCatalogJanitorEnabled()
 153    {
-154      return m_assignmentManager;
+154      return m_catalogJanitorEnabled;
 155    }
-156    private AssignmentManager m_assignmentManager;
-157    public boolean getAssignmentManager__IsNotDefault()
 156