[13/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
index eb90a1f..e3d6f54 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
@@ -75,557 +75,583 @@
 067    });
 068  }
 069
-070  public static <TEnv> void restart(final ProcedureExecutor<TEnv> procExecutor) throws Exception {
-071    restart(procExecutor, false, true, null, null, null);
-072  }
-073
-074  public static void initAndStartWorkers(ProcedureExecutor<?> procExecutor, int numThreads,
-075      boolean abortOnCorruption) throws IOException {
-076    procExecutor.init(numThreads, abortOnCorruption);
-077    procExecutor.startWorkers();
+070  public static <TEnv> void restart(final ProcedureExecutor<TEnv> procExecutor,
+071      boolean abort, boolean startWorkers) throws Exception {
+072    restart(procExecutor, false, true, null, null, null, abort, startWorkers);
+073  }
+074
+075  public static <TEnv> void restart(final ProcedureExecutor<TEnv> procExecutor,
+076      boolean abort) throws Exception {
+077    restart(procExecutor, false, true, null, null, null, abort, true);
 078  }
 079
-080  public static <TEnv> void restart(ProcedureExecutor<TEnv> procExecutor,
-081      boolean avoidTestKillDuringRestart, boolean failOnCorrupted, Callable<Void> stopAction,
-082      Callable<Void> actionBeforeStartWorker, Callable<Void> startAction)
-083      throws Exception {
-084    final ProcedureStore procStore = procExecutor.getStore();
-085    final int storeThreads = procExecutor.getCorePoolSize();
-086    final int execThreads = procExecutor.getCorePoolSize();
-087
-088    final ProcedureExecutor.Testing testing = procExecutor.testing;
-089    if (avoidTestKillDuringRestart) {
-090      procExecutor.testing = null;
-091    }
-092
-093    // stop
-094    LOG.info("RESTART - Stop");
-095    procExecutor.stop();
-096    procStore.stop(false);
-097    if (stopAction != null) {
-098      stopAction.call();
-099    }
-100    procExecutor.join();
-101    procExecutor.getScheduler().clear();
-102
-103    // nothing running...
-104
-105    // re-start
-106    LOG.info("RESTART - Start");
-107    procStore.start(storeThreads);
-108    procExecutor.init(execThreads, failOnCorrupted);
-109    if (actionBeforeStartWorker != null) {
-110      actionBeforeStartWorker.call();
-111    }
-112    procExecutor.startWorkers();
-113    if (startAction != null) {
-114      startAction.call();
+080  public static <TEnv> void restart(final ProcedureExecutor<TEnv> procExecutor) throws Exception {
+081    restart(procExecutor, false, true, null, null, null, false, true);
+082  }
+083
+084  public static void initAndStartWorkers(ProcedureExecutor<?> procExecutor, int numThreads,
+085      boolean abortOnCorruption) throws IOException {
+086    initAndStartWorkers(procExecutor, numThreads, abortOnCorruption, true);
+087  }
+088
+089  public static void initAndStartWorkers(ProcedureExecutor<?> procExecutor, int numThreads,
+090      boolean abortOnCorruption, boolean startWorkers) throws IOException {
+091    procExecutor.init(numThreads, abortOnCorruption);
+092    if (startWorkers) {
+093      procExecutor.startWorkers();
+094    }
+095  }
+096
+097  public static <TEnv> void restart(ProcedureExecutor<TEnv> procExecutor,
+098      boolean avoidTestKillDuringRestart, boolean failOnCorrupted, Callable<Void> stopAction,
+099      Callable<Void> actionBeforeStartWorker, Callable<Void> startAction) throws Exception {
+100    restart(procExecutor, avoidTestKillDuringRestart, failOnCorrupted, stopAction,
+101      actionBeforeStartWorker, startAction, false, true);
+102  }
+103
+104  public static <TEnv> void restart(ProcedureExecutor<TEnv> procExecutor,
+105      boolean avoidTestKillDuringRestart, boolean failOnCorrupted, Callable<Void> stopAction,
+106      Callable<Void> actionBeforeStartWorker, Callable<Void> startAction, boolean abort,
+107      boolean startWorkers) throws Exception {
+108    final ProcedureStore procStore = procExecutor.getStore();
+109    final int storeThreads = procExecutor.getCorePoolSize();
+110    final int execThreads = procExecutor.getCorePoolSize();
+111
+112    final ProcedureExecutor.Testing testing = procExecutor.testing;
+113    if (avoidTestKillDuringRestart) {
+114      procExecutor.testing = null;
 115    }
 116
-117    if (avoidTestKillDuringRestart) {
-118      procExecutor.testing = testing;
-119    }
-120  }
-121
-122  public static void storeRestart(ProcedureStore procStore, 

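For orientation, a minimal sketch of how a test might drive the overloads added in this hunk. Only the ProcedureTestingUtility signatures come from the diff above; the helper class, method name, and the pre-built executor fixture are assumptions for illustration.

import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;

final class RestartOverloadSketch {
  // Hypothetical helper, not part of the commit: exercises the new startWorkers/abort flags.
  static <TEnv> void deferredStartThenRestart(ProcedureExecutor<TEnv> procExecutor) throws Exception {
    // init the executor but defer worker start (new startWorkers parameter = false)
    ProcedureTestingUtility.initAndStartWorkers(procExecutor, 1, true, false);
    // ... a test would load or submit procedures here ...
    procExecutor.startWorkers();
    // restart, aborting in-flight procedures and starting workers again (new abort/startWorkers flags)
    ProcedureTestingUtility.restart(procExecutor, true, true);
  }
}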
[13/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

2018-08-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
@@ -152,5137 +152,5182 @@
 144import org.apache.hadoop.util.ReflectionUtils;
 145import org.apache.hadoop.util.Tool;
 146import org.apache.hadoop.util.ToolRunner;
-147import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-148import org.apache.yetus.audience.InterfaceAudience;
-149import org.apache.yetus.audience.InterfaceStability;
-150import org.apache.zookeeper.KeeperException;
-151import org.slf4j.Logger;
-152import org.slf4j.LoggerFactory;
-153
-154import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-155import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-156import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-157import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-158import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-159import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-160import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-161
-162import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-163import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-164
-165/**
-166 * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
-167 * table integrity problems in a corrupted HBase.
-168 * <p>
-169 * Region consistency checks verify that hbase:meta, region deployment on region
-170 * servers and the state of data in HDFS (.regioninfo files) all are in
-171 * accordance.
+147import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+148import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
+149import org.apache.yetus.audience.InterfaceAudience;
+150import org.apache.yetus.audience.InterfaceStability;
+151import org.apache.zookeeper.KeeperException;
+152import org.slf4j.Logger;
+153import org.slf4j.LoggerFactory;
+154
+155import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
+156import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+157import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
+158import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+159import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
+160import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
+161import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
+162
+163import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+164import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
+165
+166/**
+167 * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
+168 * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not
+169 * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.
+170 * See hbck2 (HBASE-19121) for a hbck tool for hbase2.
+171 *
 172 * <p>
-173 * Table integrity checks verify that all possible row keys resolve to exactly
-174 * one region of a table.  This means there are no individual degenerate
-175 * or backwards regions; no holes between regions; and that there are no
-176 * overlapping regions.
-177 * <p>
-178 * The general repair strategy works in two phases:
-179 * <ol>
-180 * <li> Repair Table Integrity on HDFS. (merge or fabricate regions)
-181 * <li> Repair Region Consistency with hbase:meta and assignments
-182 * </ol>
-183 * <p>
-184 * For table integrity repairs, the tables' region directories are scanned
-185 * for .regioninfo files.  Each table's integrity is then verified.  If there
-186 * are any orphan regions (regions with no .regioninfo files) or holes, new
-187 * regions are fabricated.  Backwards regions are sidelined as well as empty
-188 * degenerate (endkey==startkey) regions.  If there are any overlapping regions,
-189 * a new region is created and all data is merged into the new region.
-190 * <p>
-191 * Table integrity repairs deal solely with HDFS and could potentially be done
-192 * offline -- the hbase region servers or master do not need to be running.
-193 * This phase can eventually be used to completely reconstruct the hbase:meta table in
-194 * an offline fashion.
-195 * <p>
-196 * Region consistency requires three conditions --

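As a plain-Java illustration of the table-integrity rules the javadoc above describes (no degenerate or backwards regions, no holes between consecutive regions), here is a small sketch over [startKey, endKey) pairs. It is illustrative only and is not HBaseFsck's own code; class and method names are invented.

import java.util.Arrays;

final class RegionChainChecks {
  // A "degenerate" region has endKey == startKey (empty keys mark table boundaries and are excluded).
  static boolean isDegenerate(byte[] startKey, byte[] endKey) {
    return endKey.length > 0 && Arrays.equals(startKey, endKey);
  }

  // A "backwards" region has endKey sorting before startKey.
  static boolean isBackwards(byte[] startKey, byte[] endKey) {
    return endKey.length > 0 && compare(startKey, endKey) > 0;
  }

  // A "hole" exists when the previous region's endKey does not meet the next region's startKey.
  static boolean isHoleBetween(byte[] prevEndKey, byte[] nextStartKey) {
    return !Arrays.equals(prevEndKey, nextStartKey);
  }

  // Unsigned lexicographic byte comparison, the ordering HBase row keys use.
  private static int compare(byte[] a, byte[] b) {
    int len = Math.min(a.length, b.length);
    for (int i = 0; i < len; i++) {
      int d = (a[i] & 0xff) - (b[i] & 0xff);
      if (d != 0) return d;
    }
    return a.length - b.length;
  }
}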
[13/37] hbase-site git commit: Published site at .

2017-10-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd10ee7c/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.Reader.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.Reader.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.Reader.html
index 8e10cf2..c2b4026 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.Reader.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.Reader.html
@@ -48,452 +48,480 @@
 040import org.apache.hadoop.hbase.util.CancelableProgressable;
 041import org.apache.hadoop.hbase.util.FSUtils;
 042import org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
-043
-044import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-045
-046/**
-047 * Base class of a WAL Provider that returns a single thread safe WAL that writes to Hadoop FS. By
-048 * default, this implementation picks a directory in Hadoop FS based on a combination of
-049 * <ul>
-050 * <li>the HBase root directory
-051 * <li>HConstants.HREGION_LOGDIR_NAME
-052 * <li>the given factory's factoryId (usually identifying the regionserver by host:port)
-053 * </ul>
-054 * It also uses the providerId to differentiate among files.
-055 */
-056@InterfaceAudience.Private
-057@InterfaceStability.Evolving
-058public abstract class AbstractFSWALProvider<T extends AbstractFSWAL<?>> implements WALProvider {
-059
-060  private static final Log LOG = LogFactory.getLog(AbstractFSWALProvider.class);
-061
-062  // Only public so classes back in regionserver.wal can access
-063  public interface Reader extends WAL.Reader {
-064    /**
-065     * @param fs File system.
-066     * @param path Path.
-067     * @param c Configuration.
-068     * @param s Input stream that may have been pre-opened by the caller; may be null.
-069     */
-070    void init(FileSystem fs, Path path, Configuration c, FSDataInputStream s) throws IOException;
-071  }
-072
-073  protected volatile T wal;
-074  protected WALFactory factory = null;
-075  protected Configuration conf = null;
-076  protected List<WALActionsListener> listeners = null;
-077  protected String providerId = null;
-078  protected AtomicBoolean initialized = new AtomicBoolean(false);
-079  // for default wal provider, logPrefix won't change
-080  protected String logPrefix = null;
-081
-082  /**
-083   * we synchronized on walCreateLock to prevent wal recreation in different threads
-084   */
-085  private final Object walCreateLock = new Object();
-086
-087  /**
-088   * @param factory factory that made us, identity used for FS layout. may not be null
-089   * @param conf may not be null
-090   * @param listeners may be null
-091   * @param providerId differentiate between providers from one factory, used for FS layout. may be
-092   *          null
-093   */
-094  @Override
-095  public void init(WALFactory factory, Configuration conf, List<WALActionsListener> listeners,
-096      String providerId) throws IOException {
-097    if (!initialized.compareAndSet(false, true)) {
-098      throw new IllegalStateException("WALProvider.init should only be called once.");
-099    }
-100    this.factory = factory;
-101    this.conf = conf;
-102    this.listeners = listeners;
-103    this.providerId = providerId;
-104    // get log prefix
-105    StringBuilder sb = new StringBuilder().append(factory.factoryId);
-106    if (providerId != null) {
-107      if (providerId.startsWith(WAL_FILE_NAME_DELIMITER)) {
-108        sb.append(providerId);
-109      } else {
-110        sb.append(WAL_FILE_NAME_DELIMITER).append(providerId);
-111      }
-112    }
-113    logPrefix = sb.toString();
-114    doInit(conf);
-115  }
-116
-117  @Override
-118  public List<WAL> getWALs() {
-119    if (wal == null) {
-120      return Collections.emptyList();
-121    }
-122    List<WAL> wals = new ArrayList<>(1);
-123    wals.add(wal);
-124    return wals;
-125  }
-126
-127  @Override
-128  public T getWAL(byte[] identifier, byte[] namespace) throws IOException {
-129    T walCopy = wal;
-130    if (walCopy == null) {
-131      // only lock when need to create wal, and need to lock since
-132      // creating hlog on fs is time consuming
-133      synchronized (walCreateLock) {
-134        walCopy = wal;
-135        if (walCopy == null) {
-136          walCopy = createWAL();
-137          wal = walCopy;
-138        }
-139      }
-140    }
-141    return walCopy;
-142  }
-143
-144  protected abstract T createWAL() throws IOException;
-145
-146  protected abstract void doInit(Configuration conf) throws IOException;
-147
-148  @Override
-149  public void shutdown() throws IOException {
-150    T log = this.wal;
-151    if (log != null) {
-152      log.shutdown();
-153    }
-154  }
-155
-156  @Override
-157  public void close() throws IOException {
-158    T log = this.wal;

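The getWAL() method in the hunk above uses the classic double-checked locking idiom for lazy creation: a volatile field, a local copy for the fast path, and a synchronized re-check before the expensive create. A self-contained sketch of the same pattern, not the HBase class itself:

final class LazySingleton<T> {
  private volatile T instance;
  private final Object createLock = new Object();
  private final java.util.function.Supplier<T> factory;

  LazySingleton(java.util.function.Supplier<T> factory) {
    this.factory = factory;
  }

  T get() {
    T copy = instance;              // single volatile read on the fast path
    if (copy == null) {
      synchronized (createLock) {   // lock only when creation may be needed
        copy = instance;
        if (copy == null) {
          copy = factory.get();     // the expensive step (createWAL() in the provider)
          instance = copy;
        }
      }
    }
    return copy;
  }
}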
[13/37] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6cafca90/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
index fe28fe2..86378be 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
@@ -1765,6408 +1765,6439 @@
 1757    }
 1758  }
 1759
-1760  protected ThreadPoolExecutor getStoreOpenAndCloseThreadPool(
-1761      final String threadNamePrefix) {
-1762    int numStores = Math.max(1, this.htableDescriptor.getFamilies().size());
-1763    int maxThreads = Math.min(numStores,
-1764        conf.getInt(HConstants.HSTORE_OPEN_AND_CLOSE_THREADS_MAX,
-1765            HConstants.DEFAULT_HSTORE_OPEN_AND_CLOSE_THREADS_MAX));
-1766    return getOpenAndCloseThreadPool(maxThreads, threadNamePrefix);
-1767  }
-1768
-1769  protected ThreadPoolExecutor getStoreFileOpenAndCloseThreadPool(
-1770      final String threadNamePrefix) {
-1771    int numStores = Math.max(1, this.htableDescriptor.getFamilies().size());
-1772    int maxThreads = Math.max(1,
-1773        conf.getInt(HConstants.HSTORE_OPEN_AND_CLOSE_THREADS_MAX,
-1774            HConstants.DEFAULT_HSTORE_OPEN_AND_CLOSE_THREADS_MAX)
-1775            / numStores);
-1776    return getOpenAndCloseThreadPool(maxThreads, threadNamePrefix);
-1777  }
-1778
-1779  static ThreadPoolExecutor getOpenAndCloseThreadPool(int maxThreads,
-1780      final String threadNamePrefix) {
-1781    return Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS,
-1782      new ThreadFactory() {
-1783        private int count = 1;
-1784
-1785        @Override
-1786        public Thread newThread(Runnable r) {
-1787          return new Thread(r, threadNamePrefix + "-" + count++);
-1788        }
-1789      });
+1760  @Override
+1761  public void waitForFlushes() {
+1762    synchronized (writestate) {
+1763      if (this.writestate.readOnly) {
+1764        // we should not wait for replayed flushed if we are read only (for example in case the
+1765        // region is a secondary replica).
+1766        return;
+1767      }
+1768      if (!writestate.flushing) return;
+1769      long start = System.currentTimeMillis();
+1770      boolean interrupted = false;
+1771      try {
+1772        while (writestate.flushing) {
+1773          LOG.debug("waiting for cache flush to complete for region " + this);
+1774          try {
+1775            writestate.wait();
+1776          } catch (InterruptedException iex) {
+1777            // essentially ignore and propagate the interrupt back up
+1778            LOG.warn("Interrupted while waiting");
+1779            interrupted = true;
+1780          }
+1781        }
+1782      } finally {
+1783        if (interrupted) {
+1784          Thread.currentThread().interrupt();
+1785        }
+1786      }
+1787      long duration = System.currentTimeMillis() - start;
+1788      LOG.debug("Waited " + duration + " ms for flush to complete");
+1789    }
 1790  }
-1791
-1792   /**
-1793    * @return True if its worth doing a flush before we put up the close flag.
-1794    */
-1795  private boolean worthPreFlushing() {
-1796    return this.memstoreDataSize.get() >
-1797      this.conf.getLong("hbase.hregion.preclose.flush.size", 1024 * 1024 * 5);
+1791  protected ThreadPoolExecutor getStoreOpenAndCloseThreadPool(
+1792      final String threadNamePrefix) {
+1793    int numStores = Math.max(1, this.htableDescriptor.getFamilies().size());
+1794    int maxThreads = Math.min(numStores,
+1795        conf.getInt(HConstants.HSTORE_OPEN_AND_CLOSE_THREADS_MAX,
+1796            HConstants.DEFAULT_HSTORE_OPEN_AND_CLOSE_THREADS_MAX));
+1797    return getOpenAndCloseThreadPool(maxThreads, threadNamePrefix);
 1798  }
 1799
-1800  //////////////////////////////////////////////////////////////////////////////
-1801  // HRegion accessors
-1802  //////////////////////////////////////////////////////////////////////////////
-1803
-1804  @Override
-1805  public HTableDescriptor getTableDesc() {
-1806    return this.htableDescriptor;
-1807  }
-1808
-1809  /** @return WAL in use for this region */
-1810  public WAL getWAL() {
-1811    return this.wal;
-1812  }
-1813
-1814  /**
-1815   * @return split policy for this region.
-1816   */
-1817  public RegionSplitPolicy getSplitPolicy() {
-1818    return this.splitPolicy;
-1819  }
-1820
-1821  /**
-1822   * A split takes the config from the parent region & passes it to the daughter
-1823   * region's constructor. If 'conf' was passed, you would end up using the HTD
-1824   * of the parent region in addition to the new daughter HTD. Pass 

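The waitForFlushes() body added above follows the usual wait-loop idiom: loop on the condition, note an interrupt while waiting instead of bailing out, and restore the thread's interrupt status before returning. A stripped-down, self-contained sketch of that idiom, not HRegion itself:

final class FlushWaiter {
  private final Object lock = new Object();
  private boolean flushing = true;

  void waitForFlush() {
    synchronized (lock) {
      boolean interrupted = false;
      try {
        while (flushing) {
          try {
            lock.wait();
          } catch (InterruptedException e) {
            interrupted = true;   // remember it, keep waiting for the flush to finish
          }
        }
      } finally {
        if (interrupted) {
          Thread.currentThread().interrupt();   // propagate the interrupt back up
        }
      }
    }
  }

  void flushDone() {
    synchronized (lock) {
      flushing = false;
      lock.notifyAll();
    }
  }
}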
[13/37] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a949969b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
index a1495ff..0c11099 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
@@ -839,1019 +839,1030 @@
 831  private long pushProcedure(final Procedure proc) {
 832    final long currentProcId = proc.getProcId();
 833
-834    // Create the rollback stack for the procedure
-835    RootProcedureState stack = new RootProcedureState();
-836    rollbackStack.put(currentProcId, stack);
-837
-838    // Submit the new subprocedures
-839    assert !procedures.containsKey(currentProcId);
-840    procedures.put(currentProcId, proc);
-841    sendProcedureAddedNotification(currentProcId);
-842    scheduler.addBack(proc);
-843    return proc.getProcId();
-844  }
-845
-846  /**
-847   * Send an abort notification the specified procedure.
-848   * Depending on the procedure implementation the abort can be considered or ignored.
-849   * @param procId the procedure to abort
-850   * @return true if the procedure exist and has received the abort, otherwise false.
-851   */
-852  public boolean abort(final long procId) {
-853    return abort(procId, true);
-854  }
-855
-856  /**
-857   * Send an abort notification the specified procedure.
-858   * Depending on the procedure implementation the abort can be considered or ignored.
-859   * @param procId the procedure to abort
-860   * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
-861   * @return true if the procedure exist and has received the abort, otherwise false.
-862   */
-863  public boolean abort(final long procId, final boolean mayInterruptIfRunning) {
-864    final Procedure proc = procedures.get(procId);
-865    if (proc != null) {
-866      if (!mayInterruptIfRunning && proc.wasExecuted()) {
-867        return false;
-868      }
-869      return proc.abort(getEnvironment());
-870    }
-871    return false;
-872  }
-873
-874  // ==========================================================================
-875  //  Executor query helpers
-876  // ==========================================================================
-877  public Procedure getProcedure(final long procId) {
-878    return procedures.get(procId);
-879  }
-880
-881  public <T extends Procedure> T getProcedure(final Class<T> clazz, final long procId) {
-882    final Procedure proc = getProcedure(procId);
-883    if (clazz.isInstance(proc)) {
-884      return (T)proc;
-885    }
-886    return null;
-887  }
-888
-889  public ProcedureInfo getResult(final long procId) {
-890    return completed.get(procId);
-891  }
-892
-893  /**
-894   * Return true if the procedure is finished.
-895   * The state may be "completed successfully" or "failed and rolledback".
-896   * Use getResult() to check the state or get the result data.
-897   * @param procId the ID of the procedure to check
-898   * @return true if the procedure execution is finished, otherwise false.
-899   */
-900  public boolean isFinished(final long procId) {
-901    return !procedures.containsKey(procId);
-902  }
-903
-904  /**
-905   * Return true if the procedure is started.
-906   * @param procId the ID of the procedure to check
-907   * @return true if the procedure execution is started, otherwise false.
-908   */
-909  public boolean isStarted(final long procId) {
-910    final Procedure proc = procedures.get(procId);
-911    if (proc == null) {
-912      return completed.get(procId) != null;
-913    }
-914    return proc.wasExecuted();
-915  }
-916
-917  /**
-918   * Mark the specified completed procedure, as ready to remove.
-919   * @param procId the ID of the procedure to remove
-920   */
-921  public void removeResult(final long procId) {
-922    final ProcedureInfo result = completed.get(procId);
-923    if (result == null) {
-924      assert !procedures.containsKey(procId) : "procId=" + procId + " is still running";
-925      if (LOG.isDebugEnabled()) {
-926        LOG.debug("procId=" + procId + " already removed by the cleaner.");
-927      }
-928      return;
-929    }
-930
-931    // The CompletedProcedureCleaner will take care of deletion, once the TTL is expired.
-932    result.setClientAckTime(EnvironmentEdgeManager.currentTime());
-933  }
-934
-935  public Pair<ProcedureInfo, Procedure> getResultOrProcedure(final long procId) {
-936    ProcedureInfo result = completed.get(procId);
-937    Procedure proc = null;
-938    if (result == null) {
-939      proc = procedures.get(procId);
-940      if (proc == null) {
-941        result = 

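A hedged sketch of how a caller might combine the query helpers documented in this hunk (isFinished, getResult, removeResult). The executor and the submitted procId are assumed to exist already; the helper class, method name, and polling interval are illustrative, and the result is held as Object to avoid pinning down the ProcedureInfo package in this revision.

import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

final class ProcedureWaitSketch {
  static Object waitForCompletion(ProcedureExecutor<?> procExecutor, long procId)
      throws InterruptedException {
    while (!procExecutor.isFinished(procId)) {
      Thread.sleep(100);                              // simple polling loop
    }
    Object result = procExecutor.getResult(procId);   // "completed successfully" or "failed and rolledback"
    procExecutor.removeResult(procId);                // ack so the cleaner can drop it after the TTL
    return result;
  }
}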
[13/37] hbase-site git commit: Published site at 7063562bf1c0bfcc5efa791b8536a298edab8fcb.

2016-02-16 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6a178a98/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
index c51826f..dac2d4d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
@@ -102,1153 +102,1116 @@
 094public final class Canary implements Tool {
 095  // Sink interface used by the canary to outputs information
 096  public interface Sink {
-097    public long getReadFailureCount();
-098    public void publishReadFailure(HRegionInfo region, Exception e);
-099    public void publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e);
-100    public void publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
-101    public long getWriteFailureCount();
-102    public void publishWriteFailure(HRegionInfo region, Exception e);
-103    public void publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception e);
-104    public void publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
-105  }
-106  // new extended sink for output regionserver mode info
-107  // do not change the Sink interface directly due to maintaining the API
-108  public interface ExtendedSink extends Sink {
-109    public void publishReadFailure(String table, String server);
-110    public void publishReadTiming(String table, String server, long msTime);
-111  }
-112
-113  // Simple implementation of canary sink that allows to plot on
-114  // file or standard output timings or failures.
-115  public static class StdOutSink implements Sink {
-116    protected AtomicLong readFailureCount = new AtomicLong(0),
-117        writeFailureCount = new AtomicLong(0);
+097    public void publishReadFailure(HRegionInfo region, Exception e);
+098    public void publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e);
+099    public void publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
+100    public void publishWriteFailure(HRegionInfo region, Exception e);
+101    public void publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception e);
+102    public void publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
+103  }
+104  // new extended sink for output regionserver mode info
+105  // do not change the Sink interface directly due to maintaining the API
+106  public interface ExtendedSink extends Sink {
+107    public void publishReadFailure(String table, String server);
+108    public void publishReadTiming(String table, String server, long msTime);
+109  }
+110
+111  // Simple implementation of canary sink that allows to plot on
+112  // file or standard output timings or failures.
+113  public static class StdOutSink implements Sink {
+114    @Override
+115    public void publishReadFailure(HRegionInfo region, Exception e) {
+116      LOG.error(String.format("read from region %s failed", region.getRegionNameAsString()), e);
+117    }
 118
 119    @Override
-120    public long getReadFailureCount() {
-121      return readFailureCount.get();
-122    }
-123
-124    @Override
-125    public void publishReadFailure(HRegionInfo region, Exception e) {
-126      readFailureCount.incrementAndGet();
-127      LOG.error(String.format("read from region %s failed", region.getRegionNameAsString()), e);
-128    }
-129
-130    @Override
-131    public void publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) {
-132      readFailureCount.incrementAndGet();
-133      LOG.error(String.format("read from region %s column family %s failed",
-134        region.getRegionNameAsString(), column.getNameAsString()), e);
-135    }
-136
-137    @Override
-138    public void publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime) {
-139      LOG.info(String.format("read from region %s column family %s in %dms",
-140        region.getRegionNameAsString(), column.getNameAsString(), msTime));
-141    }
-142
-143    @Override
-144    public long getWriteFailureCount() {
-145      return writeFailureCount.get();
+120    public void publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) {
+121      LOG.error(String.format("read from region %s column family %s failed",
+122        region.getRegionNameAsString(), column.getNameAsString()), e);
+123    }
+124
+125    @Override
+126    public void publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime) {
+127      LOG.info(String.format("read from region %s column family %s in %dms",
+128        region.getRegionNameAsString(),
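
For reference, a simplified, hypothetical sink in the same style as the counter-backed StdOutSink variant on one side of this hunk. It is not the HBase class, just the AtomicLong-per-failure pattern in isolation, with an invented interface and String region names so it runs on a plain JDK.

import java.util.concurrent.atomic.AtomicLong;

interface SimpleSink {
  long getReadFailureCount();
  void publishReadFailure(String region, Exception e);
  long getWriteFailureCount();
  void publishWriteFailure(String region, Exception e);
}

class CountingStdOutSink implements SimpleSink {
  // failure counters bumped by every publish*Failure call, readable by the caller afterwards
  private final AtomicLong readFailureCount = new AtomicLong(0);
  private final AtomicLong writeFailureCount = new AtomicLong(0);

  @Override public long getReadFailureCount() { return readFailureCount.get(); }

  @Override public void publishReadFailure(String region, Exception e) {
    readFailureCount.incrementAndGet();
    System.err.println("read from region " + region + " failed: " + e);
  }

  @Override public long getWriteFailureCount() { return writeFailureCount.get(); }

  @Override public void publishWriteFailure(String region, Exception e) {
    writeFailureCount.incrementAndGet();
    System.err.println("write to region " + region + " failed: " + e);
  }
}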