[16/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadAccessTestAction.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadAccessTestAction.html b/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadAccessTestAction.html
new file mode 100644
index 000..c479bc8
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadAccessTestAction.html
@@ -0,0 +1,339 @@
+TestAccessController.BulkLoadAccessTestAction (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+org.apache.hadoop.hbase.security.access
+Class TestAccessController.BulkLoadAccessTestAction
+
+java.lang.Object
+  org.apache.hadoop.hbase.security.access.TestAccessController.BulkLoadAccessTestAction
+
+All Implemented Interfaces:
+java.security.PrivilegedExceptionAction<java.lang.Object>, SecureTestUtil.AccessTestAction
+
+Enclosing class:
+TestAccessController
+
+private class TestAccessController.BulkLoadAccessTestAction
+extends java.lang.Object
+implements SecureTestUtil.AccessTestAction
+
+Field Summary
+
+Fields
+
+Modifier and Type                                       Field and Description
+private org.apache.hadoop.fs.permission.FsPermission    filePermission
+private org.apache.hadoop.fs.Path                       testDataDir
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+BulkLoadAccessTestAction(org.apache.hadoop.fs.permission.FsPermission perm,
+                         org.apache.hadoop.fs.Path testDataDir)
+
+Method Summary
+
+All Methods  Instance Methods  Concrete Methods
+
+Modifier and Type    Method and Description
+java.lang.Object     run()
+
+Methods inherited from class java.lang.Object:
+clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString,
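The summary above boils down to a small PrivilegedExceptionAction wrapper around two fields and a run() method. The following is only a hypothetical sketch of that shape: the field names and constructor follow the summary, but the run() body is a placeholder, not the actual bulk-load test logic from TestAccessController.

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

// Hypothetical sketch only: mirrors the field/constructor/method summary above.
// The real class is a private inner class of TestAccessController; its run()
// performs a secure bulk-load permission check, which is not reproduced here.
class BulkLoadAccessTestActionSketch implements PrivilegedExceptionAction<Object> {
  private final FsPermission filePermission; // permission applied to the staged test files
  private final Path testDataDir;            // directory holding the test data

  BulkLoadAccessTestActionSketch(FsPermission perm, Path testDataDir) {
    this.filePermission = perm;
    this.testDataDir = testDataDir;
  }

  @Override
  public Object run() throws Exception {
    // Placeholder body: the real test stages files under testDataDir with
    // filePermission and attempts a bulk load as the acting user.
    return null;
  }
}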

[16/37] hbase-site git commit: Published site at 409e742ac3bdbff027b136a87339f4f5511da07d.

2018-08-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7b180e2/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
index cd509b8..a957d31 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
@@ -152,5137 +152,5182 @@
 144import org.apache.hadoop.util.ReflectionUtils;
 145import org.apache.hadoop.util.Tool;
 146import org.apache.hadoop.util.ToolRunner;
-147import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-148import org.apache.yetus.audience.InterfaceAudience;
-149import org.apache.yetus.audience.InterfaceStability;
-150import org.apache.zookeeper.KeeperException;
-151import org.slf4j.Logger;
-152import org.slf4j.LoggerFactory;
-153
-154import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-155import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-156import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-157import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-158import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-159import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-160import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-161
-162import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-163import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-164
-165/**
-166 * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
-167 * table integrity problems in a corrupted HBase.
-168 * <p>
-169 * Region consistency checks verify that hbase:meta, region deployment on region
-170 * servers and the state of data in HDFS (.regioninfo files) all are in
-171 * accordance.
+147import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+148import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
+149import org.apache.yetus.audience.InterfaceAudience;
+150import org.apache.yetus.audience.InterfaceStability;
+151import org.apache.zookeeper.KeeperException;
+152import org.slf4j.Logger;
+153import org.slf4j.LoggerFactory;
+154
+155import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
+156import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+157import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
+158import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+159import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
+160import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
+161import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
+162
+163import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+164import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
+165
+166/**
+167 * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
+168 * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not
+169 * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.
+170 * See hbck2 (HBASE-19121) for a hbck tool for hbase2.
+171 *
 172 * <p>
-173 * Table integrity checks verify that all possible row keys resolve to exactly
-174 * one region of a table.  This means there are no individual degenerate
-175 * or backwards regions; no holes between regions; and that there are no
-176 * overlapping regions.
-177 * <p>
-178 * The general repair strategy works in two phases:
-179 * <ol>
-180 * <li> Repair Table Integrity on HDFS. (merge or fabricate regions)
-181 * <li> Repair Region Consistency with hbase:meta and assignments
-182 * </ol>
-183 * <p>
-184 * For table integrity repairs, the tables' region directories are scanned
-185 * for .regioninfo files.  Each table's integrity is then verified.  If there
-186 * are any orphan regions (regions with no .regioninfo files) or holes, new
-187 * regions are fabricated.  Backwards regions are sidelined as well as empty
-188 * degenerate (endkey==startkey) regions.  If there are any overlapping regions,
-189 * a new region is created and all data is merged into the new region.
-190 * <p>
-191 * Table integrity repairs deal solely with HDFS and could potentially be done
-192 * offline -- the hbase region servers or master do not need to be running.
-193 * This phase can eventually be used to completely reconstruct the hbase:meta table in
-194 * an offline fashion.
-195 * <p>
-196 * Region consistency requires three conditions -- 1) valid .regioninfo file
-197 * present in an HDFS region dir,
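For context on the class comment above: HBaseFsck is a Hadoop Tool (note the Tool/ToolRunner imports retained in this diff), so it is normally launched through ToolRunner. The snippet below is only a hedged sketch of that launch pattern; the HBaseFsck(Configuration) constructor and the "-details" argument are assumptions not taken from this diff, and on hbase-2.x the tool only reports state (see hbck2 / HBASE-19121 for repairs), so verify against the actual class or the hbck shell command before relying on it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.HBaseFsck;
import org.apache.hadoop.util.ToolRunner;

// Hedged sketch of the generic Hadoop Tool invocation pattern implied by the
// Tool/ToolRunner imports above. Constructor signature and "-details" flag are
// assumptions for illustration only.
public class HbckLaunchSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Report-only run; per the updated class comment, hbck cannot repair on hbase-2.x.
    int exitCode = ToolRunner.run(conf, new HBaseFsck(conf), new String[] { "-details" });
    System.exit(exitCode);
  }
}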

[16/37] hbase-site git commit: Published site at .

2017-10-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd10ee7c/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.WALEntryBatch.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.WALEntryBatch.html b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.WALEntryBatch.html
index 0ddf249..94a9131 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.WALEntryBatch.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.WALEntryBatch.html
@@ -135,378 +135,380 @@
 127  public void run() {
 128    int sleepMultiplier = 1;
 129    while (isReaderRunning()) { // we only loop back here if something fatal happened to our stream
-130      try (WALEntryStream entryStream = new WALEntryStream(logQueue, fs, conf, currentPosition,
-131          source.getWALFileLengthProvider(), source.getSourceMetrics())) {
-132        while (isReaderRunning()) { // loop here to keep reusing stream while we can
-133          if (!checkQuota()) {
-134            continue;
-135          }
-136          WALEntryBatch batch = readWALEntries(entryStream);
-137          if (batch != null && (!batch.getLastSeqIds().isEmpty() || batch.getNbEntries() > 0)) {
-138            if (LOG.isTraceEnabled()) {
-139              LOG.trace(String.format("Read %s WAL entries eligible for replication",
-140                batch.getNbEntries()));
-141            }
-142            entryBatchQueue.put(batch);
-143            sleepMultiplier = 1;
-144          } else { // got no entries and didn't advance position in WAL
-145            handleEmptyWALEntryBatch(batch, entryStream.getCurrentPath());
-146          }
-147          currentPosition = entryStream.getPosition();
-148          entryStream.reset(); // reuse stream
-149        }
-150      } catch (IOException e) { // stream related
-151        if (sleepMultiplier < maxRetriesMultiplier) {
-152          LOG.debug("Failed to read stream of replication entries: " + e);
-153          sleepMultiplier++;
-154        } else {
-155          LOG.error("Failed to read stream of replication entries", e);
-156          handleEofException(e);
-157        }
-158        Threads.sleep(sleepForRetries * sleepMultiplier);
-159      } catch (InterruptedException e) {
-160        LOG.trace("Interrupted while sleeping between WAL reads");
-161        Thread.currentThread().interrupt();
-162      }
-163    }
-164  }
-165
-166  private WALEntryBatch readWALEntries(WALEntryStream entryStream) throws IOException {
-167    WALEntryBatch batch = null;
-168    while (entryStream.hasNext()) {
-169      if (batch == null) {
-170        batch = new WALEntryBatch(replicationBatchCountCapacity, entryStream.getCurrentPath());
-171      }
-172      Entry entry = entryStream.next();
-173      if (updateSerialReplPos(batch, entry)) {
-174        batch.lastWalPosition = entryStream.getPosition();
-175        break;
-176      }
-177      entry = filterEntry(entry);
-178      if (entry != null) {
-179        WALEdit edit = entry.getEdit();
-180        if (edit != null && !edit.isEmpty()) {
-181          long entrySize = getEntrySize(entry);
-182          batch.addEntry(entry);
-183          updateBatchStats(batch, entry, entryStream.getPosition(), entrySize);
-184          boolean totalBufferTooLarge = acquireBufferQuota(entrySize);
-185          // Stop if too many entries or too big
-186          if (totalBufferTooLarge || batch.getHeapSize() >= replicationBatchSizeCapacity
-187            || batch.getNbEntries() >= replicationBatchCountCapacity) {
-188            break;
-189          }
-190        }
-191      }
-192    }
-193    return batch;
-194  }
-195
-196  protected void handleEmptyWALEntryBatch(WALEntryBatch batch, Path currentPath)
-197      throws InterruptedException {
-198    LOG.trace("Didn't read any new entries from WAL");
-199    Thread.sleep(sleepForRetries);
-200  }
-201
-202  // if we get an EOF due to a zero-length log, and there are other logs in queue
-203  // (highly likely we've closed the current log), we've hit the max retries, and autorecovery is
-204  // enabled, then dump the log
-205  private void handleEofException(IOException e) {
-206    if (e instanceof EOFException ||
-207        e.getCause() instanceof EOFException && logQueue.size() > 1 && this.eofAutoRecovery) {
-208      try {
-209        if (fs.getFileStatus(logQueue.peek()).getLen() == 0) {
-210          LOG.warn("Forcing removal of 0 length log in queue: " + logQueue.peek());
-211          logQueue.remove();
-212          currentPosition = 0;
-213        }
-214      } catch (IOException ioe) {
-215        LOG.warn("Couldn't get file length information about log " + logQueue.peek());
-216      }

[16/37] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6cafca90/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
index fe28fe2..86378be 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
@@ -1765,6408 +1765,6439 @@
 1757    }
 1758  }
 1759
-1760  protected ThreadPoolExecutor getStoreOpenAndCloseThreadPool(
-1761      final String threadNamePrefix) {
-1762    int numStores = Math.max(1, this.htableDescriptor.getFamilies().size());
-1763    int maxThreads = Math.min(numStores,
-1764        conf.getInt(HConstants.HSTORE_OPEN_AND_CLOSE_THREADS_MAX,
-1765            HConstants.DEFAULT_HSTORE_OPEN_AND_CLOSE_THREADS_MAX));
-1766    return getOpenAndCloseThreadPool(maxThreads, threadNamePrefix);
-1767  }
-1768
-1769  protected ThreadPoolExecutor getStoreFileOpenAndCloseThreadPool(
-1770      final String threadNamePrefix) {
-1771    int numStores = Math.max(1, this.htableDescriptor.getFamilies().size());
-1772    int maxThreads = Math.max(1,
-1773        conf.getInt(HConstants.HSTORE_OPEN_AND_CLOSE_THREADS_MAX,
-1774            HConstants.DEFAULT_HSTORE_OPEN_AND_CLOSE_THREADS_MAX)
-1775            / numStores);
-1776    return getOpenAndCloseThreadPool(maxThreads, threadNamePrefix);
-1777  }
-1778
-1779  static ThreadPoolExecutor getOpenAndCloseThreadPool(int maxThreads,
-1780      final String threadNamePrefix) {
-1781    return Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS,
-1782      new ThreadFactory() {
-1783        private int count = 1;
-1784
-1785        @Override
-1786        public Thread newThread(Runnable r) {
-1787          return new Thread(r, threadNamePrefix + "-" + count++);
-1788        }
-1789      });
+1760  @Override
+1761  public void waitForFlushes() {
+1762    synchronized (writestate) {
+1763      if (this.writestate.readOnly) {
+1764        // we should not wait for replayed flushed if we are read only (for example in case the
+1765        // region is a secondary replica).
+1766        return;
+1767      }
+1768      if (!writestate.flushing) return;
+1769      long start = System.currentTimeMillis();
+1770      boolean interrupted = false;
+1771      try {
+1772        while (writestate.flushing) {
+1773          LOG.debug("waiting for cache flush to complete for region " + this);
+1774          try {
+1775            writestate.wait();
+1776          } catch (InterruptedException iex) {
+1777            // essentially ignore and propagate the interrupt back up
+1778            LOG.warn("Interrupted while waiting");
+1779            interrupted = true;
+1780          }
+1781        }
+1782      } finally {
+1783        if (interrupted) {
+1784          Thread.currentThread().interrupt();
+1785        }
+1786      }
+1787      long duration = System.currentTimeMillis() - start;
+1788      LOG.debug("Waited " + duration + " ms for flush to complete");
+1789    }
 1790  }
-1791
-1792   /**
-1793    * @return True if its worth doing a flush before we put up the close flag.
-1794    */
-1795  private boolean worthPreFlushing() {
-1796    return this.memstoreDataSize.get() >
-1797      this.conf.getLong("hbase.hregion.preclose.flush.size", 1024 * 1024 * 5);
+1791  protected ThreadPoolExecutor getStoreOpenAndCloseThreadPool(
+1792      final String threadNamePrefix) {
+1793    int numStores = Math.max(1, this.htableDescriptor.getFamilies().size());
+1794    int maxThreads = Math.min(numStores,
+1795        conf.getInt(HConstants.HSTORE_OPEN_AND_CLOSE_THREADS_MAX,
+1796            HConstants.DEFAULT_HSTORE_OPEN_AND_CLOSE_THREADS_MAX));
+1797    return getOpenAndCloseThreadPool(maxThreads, threadNamePrefix);
 1798  }
 1799
-1800  //////////////////////////////////////////////////////////////////////////
-1801  // HRegion accessors
-1802  //////////////////////////////////////////////////////////////////////////
-1803
-1804  @Override
-1805  public HTableDescriptor getTableDesc() {
-1806    return this.htableDescriptor;
-1807  }
-1808
-1809  /** @return WAL in use for this region */
-1810  public WAL getWAL() {
-1811    return this.wal;
-1812  }
-1813
-1814  /**
-1815   * @return split policy for this region.
-1816   */
-1817  public RegionSplitPolicy getSplitPolicy() {
-1818    return this.splitPolicy;
-1819  }
-1820
-1821  /**
-1822   * A split takes the config from the parent region & passes it to the daughter
-1823   * region's constructor. If 'conf' was passed, you would end up using the HTD
-1824   * of the parent region in addition to the new daughter HTD. Pass 'baseConf'
-1825   * to

[16/37] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a949969b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.TimeoutExecutorThread.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.TimeoutExecutorThread.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.TimeoutExecutorThread.html
index a1495ff..0c11099 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.TimeoutExecutorThread.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.TimeoutExecutorThread.html
@@ -839,1019 +839,1030 @@
 831  private long pushProcedure(final Procedure proc) {
 832    final long currentProcId = proc.getProcId();
 833
-834    // Create the rollback stack for the procedure
-835    RootProcedureState stack = new RootProcedureState();
-836    rollbackStack.put(currentProcId, stack);
-837
-838    // Submit the new subprocedures
-839    assert !procedures.containsKey(currentProcId);
-840    procedures.put(currentProcId, proc);
-841    sendProcedureAddedNotification(currentProcId);
-842    scheduler.addBack(proc);
-843    return proc.getProcId();
-844  }
-845
-846  /**
-847   * Send an abort notification the specified procedure.
-848   * Depending on the procedure implementation the abort can be considered or ignored.
-849   * @param procId the procedure to abort
-850   * @return true if the procedure exist and has received the abort, otherwise false.
-851   */
-852  public boolean abort(final long procId) {
-853    return abort(procId, true);
-854  }
-855
-856  /**
-857   * Send an abort notification the specified procedure.
-858   * Depending on the procedure implementation the abort can be considered or ignored.
-859   * @param procId the procedure to abort
-860   * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
-861   * @return true if the procedure exist and has received the abort, otherwise false.
-862   */
-863  public boolean abort(final long procId, final boolean mayInterruptIfRunning) {
-864    final Procedure proc = procedures.get(procId);
-865    if (proc != null) {
-866      if (!mayInterruptIfRunning && proc.wasExecuted()) {
-867        return false;
-868      }
-869      return proc.abort(getEnvironment());
-870    }
-871    return false;
-872  }
-873
-874  // ==========================================================================
-875  //  Executor query helpers
-876  // ==========================================================================
-877  public Procedure getProcedure(final long procId) {
-878    return procedures.get(procId);
-879  }
-880
-881  public <T extends Procedure> T getProcedure(final Class<T> clazz, final long procId) {
-882    final Procedure proc = getProcedure(procId);
-883    if (clazz.isInstance(proc)) {
-884      return (T)proc;
-885    }
-886    return null;
-887  }
-888
-889  public ProcedureInfo getResult(final long procId) {
-890    return completed.get(procId);
-891  }
-892
-893  /**
-894   * Return true if the procedure is finished.
-895   * The state may be "completed successfully" or "failed and rolledback".
-896   * Use getResult() to check the state or get the result data.
-897   * @param procId the ID of the procedure to check
-898   * @return true if the procedure execution is finished, otherwise false.
-899   */
-900  public boolean isFinished(final long procId) {
-901    return !procedures.containsKey(procId);
-902  }
-903
-904  /**
-905   * Return true if the procedure is started.
-906   * @param procId the ID of the procedure to check
-907   * @return true if the procedure execution is started, otherwise false.
-908   */
-909  public boolean isStarted(final long procId) {
-910    final Procedure proc = procedures.get(procId);
-911    if (proc == null) {
-912      return completed.get(procId) != null;
-913    }
-914    return proc.wasExecuted();
-915  }
-916
-917  /**
-918   * Mark the specified completed procedure, as ready to remove.
-919   * @param procId the ID of the procedure to remove
-920   */
-921  public void removeResult(final long procId) {
-922    final ProcedureInfo result = completed.get(procId);
-923    if (result == null) {
-924      assert !procedures.containsKey(procId) : "procId=" + procId + " is still running";
-925      if (LOG.isDebugEnabled()) {
-926        LOG.debug("procId=" + procId + " already removed by the cleaner.");
-927      }
-928      return;
-929    }
-930
-931    // The CompletedProcedureCleaner will take care of deletion, once the TTL is expired.
-932    result.setClientAckTime(EnvironmentEdgeManager.currentTime());
-933  }
-934
-935  public Pair<ProcedureInfo, Procedure> getResultOrProcedure(final long procId) {
-936    ProcedureInfo result = completed.get(procId);
-937    Procedure proc = null;
-938    if (result ==

[16/37] hbase-site git commit: Published site at 7063562bf1c0bfcc5efa791b8536a298edab8fcb.

2016-02-16 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6a178a98/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerTask.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerTask.html b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerTask.html
index c51826f..dac2d4d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerTask.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerTask.html
@@ -102,1153 +102,1116 @@
 094public final class Canary implements Tool {
 095  // Sink interface used by the canary to outputs information
 096  public interface Sink {
-097    public long getReadFailureCount();
-098    public void publishReadFailure(HRegionInfo region, Exception e);
-099    public void publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e);
-100    public void publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
-101    public long getWriteFailureCount();
-102    public void publishWriteFailure(HRegionInfo region, Exception e);
-103    public void publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception e);
-104    public void publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
-105  }
-106  // new extended sink for output regionserver mode info
-107  // do not change the Sink interface directly due to maintaining the API
-108  public interface ExtendedSink extends Sink {
-109    public void publishReadFailure(String table, String server);
-110    public void publishReadTiming(String table, String server, long msTime);
-111  }
-112
-113  // Simple implementation of canary sink that allows to plot on
-114  // file or standard output timings or failures.
-115  public static class StdOutSink implements Sink {
-116    protected AtomicLong readFailureCount = new AtomicLong(0),
-117        writeFailureCount = new AtomicLong(0);
+097    public void publishReadFailure(HRegionInfo region, Exception e);
+098    public void publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e);
+099    public void publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
+100    public void publishWriteFailure(HRegionInfo region, Exception e);
+101    public void publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception e);
+102    public void publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
+103  }
+104  // new extended sink for output regionserver mode info
+105  // do not change the Sink interface directly due to maintaining the API
+106  public interface ExtendedSink extends Sink {
+107    public void publishReadFailure(String table, String server);
+108    public void publishReadTiming(String table, String server, long msTime);
+109  }
+110
+111  // Simple implementation of canary sink that allows to plot on
+112  // file or standard output timings or failures.
+113  public static class StdOutSink implements Sink {
+114    @Override
+115    public void publishReadFailure(HRegionInfo region, Exception e) {
+116      LOG.error(String.format("read from region %s failed", region.getRegionNameAsString()), e);
+117    }
 118
 119    @Override
-120    public long getReadFailureCount() {
-121      return readFailureCount.get();
-122    }
-123
-124    @Override
-125    public void publishReadFailure(HRegionInfo region, Exception e) {
-126      readFailureCount.incrementAndGet();
-127      LOG.error(String.format("read from region %s failed", region.getRegionNameAsString()), e);
-128    }
-129
-130    @Override
-131    public void publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) {
-132      readFailureCount.incrementAndGet();
-133      LOG.error(String.format("read from region %s column family %s failed",
-134        region.getRegionNameAsString(), column.getNameAsString()), e);
-135    }
-136
-137    @Override
-138    public void publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime) {
-139      LOG.info(String.format("read from region %s column family %s in %dms",
-140        region.getRegionNameAsString(), column.getNameAsString(), msTime));
-141    }
-142
-143    @Override
-144    public long getWriteFailureCount() {
-145      return writeFailureCount.get();
+120    public void publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) {
+121      LOG.error(String.format("read from region %s column family %s failed",
+122        region.getRegionNameAsString(), column.getNameAsString()), e);
+123    }
+124
+125    @Override
+126    public void publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime) {
+127      LOG.info(String.format("read from region %s column family %s in %dms",
+128