[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
index 904b921..3531f22 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
@@ -323,7 +323,7 @@
 315  @Override
 316  public void setMaxProcId(long maxProcId) {
 317    assert lastProcId.get() < 0 : "expected only one call to setMaxProcId()";
-318    LOG.debug("Load maxProcId=" + maxProcId);
+318    LOG.debug("Load max pid=" + maxProcId);
 319    lastProcId.set(maxProcId);
 320  }
 321
@@ -735,7 +735,7 @@
 727        !(procedures.containsKey(oldProcId) || completed.containsKey(oldProcId)) &&
 728        nonceKeysToProcIdsMap.containsKey(nonceKey)) {
 729      if (traceEnabled) {
-730        LOG.trace("Waiting for procId=" + oldProcId.longValue() + " to be submitted");
+730        LOG.trace("Waiting for pid=" + oldProcId.longValue() + " to be submitted");
 731      }
 732      Threads.sleep(100);
 733    }
@@ -1007,9 +1007,9 @@
 999  public void removeResult(final long procId) {
 1000    CompletedProcedureRetainer retainer = completed.get(procId);
 1001    if (retainer == null) {
-1002      assert !procedures.containsKey(procId) : "procId=" + procId + " is still running";
+1002      assert !procedures.containsKey(procId) : "pid=" + procId + " is still running";
 1003      if (LOG.isDebugEnabled()) {
-1004        LOG.debug("procId=" + procId + " already removed by the cleaner.");
+1004        LOG.debug("pid=" + procId + " already removed by the cleaner.");
 1005      }
 1006      return;
 1007    }
@@ -1357,7 +1357,7 @@
 1349      return LockState.LOCK_YIELD_WAIT;
 1350    } catch (Throwable e) {
 1351      // Catch NullPointerExceptions or similar errors...
-1352      LOG.fatal("CODE-BUG: Uncaught runtime exception fo " + proc, e);
+1352      LOG.fatal("CODE-BUG: Uncaught runtime exception for " + proc, e);
 1353    }
 1354
 1355    // allows to kill the executor before something is stored to the wal.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
index 904b921..3531f22 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
@@ -323,7 +323,7 @@
 315  @Override
 316  public void setMaxProcId(long maxProcId) {
 317    assert lastProcId.get() < 0 : "expected only one call to setMaxProcId()";
-318    LOG.debug("Load maxProcId=" + maxProcId);
+318    LOG.debug("Load max pid=" + maxProcId);
 319    lastProcId.set(maxProcId);
 320  }
 321
@@ -735,7 +735,7 @@
 727        !(procedures.containsKey(oldProcId) || completed.containsKey(oldProcId)) &&
 728        nonceKeysToProcIdsMap.containsKey(nonceKey)) {
 729      if (traceEnabled) {
-730        LOG.trace("Waiting for procId=" + oldProcId.longValue() + " to be submitted");
+730        LOG.trace("Waiting for pid=" + oldProcId.longValue() + " to be submitted");
 731      }
 732      Threads.sleep(100);
 733    }
@@ -1007,9 +1007,9 @@
 999  public void removeResult(final long procId) {
 1000    CompletedProcedureRetainer retainer = completed.get(procId);
 1001    if (retainer == null) {
-1002      assert !procedures.containsKey(procId) : "procId=" + procId + " is still running";
+1002      assert !procedures.containsKey(procId) : "pid=" + procId + " is still running";
 1003      if (LOG.isDebugEnabled()) {
-1004        LOG.debug("procId=" + procId + " already removed by the cleaner.");
+1004        LOG.debug("pid=" + procId + " already removed by the cleaner.");
 1005      }
 1006      return;
 1007    }
@@ -1357,7 +1357,7 @@
 1349      return LockState.LOCK_YIELD_WAIT;
 1350    } catch (Throwable e) {
 1351      // Catch NullPointerExceptions or similar errors...
-1352      LOG.fatal("CODE-BUG: Uncaught runtime exception fo " + proc, e);
+1352      LOG.fatal("CODE-BUG: Uncaught runtime exception for " + proc, e);
 1353    }
 1354
 1355    // allows to
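
The hunks in this commit only rename log and assertion fields ("maxProcId="/"procId=" become "pid=") and fix the "fo "/"for " typo; the surrounding code uses the usual guarded-logging idiom of ProcedureExecutor. A minimal standalone sketch of that idiom with commons-logging (class name and values are illustrative, not taken from the diff):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class PidLoggingSketch {
  private static final Log LOG = LogFactory.getLog(PidLoggingSketch.class);

  void reportRemoved(long procId, boolean stillRunning) {
    // Same shape as ProcedureExecutor.removeResult(): assert first, then log
    // behind an isDebugEnabled() guard so the string concatenation is skipped
    // when debug logging is off.
    assert !stillRunning : "pid=" + procId + " is still running";
    if (LOG.isDebugEnabled()) {
      LOG.debug("pid=" + procId + " already removed by the cleaner.");
    }
  }
}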

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
index e780a6e..0645fef 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
@@ -266,16 +266,16 @@ service.
 
 
 
-private static HRegionInfo
-MetaTableAccessor.getHRegionInfo(Result r,
+private static Optional<HRegionInfo>
+AsyncMetaTableAccessor.getHRegionInfo(Result r,
                byte[] qualifier)
 Returns the HRegionInfo object from the column HConstants.CATALOG_FAMILY and
  qualifier of the catalog table result.
 
 
 
-private static Optional<HRegionInfo>
-AsyncMetaTableAccessor.getHRegionInfo(Result r,
+private static HRegionInfo
+MetaTableAccessor.getHRegionInfo(Result r,
                byte[] qualifier)
 Returns the HRegionInfo object from the column HConstants.CATALOG_FAMILY and
  qualifier of the catalog table result.
@@ -290,7 +290,7 @@ service.
 
 
 private static HRegionLocation
-MetaTableAccessor.getRegionLocation(Result r,
+AsyncMetaTableAccessor.getRegionLocation(Result r,
                 HRegionInfo regionInfo,
                 int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result
 
 
 private static HRegionLocation
-AsyncMetaTableAccessor.getRegionLocation(Result r,
+MetaTableAccessor.getRegionLocation(Result r,
                 HRegionInfo regionInfo,
                 int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result
@@ -307,55 +307,55 @@ service.
 
 
-static RegionLocations
-MetaTableAccessor.getRegionLocations(Result r)
+private static Optional<RegionLocations>
+AsyncMetaTableAccessor.getRegionLocations(Result r)
 Returns an HRegionLocationList extracted from the result.
 
 
-private static Optional<RegionLocations>
-AsyncMetaTableAccessor.getRegionLocations(Result r)
+static RegionLocations
+MetaTableAccessor.getRegionLocations(Result r)
 Returns an HRegionLocationList extracted from the result.
 
 
 private static long
-MetaTableAccessor.getSeqNumDuringOpen(Result r,
+AsyncMetaTableAccessor.getSeqNumDuringOpen(Result r,
                  int replicaId)
 The latest seqnum that the server writing to meta observed when opening the region.
 
 
 private static long
-AsyncMetaTableAccessor.getSeqNumDuringOpen(Result r,
+MetaTableAccessor.getSeqNumDuringOpen(Result r,
                  int replicaId)
 The latest seqnum that the server writing to meta observed when opening the region.
 
 
-static ServerName
-MetaTableAccessor.getServerName(Result r,
+private static Optional<ServerName>
+AsyncMetaTableAccessor.getServerName(Result r,
               int replicaId)
 Returns a ServerName from catalog table Result.
 
 
-private static Optional<ServerName>
-AsyncMetaTableAccessor.getServerName(Result r,
+static ServerName
+MetaTableAccessor.getServerName(Result r,
               int replicaId)
 Returns a ServerName from catalog table Result.
 
 
+private static Optional<TableState>
+AsyncMetaTableAccessor.getTableState(Result r)
+
+
 static TableState
 MetaTableAccessor.getTableState(Result r)
 Decode table state from META Result.
 
 
-
-private static Optional<TableState>
-AsyncMetaTableAccessor.getTableState(Result r)
-
 
 void
 AsyncMetaTableAccessor.MetaTableRawScanResultConsumer.onNext(Result[] results,
@@ -451,13 +451,13 @@ service.
 ClientScanner.cache
 
 
-private Deque<Result>
-BatchScanResultCache.partialResults
-
-
 private List<Result>
 CompleteScanResultCache.partialResults
 
+
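
The reshuffled rows above all follow one pattern: MetaTableAccessor keeps plain return types (null when nothing is found), while AsyncMetaTableAccessor wraps the same lookups in java.util.Optional. A minimal sketch of what that difference means for a caller; the lookup methods below are hypothetical stand-ins, since the AsyncMetaTableAccessor methods listed above are private:

import java.util.Optional;

public class OptionalVsNullableSketch {
  // Nullable style, as in MetaTableAccessor: absence is signalled with null.
  static String findNullable(String key) {
    return key.isEmpty() ? null : "value-for-" + key;
  }

  // Optional style, as in AsyncMetaTableAccessor: absence is part of the type.
  static Optional<String> findOptional(String key) {
    return key.isEmpty() ? Optional.empty() : Optional.of("value-for-" + key);
  }

  public static void main(String[] args) {
    String v = findNullable("region");
    if (v != null) {                      // caller must remember the null check
      System.out.println(v);
    }
    findOptional("region").ifPresent(System.out::println); // check forced by the type
  }
}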

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/hbase-spark/source-repository.html
--
diff --git a/hbase-spark/source-repository.html 
b/hbase-spark/source-repository.html
index eb467a0..063fc7a 100644
--- a/hbase-spark/source-repository.html
+++ b/hbase-spark/source-repository.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-07-30
+Last Published: 2017-07-31
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Spark

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/hbase-spark/team-list.html
--
diff --git a/hbase-spark/team-list.html b/hbase-spark/team-list.html
index 59f6609..a33f432 100644
--- a/hbase-spark/team-list.html
+++ b/hbase-spark/team-list.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-07-30
+Last Published: 2017-07-31
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Spark

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/index.html
--
diff --git a/index.html b/index.html
index 045517f..793cd9d 100644
--- a/index.html
+++ b/index.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Apache HBase™ Home
 
@@ -438,7 +438,7 @@ Apache HBase is an open-source, distributed, versioned, 
non-relational database
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-30
+  Last Published: 
2017-07-31
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/integration.html
--
diff --git a/integration.html b/integration.html
index 2f136ff..fb69f6a 100644
--- a/integration.html
+++ b/integration.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  CI Management
 
@@ -296,7 +296,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-30
+  Last Published: 
2017-07-31
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/issue-tracking.html
--
diff --git a/issue-tracking.html b/issue-tracking.html
index fd0a5b0..8a28c91 100644
--- a/issue-tracking.html
+++ b/issue-tracking.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Issue Management
 
@@ -293,7 +293,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-30
+  Last Published: 
2017-07-31
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/license.html
--
diff --git a/license.html b/license.html
index 858cb9b..a08aa28 100644
--- a/license.html
+++ b/license.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Licenses
 
@@ -281,209 +281,7 @@
 Project Licenses
 
 Apache License, Version 
2.0
-
- Apache License
-   Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-  License shall mean the terms and conditions for use, 
reproduction,
-  and distribution as defined by Sections 1 through 9 of this document.
-
-  Licensor shall mean the copyright owner or entity authorized 
by
-  the copyright owner that is granting the License.
-
-  Legal Entity shall mean the union of the acting entity and 
all
-  other entities that control, are controlled by, or are under common
-  control with that entity. For the purposes of this definition,
-  control means (i) the 

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
index c9a18a3..c80f6d8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
@@ -2492,2617 +2492,2627 @@
 2484  return;
 2485}
 2486  }
-2487  
errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, "Region "
-2488  + descriptiveName + " is a 
split parent in META, in HDFS, "
-2489  + "and not deployed on any 
region server. This could be transient, "
-2490  + "consider to run the catalog 
janitor first!");
-2491  if (shouldFixSplitParents()) {
-2492setShouldRerun();
-2493resetSplitParent(hbi);
-2494  }
-2495    } else if (inMeta && !inHdfs && !isDeployed) {
-2496  
errors.reportError(ERROR_CODE.NOT_IN_HDFS_OR_DEPLOYED, "Region "
-2497  + descriptiveName + " found in 
META, but not in HDFS "
-2498  + "or deployed on any region 
server.");
-2499  if (shouldFixMeta()) {
-2500deleteMetaRegion(hbi);
-2501  }
-2502    } else if (inMeta && !inHdfs && isDeployed) {
-2503  
errors.reportError(ERROR_CODE.NOT_IN_HDFS, "Region " + descriptiveName
-2504  + " found in META, but not in 
HDFS, " +
-2505  "and deployed on " + 
Joiner.on(", ").join(hbi.deployedOn));
-2506  // We treat HDFS as ground truth.  
Any information in meta is transient
-2507  // and equivalent data can be 
regenerated.  So, lets unassign and remove
-2508  // these problems from META.
-2509  if (shouldFixAssignments()) {
-2510errors.print("Trying to fix 
unassigned region...");
-2511undeployRegions(hbi);
-2512  }
-2513  if (shouldFixMeta()) {
-2514// wait for it to complete
-2515deleteMetaRegion(hbi);
-2516  }
-2517    } else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) {
-2518  
errors.reportError(ERROR_CODE.NOT_DEPLOYED, "Region " + descriptiveName
-2519  + " not deployed on any region 
server.");
-2520  tryAssignmentRepair(hbi, "Trying 
to fix unassigned region...");
-2521    } else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) {
-2522  
errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED,
-2523  "Region " + descriptiveName + 
" should not be deployed according " +
-2524  "to META, but is deployed on " 
+ Joiner.on(", ").join(hbi.deployedOn));
-2525  if (shouldFixAssignments()) {
-2526errors.print("Trying to close 
the region " + descriptiveName);
-2527setShouldRerun();
-2528
HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, 
hbi.deployedOn);
-2529  }
-2530    } else if (inMeta && inHdfs && isMultiplyDeployed) {
-2531  
errors.reportError(ERROR_CODE.MULTI_DEPLOYED, "Region " + descriptiveName
-2532  + " is listed in hbase:meta on 
region server " + hbi.metaEntry.regionServer
-2533  + " but is multiply assigned 
to region servers " +
-2534  Joiner.on(", 
").join(hbi.deployedOn));
-2535  // If we are trying to fix the 
errors
-2536  if (shouldFixAssignments()) {
-2537errors.print("Trying to fix 
assignment error...");
-2538setShouldRerun();
-2539
HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, 
hbi.deployedOn);
-2540  }
-2541    } else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
-2542  
errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, "Region "
-2543  + descriptiveName + " listed 
in hbase:meta on region server " +
-2544  hbi.metaEntry.regionServer + " 
but found on region server " +
-2545  hbi.deployedOn.get(0));
-2546  // If we are trying to fix the 
errors
-2547  if (shouldFixAssignments()) {
-2548errors.print("Trying to fix 
assignment error...");
-2549setShouldRerun();
-2550
HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, 
hbi.deployedOn);
-2551
HBaseFsckRepair.waitUntilAssigned(admin, hbi.getHdfsHRI());
-2552  }
-2553} else {
-2554  
errors.reportError(ERROR_CODE.UNKNOWN, "Region " + descriptiveName +
-2555  " is in an unforeseen state:" 
+
-2556  " inMeta=" + inMeta +
-2557  " inHdfs=" + inHdfs +
-2558  " isDeployed=" + isDeployed 
+
-2559  " isMultiplyDeployed=" + 
isMultiplyDeployed +
-2560  " deploymentMatchesMeta=" + 
deploymentMatchesMeta +
-2561  " shouldBeDeployed=" + 
shouldBeDeployed);
-2562}
-2563  }
-2564
-2565  /**
-2566   * Checks 
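
The HBaseFsck logic above is a chain of boolean combinations over inMeta / inHdfs / isDeployed (plus shouldBeDeployed, isMultiplyDeployed and deploymentMatchesMeta), each mapped to an ERROR_CODE and, when the corresponding shouldFix*() flag is set, a repair action. A compressed sketch of that decision chain, with the error names taken from the hunk and the repairs reduced to comments (illustrative only, not the hbck implementation):

public class RegionConsistencySketch {
  enum ErrorCode {
    NOT_IN_HDFS_OR_DEPLOYED, NOT_IN_HDFS, NOT_DEPLOYED, SHOULD_NOT_BE_DEPLOYED,
    MULTI_DEPLOYED, SERVER_DOES_NOT_MATCH_META, UNKNOWN
  }

  static ErrorCode classify(boolean inMeta, boolean inHdfs, boolean isDeployed,
      boolean shouldBeDeployed, boolean isMultiplyDeployed, boolean deploymentMatchesMeta) {
    if (inMeta && !inHdfs && !isDeployed) {
      return ErrorCode.NOT_IN_HDFS_OR_DEPLOYED;      // fix: delete the meta row
    } else if (inMeta && !inHdfs && isDeployed) {
      return ErrorCode.NOT_IN_HDFS;                  // fix: unassign, then delete the meta row
    } else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) {
      return ErrorCode.NOT_DEPLOYED;                 // fix: try to (re)assign the region
    } else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) {
      return ErrorCode.SHOULD_NOT_BE_DEPLOYED;       // fix: close the region
    } else if (inMeta && inHdfs && isMultiplyDeployed) {
      return ErrorCode.MULTI_DEPLOYED;               // fix: repair the multi-assignment
    } else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
      return ErrorCode.SERVER_DOES_NOT_MATCH_META;   // fix: reassign and wait for it
    }
    return ErrorCode.UNKNOWN;
  }
}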

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.html
index a93c99c..5ba50d5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.html
@@ -27,284 +27,284 @@
 019
 020import java.io.IOException;
 021import java.util.Iterator;
-022
-023import org.apache.hadoop.hbase.Cell;
-024import 
org.apache.hadoop.hbase.CellComparator;
-025import 
org.apache.hadoop.hbase.CellUtil;
-026import 
org.apache.hadoop.hbase.HConstants;
-027import 
org.apache.hadoop.hbase.KeyValue;
-028import 
org.apache.hadoop.hbase.KeyValue.Type;
-029import 
org.apache.hadoop.hbase.KeyValueUtil;
-030import org.apache.hadoop.hbase.Tag;
-031import org.apache.hadoop.hbase.TagType;
-032import org.apache.hadoop.hbase.TagUtil;
-033import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-034import 
org.apache.hadoop.hbase.filter.Filter;
-035import 
org.apache.hadoop.hbase.regionserver.KeyValueScanner;
-036import 
org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
-037import 
org.apache.hadoop.hbase.regionserver.ScanInfo;
-038import 
org.apache.hadoop.hbase.regionserver.ShipperListener;
-039import 
org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult;
-040import 
org.apache.hadoop.hbase.util.Bytes;
-041
-042/**
-043 * A query matcher that is specifically 
designed for the scan case.
-044 */
-045@InterfaceAudience.Private
-046public abstract class ScanQueryMatcher 
implements ShipperListener {
-047
-048  /**
-049   * {@link #match} return codes. These 
instruct the scanner moving through memstores and StoreFiles
-050   * what to do with the current 
KeyValue.
-051   * p
-052   * Additionally, this contains 
"early-out" language to tell the scanner to move on to the next
-053   * File (memstore or Storefile), or to 
return immediately.
-054   */
-055  public static enum MatchCode {
-056/**
-057 * Include KeyValue in the returned 
result
-058 */
-059INCLUDE,
-060
-061/**
-062 * Do not include KeyValue in the 
returned result
-063 */
-064SKIP,
-065
-066/**
-067 * Do not include, jump to next 
StoreFile or memstore (in time order)
-068 */
-069NEXT,
-070
-071/**
-072 * Do not include, return current 
result
-073 */
-074DONE,
-075
-076/**
-077 * These codes are used by the 
ScanQueryMatcher
-078 */
+022import java.util.NavigableSet;
+023
+024import org.apache.hadoop.hbase.Cell;
+025import 
org.apache.hadoop.hbase.CellComparator;
+026import 
org.apache.hadoop.hbase.CellUtil;
+027import 
org.apache.hadoop.hbase.HConstants;
+028import 
org.apache.hadoop.hbase.KeyValue;
+029import 
org.apache.hadoop.hbase.KeyValue.Type;
+030import 
org.apache.hadoop.hbase.KeyValueUtil;
+031import org.apache.hadoop.hbase.Tag;
+032import org.apache.hadoop.hbase.TagType;
+033import org.apache.hadoop.hbase.TagUtil;
+034import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+035import 
org.apache.hadoop.hbase.client.Scan;
+036import 
org.apache.hadoop.hbase.filter.Filter;
+037import 
org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
+038import 
org.apache.hadoop.hbase.regionserver.ScanInfo;
+039import 
org.apache.hadoop.hbase.regionserver.ShipperListener;
+040import 
org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult;
+041import 
org.apache.hadoop.hbase.security.visibility.VisibilityNewVersionBehaivorTracker;
+042import 
org.apache.hadoop.hbase.security.visibility.VisibilityScanDeleteTracker;
+043import 
org.apache.hadoop.hbase.util.Bytes;
+044import 
org.apache.hadoop.hbase.util.Pair;
+045
+046/**
+047 * A query matcher that is specifically 
designed for the scan case.
+048 */
+049@InterfaceAudience.Private
+050public abstract class ScanQueryMatcher 
implements ShipperListener {
+051
+052  /**
+053   * {@link #match} return codes. These 
instruct the scanner moving through memstores and StoreFiles
+054   * what to do with the current 
KeyValue.
+055   * p
+056   * Additionally, this contains 
"early-out" language to tell the scanner to move on to the next
+057   * File (memstore or Storefile), or to 
return immediately.
+058   */
+059  public static enum MatchCode {
+060/**
+061 * Include KeyValue in the returned 
result
+062 */
+063INCLUDE,
+064
+065/**
+066 * Do not include KeyValue in the 
returned result
+067 */
+068SKIP,
+069
+070/**
+071 * Do not include, jump to next 
StoreFile or memstore (in time order)
+072 */
+073NEXT,
+074
+075/**
+076 * Do 
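
The MatchCode values whose javadoc appears above are what a ScanQueryMatcher hands back to the scanner for each cell. A standalone sketch of how a scan loop reacts to them (deliberately simplified; not the actual StoreScanner code):

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.function.Function;

public class MatchCodeSketch {
  enum MatchCode { INCLUDE, SKIP, NEXT, DONE }

  // Drains cells from one store, honouring the matcher's per-cell verdict.
  static List<String> scan(Iterator<String> cells, Function<String, MatchCode> matcher) {
    List<String> results = new ArrayList<>();
    while (cells.hasNext()) {
      String cell = cells.next();
      switch (matcher.apply(cell)) {
        case INCLUDE: results.add(cell); break; // keep the cell in the returned result
        case SKIP:    break;                    // drop the cell, keep scanning this store
        case NEXT:    return results;           // early-out: move to the next store file / memstore
        case DONE:    return results;           // early-out: return the current result immediately
      }
    }
    return results;
  }
}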

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureStoreListener.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureStoreListener.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureStoreListener.html
index f16084a..7621348 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureStoreListener.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureStoreListener.html
@@ -28,209 +28,203 @@
 020
 021import java.io.IOException;
 022
-023import 
org.apache.hadoop.hbase.ProcedureInfo;
-024import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-025import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-026import 
org.apache.hadoop.hbase.procedure2.Procedure;
-027
-028/**
-029 * The ProcedureStore is used by the 
executor to persist the state of each procedure execution.
-030 * This allows to resume the execution of 
pending/in-progress procedures in case
-031 * of machine failure or service 
shutdown.
-032 */
-033@InterfaceAudience.Private
-034@InterfaceStability.Evolving
-035public interface ProcedureStore {
-036  /**
-037   * Store listener interface.
-038   * The main process should register a 
listener and respond to the store events.
-039   */
-040  public interface ProcedureStoreListener 
{
-041/**
-042 * triggered when the store sync is 
completed.
-043 */
-044void postSync();
-045
-046/**
-047 * triggered when the store is not 
able to write out data.
-048 * the main process should abort.
-049 */
-050void abortProcess();
-051  }
-052
-053  /**
-054   * An Iterator over a collection of 
Procedure
-055   */
-056  public interface ProcedureIterator {
-057/**
-058 * Reset the Iterator by seeking to 
the beginning of the list.
-059 */
-060void reset();
-061
-062/**
-063 * Returns true if the iterator has 
more elements.
-064 * (In other words, returns true if 
next() would return a Procedure
-065 * rather than throwing an 
exception.)
-066 * @return true if the iterator has 
more procedures
-067 */
-068boolean hasNext();
-069
-070/**
-071 * @return true if the iterator next 
element is a completed procedure.
-072 */
-073boolean isNextFinished();
-074
-075/**
-076 * Skip the next procedure
-077 */
-078void skipNext();
-079
-080/**
-081 * Returns the next procedure in the 
iteration.
-082 * @throws IOException if there was 
an error fetching/deserializing the procedure
-083 * @return the next procedure in the 
iteration.
-084 */
-085Procedure nextAsProcedure() throws 
IOException;
+023import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+024import 
org.apache.hadoop.hbase.classification.InterfaceStability;
+025import 
org.apache.hadoop.hbase.procedure2.Procedure;
+026
+027/**
+028 * The ProcedureStore is used by the 
executor to persist the state of each procedure execution.
+029 * This allows to resume the execution of 
pending/in-progress procedures in case
+030 * of machine failure or service 
shutdown.
+031 */
+032@InterfaceAudience.Private
+033@InterfaceStability.Evolving
+034public interface ProcedureStore {
+035  /**
+036   * Store listener interface.
+037   * The main process should register a 
listener and respond to the store events.
+038   */
+039  public interface ProcedureStoreListener 
{
+040/**
+041 * triggered when the store sync is 
completed.
+042 */
+043void postSync();
+044
+045/**
+046 * triggered when the store is not 
able to write out data.
+047 * the main process should abort.
+048 */
+049void abortProcess();
+050  }
+051
+052  /**
+053   * An Iterator over a collection of 
Procedure
+054   */
+055  public interface ProcedureIterator {
+056/**
+057 * Reset the Iterator by seeking to 
the beginning of the list.
+058 */
+059void reset();
+060
+061/**
+062 * Returns true if the iterator has 
more elements.
+063 * (In other words, returns true if 
next() would return a Procedure
+064 * rather than throwing an 
exception.)
+065 * @return true if the iterator has 
more procedures
+066 */
+067boolean hasNext();
+068
+069/**
+070 * @return true if the iterator next 
element is a completed procedure.
+071 */
+072boolean isNextFinished();
+073
+074/**
+075 * Skip the next procedure
+076 */
+077void skipNext();
+078
+079/**
+080 * Returns the next procedure in the 
iteration.
+081 * @throws IOException if there was 
an error fetching/deserializing the procedure
+082 * @return the next procedure in the 
iteration.
+083 */
+084Procedure next() throws 
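
The excerpt above is the ProcedureStore contract: the "main process" registers a ProcedureStoreListener and reacts to store events, while loading walks procedures through a ProcedureIterator. A minimal listener is just the two callbacks; the registration call is only sketched in a comment because the registration API is not part of this excerpt:

import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;

public class AbortingStoreListener implements ProcedureStore.ProcedureStoreListener {
  @Override
  public void postSync() {
    // Triggered when a store sync completes; nothing to do in this sketch.
  }

  @Override
  public void abortProcess() {
    // Triggered when the store can no longer write out data; per the javadoc
    // above the main process should abort, so fail loudly here.
    throw new IllegalStateException("procedure store failed, aborting");
  }
  // Assumed usage (not shown in this excerpt): store.registerListener(new AbortingStoreListener());
}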

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/hbase-archetypes/hbase-client-project/license.html
--
diff --git a/hbase-archetypes/hbase-client-project/license.html 
b/hbase-archetypes/hbase-client-project/license.html
index 1b0e200..c397b94 100644
--- a/hbase-archetypes/hbase-client-project/license.html
+++ b/hbase-archetypes/hbase-client-project/license.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-07-22
+Last Published: 2017-07-23
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Exemplar for 
hbase-client archetype
@@ -117,7 +117,210 @@
 Project Licenses
 
 Apache License, Version 
2.0
-Can't read the url [https://www.apache.org/licenses/LICENSE-2.0.txt] : 
connect timed out
+
+
+ Apache License
+   Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+  License shall mean the terms and conditions for use, 
reproduction,
+  and distribution as defined by Sections 1 through 9 of this document.
+
+  Licensor shall mean the copyright owner or entity authorized 
by
+  the copyright owner that is granting the License.
+
+  Legal Entity shall mean the union of the acting entity and 
all
+  other entities that control, are controlled by, or are under common
+  control with that entity. For the purposes of this definition,
+  control means (i) the power, direct or indirect, to cause the
+  direction or management of such entity, whether by contract or
+  otherwise, or (ii) ownership of fifty percent (50%) or more of the
+  outstanding shares, or (iii) beneficial ownership of such entity.
+
+  You (or Your) shall mean an individual or Legal 
Entity
+  exercising permissions granted by this License.
+
+  Source form shall mean the preferred form for making 
modifications,
+  including but not limited to software source code, documentation
+  source, and configuration files.
+
+  Object form shall mean any form resulting from mechanical
+  transformation or translation of a Source form, including but
+  not limited to compiled object code, generated documentation,
+  and conversions to other media types.
+
+  Work shall mean the work of authorship, whether in Source or
+  Object form, made available under the License, as indicated by a
+  copyright notice that is included in or attached to the work
+  (an example is provided in the Appendix below).
+
+  Derivative Works shall mean any work, whether in Source or 
Object
+  form, that is based on (or derived from) the Work and for which the
+  editorial revisions, annotations, elaborations, or other modifications
+  represent, as a whole, an original work of authorship. For the purposes
+  of this License, Derivative Works shall not include works that remain
+  separable from, or merely link (or bind by name) to the interfaces of,
+  the Work and Derivative Works thereof.
+
+  Contribution shall mean any work of authorship, including
+  the original version of the Work and any modifications or additions
+  to that Work or Derivative Works thereof, that is intentionally
+  submitted to Licensor for inclusion in the Work by the copyright owner
+  or by an individual or Legal Entity authorized to submit on behalf of
+  the copyright owner. For the purposes of this definition, 
submitted
+  means any form of electronic, verbal, or written communication sent
+  to the Licensor or its representatives, including but not limited to
+  communication on electronic mailing lists, source code control systems,
+  and issue tracking systems that are managed by, or on behalf of, the
+  Licensor for the purpose of discussing and improving the Work, but
+  excluding communication that is conspicuously marked or otherwise
+  designated in writing by the copyright owner as Not a 
Contribution.
+
+  Contributor shall mean Licensor and any individual or Legal 
Entity
+  on behalf of whom a Contribution has been received by Licensor and
+  subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+  this License, each Contributor hereby grants to You a perpetual,
+  worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+  copyright license to reproduce, prepare Derivative Works of,
+ 

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
index eb1dc67..498db42 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
@@ -155,229 +155,235 @@
 147    for (Map.Entry<String, byte[]> entry : d.getAttributesMap().entrySet()) {
 148  this.setAttribute(entry.getKey(), 
entry.getValue());
 149}
-150  }
-151
-152  /**
-153   * Advanced use only.
-154   * Add an existing delete marker to 
this Delete object.
-155   * @param kv An existing KeyValue of 
type "delete".
-156   * @return this for invocation 
chaining
-157   * @throws IOException
-158   */
-159  @SuppressWarnings("unchecked")
-160  public Delete addDeleteMarker(Cell kv) 
throws IOException {
-161// TODO: Deprecate and rename 'add' 
so it matches how we add KVs to Puts.
-162if (!CellUtil.isDelete(kv)) {
-163  throw new IOException("The recently 
added KeyValue is not of type "
-164  + "delete. Rowkey: " + 
Bytes.toStringBinary(this.row));
-165}
-166if (!CellUtil.matchingRow(kv, 
this.row)) {
-167  throw new WrongRowIOException("The 
row in " + kv.toString() +
-168" doesn't match the original one 
" +  Bytes.toStringBinary(this.row));
-169}
-170byte [] family = 
CellUtil.cloneFamily(kv);
-171    List<Cell> list = familyMap.get(family);
-172    if (list == null) {
-173      list = new ArrayList<>(1);
-174}
-175list.add(kv);
-176familyMap.put(family, list);
-177return this;
-178  }
-179
+150super.setPriority(d.getPriority());
+151  }
+152
+153  /**
+154   * Advanced use only.
+155   * Add an existing delete marker to 
this Delete object.
+156   * @param kv An existing KeyValue of 
type "delete".
+157   * @return this for invocation 
chaining
+158   * @throws IOException
+159   */
+160  @SuppressWarnings("unchecked")
+161  public Delete addDeleteMarker(Cell kv) 
throws IOException {
+162// TODO: Deprecate and rename 'add' 
so it matches how we add KVs to Puts.
+163if (!CellUtil.isDelete(kv)) {
+164  throw new IOException("The recently 
added KeyValue is not of type "
+165  + "delete. Rowkey: " + 
Bytes.toStringBinary(this.row));
+166}
+167if (!CellUtil.matchingRow(kv, 
this.row)) {
+168  throw new WrongRowIOException("The 
row in " + kv.toString() +
+169" doesn't match the original one 
" +  Bytes.toStringBinary(this.row));
+170}
+171byte [] family = 
CellUtil.cloneFamily(kv);
+172    List<Cell> list = familyMap.get(family);
+173    if (list == null) {
+174      list = new ArrayList<>(1);
+175}
+176list.add(kv);
+177familyMap.put(family, list);
+178return this;
+179  }
 180
-181  /**
-182   * Delete all versions of all columns 
of the specified family.
-183   * p
-184   * Overrides previous calls to 
deleteColumn and deleteColumns for the
-185   * specified family.
-186   * @param family family name
-187   * @return this for invocation 
chaining
-188   */
-189  public Delete addFamily(final byte [] 
family) {
-190this.addFamily(family, this.ts);
-191return this;
-192  }
-193
-194  /**
-195   * Delete all columns of the specified 
family with a timestamp less than
-196   * or equal to the specified 
timestamp.
-197   * p
-198   * Overrides previous calls to 
deleteColumn and deleteColumns for the
-199   * specified family.
-200   * @param family family name
-201   * @param timestamp maximum version 
timestamp
-202   * @return this for invocation 
chaining
-203   */
-204  public Delete addFamily(final byte [] 
family, final long timestamp) {
-205    if (timestamp < 0) {
-206  throw new 
IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp);
-207}
-208    List<Cell> list = familyMap.get(family);
-209if(list == null) {
-210      list = new ArrayList<>(1);
-211} else if(!list.isEmpty()) {
-212  list.clear();
-213}
-214KeyValue kv = new KeyValue(row, 
family, null, timestamp, KeyValue.Type.DeleteFamily);
-215list.add(kv);
-216familyMap.put(family, list);
-217return this;
-218  }
-219
-220  /**
-221   * Delete all columns of the specified 
family with a timestamp equal to
-222   * the specified timestamp.
-223   * @param family family name
-224   * @param timestamp version timestamp
-225   * @return this for invocation 
chaining
-226   */
-227  public Delete addFamilyVersion(final 
byte [] family, final long timestamp) {
-228    List<Cell> list = familyMap.get(family);
-229if(list == null) {
-230      list = new ArrayList<>(1);
-231}
-232list.add(new KeyValue(row, family, 
null, timestamp,
-233  
KeyValue.Type.DeleteFamilyVersion));
-234familyMap.put(family, list);
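
The methods whose javadoc is shuffled above are the public Delete mutators (the only functional change in the hunk is the added super.setPriority(d.getPriority()) in the copy constructor). A short usage sketch of the family-level variants described in the javadoc; row, family and timestamp values are made up, and the table.delete(...) call that would apply them is omitted:

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteFamilySketch {
  public static void main(String[] args) {
    byte[] row = Bytes.toBytes("row-1");
    byte[] cf = Bytes.toBytes("cf");

    // addFamily(byte[]): delete all versions of all columns of the family.
    Delete all = new Delete(row).addFamily(cf);

    // addFamily(byte[], long): delete columns of the family with timestamps up to
    // the given one; per the javadoc above it overrides earlier per-column deletes
    // for the same family.
    Delete upTo = new Delete(row).addFamily(cf, 1500000000000L);

    // addFamilyVersion(byte[], long): delete only cells whose timestamp equals
    // the given one.
    Delete exact = new Delete(row).addFamilyVersion(cf, 1500000000000L);

    System.out.println(all + "\n" + upTo + "\n" + exact);
  }
}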

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.State.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.State.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.State.html
index 0c07a2f..c90d203 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.State.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.State.html
@@ -34,553 +34,554 @@
 026import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
 027import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 028
-029import 
com.google.common.annotations.VisibleForTesting;
-030
-031import io.netty.buffer.ByteBuf;
-032import 
io.netty.buffer.ByteBufAllocator;
-033import io.netty.channel.Channel;
-034import 
io.netty.channel.ChannelHandler.Sharable;
-035import 
io.netty.channel.ChannelHandlerContext;
-036import io.netty.channel.EventLoop;
-037import 
io.netty.channel.SimpleChannelInboundHandler;
-038import 
io.netty.handler.codec.protobuf.ProtobufDecoder;
-039import 
io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-040import 
io.netty.handler.timeout.IdleStateEvent;
-041import 
io.netty.handler.timeout.IdleStateHandler;
-042import io.netty.util.concurrent.Future;
-043import 
io.netty.util.concurrent.Promise;
-044import 
io.netty.util.concurrent.PromiseCombiner;
-045
-046import java.io.IOException;
-047import java.nio.ByteBuffer;
-048import java.util.ArrayDeque;
-049import java.util.Collection;
-050import java.util.Collections;
-051import java.util.Deque;
-052import java.util.IdentityHashMap;
-053import java.util.List;
-054import java.util.Set;
-055import 
java.util.concurrent.CompletableFuture;
-056import java.util.concurrent.TimeUnit;
-057import java.util.function.Supplier;
-058
-059import 
org.apache.hadoop.conf.Configuration;
-060import 
org.apache.hadoop.crypto.Encryptor;
-061import org.apache.hadoop.fs.Path;
-062import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-063import 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose;
-064import 
org.apache.hadoop.hbase.util.CancelableProgressable;
-065import 
org.apache.hadoop.hbase.util.FSUtils;
-066import 
org.apache.hadoop.hdfs.DFSClient;
-067import 
org.apache.hadoop.hdfs.DistributedFileSystem;
-068import 
org.apache.hadoop.hdfs.protocol.ClientProtocol;
-069import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-070import 
org.apache.hadoop.hdfs.protocol.LocatedBlock;
-071import 
org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
-072import 
org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
-073import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
-074import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-075import 
org.apache.hadoop.util.DataChecksum;
-076
-077/**
-078 * An asynchronous HDFS output stream 
implementation which fans out data to datanode and only
-079 * supports writing file with only one 
block.
-080 * p
-081 * Use the createOutput method in {@link 
FanOutOneBlockAsyncDFSOutputHelper} to create. The mainly
-082 * usage of this class is implementing 
WAL, so we only expose a little HDFS configurations in the
-083 * method. And we place it here under 
util package because we want to make it independent of WAL
-084 * implementation thus easier to move it 
to HDFS project finally.
-085 * p
-086 * Note that, all connections to datanode 
will run in the same {@link EventLoop} which means we only
-087 * need one thread here. But be careful, 
we do some blocking operations in {@link #close()} and
-088 * {@link 
#recoverAndClose(CancelableProgressable)} methods, so do not call them inside
-089 * {@link EventLoop}. And for {@link 
#write(byte[])} {@link #write(byte[], int, int)},
-090 * {@link #buffered()} and {@link 
#flush(boolean)}, if you call them outside {@link EventLoop},
-091 * there will be an extra 
context-switch.
-092 * p
-093 * Advantages compare to 
DFSOutputStream:
-094 * <ol>
-095 * <li>The fan out mechanism. This will reduce the latency.</li>
-096 * <li>The asynchronous WAL could also run in the same EventLoop, we could just call write and flush
-097 * inside the EventLoop thread, so generally we only have one thread to do all the things.</li>
-098 * <li>Fail-fast when connection to datanode error. The WAL implementation could open new writer
-099 * ASAP.</li>
-100 * <li>We could benefit from netty's ByteBuf management mechanism.</li>
-101 * </ol>
-102 */
-103@InterfaceAudience.Private
-104public class FanOutOneBlockAsyncDFSOutput 
implements AsyncFSOutput {
-105
-106  // The MAX_PACKET_SIZE is 16MB but it 
include the header size and checksum size. So here we set a

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ZookeeperMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ZookeeperMonitor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ZookeeperMonitor.html
index e1fbce4..873e17f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ZookeeperMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ZookeeperMonitor.html
@@ -1089,497 +1089,498 @@
 1081}
 1082  }
 1083      Map<String, AtomicLong> actualReadTableLatency = regionSink.getReadLatencyMap();
-1084  for (String tableName : 
this.configuredReadTableTimeouts.keySet()) {
-1085if 
(actualReadTableLatency.containsKey(tableName)) {
-1086  Long actual = 
actualReadTableLatency.get(tableName).longValue();
-1087  Long configured = 
this.configuredReadTableTimeouts.get(tableName);
-1088  LOG.info("Read operation 
for " + tableName + " took " + actual +
-1089" ms. The configured 
read timeout was " + configured + " ms.");
-1090          if (actual > configured) {
-1091LOG.error("Read 
operation for " + tableName + " exceeded the configured read timeout.");
-1092  }
-1093} else {
-1094  LOG.error("Read operation 
for " + tableName + " failed!");
-1095}
-1096  }
-1097  if (this.writeSniffing) {
-1098String writeTableStringName 
= this.writeTableName.getNameAsString();
-1099long actualWriteLatency = 
regionSink.getWriteLatency().longValue();
-1100LOG.info("Write operation 
for " + writeTableStringName + " took " + actualWriteLatency + " ms. The 
configured write timeout was " +
-1101  
this.configuredWriteTableTimeout + " ms.");
-1102// Check that the writeTable 
write operation latency does not exceed the configured timeout.
-1103if (actualWriteLatency  
this.configuredWriteTableTimeout) {
-1104  LOG.error("Write operation 
for " + writeTableStringName + " exceeded the configured write timeout.");
-1105}
-1106  }
-1107} catch (Exception e) {
-1108  LOG.error("Run regionMonitor 
failed", e);
-1109  this.errorCode = 
ERROR_EXIT_CODE;
-1110}
-  }
-1112  this.done = true;
-1113}
-1114
-1115private String[] 
generateMonitorTables(String[] monitorTargets) throws IOException {
-1116  String[] returnTables = null;
-1117
-1118  if (this.useRegExp) {
-1119Pattern pattern = null;
-1120HTableDescriptor[] tds = null;
-1121        Set<String> tmpTables = new TreeSet<>();
-1122try {
-1123  if (LOG.isDebugEnabled()) {
-1124
LOG.debug(String.format("reading list of tables"));
-1125  }
-1126  tds = 
this.admin.listTables(pattern);
-1127  if (tds == null) {
-1128tds = new 
HTableDescriptor[0];
-1129  }
-1130  for (String monitorTarget : 
monitorTargets) {
-1131pattern = 
Pattern.compile(monitorTarget);
-1132for (HTableDescriptor td : 
tds) {
-1133  if 
(pattern.matcher(td.getNameAsString()).matches()) {
-1134
tmpTables.add(td.getNameAsString());
-1135  }
-1136}
-1137  }
-1138} catch (IOException e) {
-1139  LOG.error("Communicate with 
admin failed", e);
-1140  throw e;
-1141}
-1142
-1143        if (tmpTables.size() > 0) {
-1144  returnTables = 
tmpTables.toArray(new String[tmpTables.size()]);
-1145} else {
-1146  String msg = "No HTable found, 
tablePattern:" + Arrays.toString(monitorTargets);
-1147  LOG.error(msg);
-1148  this.errorCode = 
INIT_ERROR_EXIT_CODE;
-1149  throw new 
TableNotFoundException(msg);
-1150}
-1151  } else {
-1152returnTables = monitorTargets;
-1153  }
-1154
-1155  return returnTables;
-1156}
-1157
-1158/*
-1159 * canary entry point to monitor all 
the tables.
-1160 */
-1161    private List<Future<Void>> sniff(TaskType taskType, RegionStdOutSink regionSink) throws Exception {
-1162  if (LOG.isDebugEnabled()) {
-1163LOG.debug(String.format("reading 
list of tables"));
-1164  }
-1165      List<Future<Void>> taskFutures = new LinkedList<>();
-1166  for (HTableDescriptor table : 
admin.listTables()) {
-1167        if (admin.isTableEnabled(table.getTableName())
-1168            && (!table.getTableName().equals(writeTableName))) {
-1169  AtomicLong readLatency = 
regionSink.initializeAndGetReadLatencyForTable(table.getNameAsString());
-1170  
taskFutures.addAll(Canary.sniff(admin, sink, table, executor, taskType, 
this.rawScanEnabled, 
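
Stripped of the Canary plumbing, the read-latency check above reduces to: for every table with a configured timeout, look up the measured latency, report a failure if there is none, and report an error if it exceeded the budget. A standalone sketch of that check (map contents are made up):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

public class ReadLatencyCheckSketch {
  public static void main(String[] args) {
    Map<String, AtomicLong> actualReadTableLatency = new HashMap<>();
    actualReadTableLatency.put("usertable", new AtomicLong(850));

    Map<String, Long> configuredReadTableTimeouts = new HashMap<>();
    configuredReadTableTimeouts.put("usertable", 500L);
    configuredReadTableTimeouts.put("metrics", 1000L);

    for (Map.Entry<String, Long> e : configuredReadTableTimeouts.entrySet()) {
      AtomicLong actual = actualReadTableLatency.get(e.getKey());
      if (actual == null) {
        System.err.println("Read operation for " + e.getKey() + " failed!");
      } else if (actual.longValue() > e.getValue()) {
        System.err.println("Read operation for " + e.getKey()
            + " exceeded the configured read timeout.");
      }
    }
  }
}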

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
index eb9099e..35d5549 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
@@ -232,2671 +232,2699 @@
 224import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse;
 225import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
 226import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
-227import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
-228import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse;
-229import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
-230import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
-231import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
-232import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
-233import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
-234import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
-235import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
-236import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
-237import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
-238import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
-239import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
-240import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
-241import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
-242import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
-243import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
-244import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
-245import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
-246import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
-247import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
-248import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-249import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-250import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-251import 
org.apache.hadoop.hbase.util.Bytes;
-252import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-253import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-254import 
org.apache.hadoop.hbase.util.Pair;
-255
-256/**
-257 * The implementation of AsyncAdmin.
-258 */
-259@InterfaceAudience.Private
-260public class RawAsyncHBaseAdmin 
implements AsyncAdmin {
-261  public static final String 
FLUSH_TABLE_PROCEDURE_SIGNATURE = "flush-table-proc";
-262
-263  private static final Log LOG = 
LogFactory.getLog(AsyncHBaseAdmin.class);
+227import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
+228import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
+229import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
+230import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse;
+231import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
+232import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
+233import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
+234import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
+235import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
+236import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
+237import 

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.MyScannerHook.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.MyScannerHook.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.MyScannerHook.html
deleted file mode 100644
index 4196a6c..000
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.MyScannerHook.html
+++ /dev/null
@@ -1,1650 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-Source code
-
-
-
-
-001/*
-002 *
-003 * Licensed to the Apache Software 
Foundation (ASF) under one
-004 * or more contributor license 
agreements.  See the NOTICE file
-005 * distributed with this work for 
additional information
-006 * regarding copyright ownership.  The 
ASF licenses this file
-007 * to you under the Apache License, 
Version 2.0 (the
-008 * "License"); you may not use this file 
except in compliance
-009 * with the License.  You may obtain a 
copy of the License at
-010 *
-011 * 
http://www.apache.org/licenses/LICENSE-2.0
-012 *
-013 * Unless required by applicable law or 
agreed to in writing, software
-014 * distributed under the License is 
distributed on an "AS IS" BASIS,
-015 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
-016 * See the License for the specific 
language governing permissions and
-017 * limitations under the License.
-018 */
-019
-020package 
org.apache.hadoop.hbase.regionserver;
-021
-022import static 
org.junit.Assert.assertEquals;
-023import static 
org.junit.Assert.assertFalse;
-024import static 
org.junit.Assert.assertNull;
-025import static 
org.junit.Assert.assertTrue;
-026import static org.mockito.Matchers.any;
-027import static org.mockito.Mockito.spy;
-028import static 
org.mockito.Mockito.times;
-029import static 
org.mockito.Mockito.verify;
-030
-031import java.io.IOException;
-032import java.lang.ref.SoftReference;
-033import 
java.security.PrivilegedExceptionAction;
-034import java.util.ArrayList;
-035import java.util.Arrays;
-036import java.util.Collection;
-037import java.util.Collections;
-038import java.util.Iterator;
-039import java.util.List;
-040import java.util.ListIterator;
-041import java.util.NavigableSet;
-042import java.util.TreeSet;
-043import 
java.util.concurrent.ConcurrentSkipListSet;
-044import 
java.util.concurrent.CountDownLatch;
-045import 
java.util.concurrent.ExecutorService;
-046import java.util.concurrent.Executors;
-047import java.util.concurrent.TimeUnit;
-048import 
java.util.concurrent.atomic.AtomicBoolean;
-049import 
java.util.concurrent.atomic.AtomicInteger;
-050import java.util.function.Consumer;
-051
-052import org.apache.commons.logging.Log;
-053import 
org.apache.commons.logging.LogFactory;
-054import 
org.apache.hadoop.conf.Configuration;
-055import 
org.apache.hadoop.fs.FSDataOutputStream;
-056import org.apache.hadoop.fs.FileStatus;
-057import org.apache.hadoop.fs.FileSystem;
-058import 
org.apache.hadoop.fs.FilterFileSystem;
-059import 
org.apache.hadoop.fs.LocalFileSystem;
-060import org.apache.hadoop.fs.Path;
-061import 
org.apache.hadoop.fs.permission.FsPermission;
-062import org.apache.hadoop.hbase.Cell;
-063import 
org.apache.hadoop.hbase.CellComparator;
-064import 
org.apache.hadoop.hbase.CellUtil;
-065import 
org.apache.hadoop.hbase.HBaseConfiguration;
-066import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-067import 
org.apache.hadoop.hbase.HColumnDescriptor;
-068import 
org.apache.hadoop.hbase.HConstants;
-069import 
org.apache.hadoop.hbase.HRegionInfo;
-070import 
org.apache.hadoop.hbase.HTableDescriptor;
-071import 
org.apache.hadoop.hbase.KeyValue;
-072import 
org.apache.hadoop.hbase.MemoryCompactionPolicy;
-073import 
org.apache.hadoop.hbase.TableName;
-074import 
org.apache.hadoop.hbase.client.Get;
-075import 
org.apache.hadoop.hbase.client.Scan;
-076import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-077import 
org.apache.hadoop.hbase.io.compress.Compression;
-078import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-079import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-080import 
org.apache.hadoop.hbase.io.hfile.HFile;
-081import 
org.apache.hadoop.hbase.io.hfile.HFileContext;
-082import 
org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-083import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-084import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
-085import 
org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
-086import 
org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
-087import 
org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
-088import 
org.apache.hadoop.hbase.security.User;
-089import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-090import 

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionOperationWithResult.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionOperationWithResult.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionOperationWithResult.html
index 25d6b70..635798d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionOperationWithResult.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionOperationWithResult.html
@@ -1234,540 +1234,541 @@
 1226   * @param result the result returned 
by the append
 1227   * @throws IOException if an error 
occurred on the coprocessor
 1228   */
-1229  public void postAppend(final Append 
append, final Result result) throws IOException {
-1230execOperation(coprocessors.isEmpty() 
? null : new RegionOperation() {
-1231  @Override
-1232  public void call(RegionObserver 
oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1233  throws IOException {
-1234oserver.postAppend(ctx, append, 
result);
-1235  }
-1236});
-1237  }
-1238
-1239  /**
-1240   * @param increment increment object
-1241   * @param result the result returned 
by postIncrement
-1242   * @throws IOException if an error 
occurred on the coprocessor
-1243   */
-1244  public Result postIncrement(final 
Increment increment, Result result) throws IOException {
-1245return 
execOperationWithResult(result,
-1246coprocessors.isEmpty() ? null : 
new RegionOperationWithResult<Result>() {
-1247  @Override
-1248  public void call(RegionObserver 
oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1249  throws IOException {
-1250
setResult(oserver.postIncrement(ctx, increment, getResult()));
-1251  }
-1252});
-1253  }
-1254
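For orientation, here is a minimal sketch of the observer side of the postIncrement hook dispatched above. It assumes the signature implied by the call in this diff (the observer receives the Increment and the current Result and may return a replacement) and the BaseRegionObserver convenience class present at this snapshot; AuditingRegionObserver itself is hypothetical, so check the RegionObserver javadoc of your HBase version before copying it.

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical observer: logs every increment and passes the result through unchanged.
public class AuditingRegionObserver extends BaseRegionObserver {

  private static final Log LOG = LogFactory.getLog(AuditingRegionObserver.class);

  @Override
  public Result postIncrement(ObserverContext<RegionCoprocessorEnvironment> ctx,
      Increment increment, Result result) throws IOException {
    LOG.debug("postIncrement on row " + Bytes.toStringBinary(increment.getRow()));
    // Returning the result unchanged keeps the default behaviour; the
    // setResult(...getResult()) plumbing in the host code above is what lets
    // an observer substitute a different Result here.
    return result;
  }
}

The host only invokes the hook when at least one coprocessor is registered, which is what the coprocessors.isEmpty() ? null : ... shortcut above expresses.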
-1255  /**
-1256   * @param scan the Scan 
specification
-1257   * @return scanner id to return to 
client if default operation should be
-1258   * bypassed, null otherwise
-1259   * @exception IOException Exception
-1260   */
-1261  public RegionScanner 
preScannerOpen(final Scan scan) throws IOException {
-1262return execOperationWithResult(true, 
null,
-1263coprocessors.isEmpty() ? null : 
new RegionOperationWithResult<RegionScanner>() {
-1264  @Override
-1265  public void call(RegionObserver 
oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1266  throws IOException {
-1267
setResult(oserver.preScannerOpen(ctx, scan, getResult()));
-1268  }
-1269});
-1270  }
-1271
-1272  /**
-1273   * See
-1274   * {@link 
RegionObserver#preStoreScannerOpen(ObserverContext,
-1275   *Store, Scan, NavigableSet, 
KeyValueScanner)}
-1276   */
-1277  public KeyValueScanner 
preStoreScannerOpen(final Store store, final Scan scan,
-1278  final NavigableSet<byte[]> 
targetCols, final long readPt) throws IOException {
-1279return 
execOperationWithResult(null,
-1280coprocessors.isEmpty() ? null : 
new RegionOperationWithResult<KeyValueScanner>() {
-1281  @Override
-1282  public void call(RegionObserver 
oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1283  throws IOException {
-1284
setResult(oserver.preStoreScannerOpen(ctx, store, scan, targetCols, 
getResult(), readPt));
-1285  }
-1286});
-1287  }
-1288
-1289  /**
-1290   * @param scan the Scan 
specification
-1291   * @param s the scanner
-1292   * @return the scanner instance to 
use
-1293   * @exception IOException Exception
-1294   */
-1295  public RegionScanner 
postScannerOpen(final Scan scan, RegionScanner s) throws IOException {
-1296return execOperationWithResult(s,
-1297coprocessors.isEmpty() ? null : 
new RegionOperationWithResult<RegionScanner>() {
-1298  @Override
-1299  public void call(RegionObserver 
oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1300  throws IOException {
-1301
setResult(oserver.postScannerOpen(ctx, scan, getResult()));
-1302  }
-1303});
-1304  }
-1305
-1306  /**
-1307   * @param s the scanner
-1308   * @param results the result set 
returned by the region server
-1309   * @param limit the maximum number of 
results to return
-1310   * @return 'has next' indication to 
client if bypassing default behavior, or
-1311   * null otherwise
-1312   * @exception IOException Exception
-1313   */
-1314  public Boolean preScannerNext(final 
InternalScanner s,
-1315  final List<Result> results, 
final int limit) throws IOException {
-1316return execOperationWithResult(true, 
false,
-1317coprocessors.isEmpty() ? null : 
new RegionOperationWithResult<Boolean>() {
-1318  @Override
-1319  public void call(RegionObserver 
oserver, 
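Every pre/post hook in this hunk follows the same dispatch shape: wrap the observer call in an operation object and thread a result value through each registered coprocessor, letting a pre-hook short-circuit by producing a non-null result. A stripped-down, stand-alone sketch of that pattern (ObserverDispatchSketch and Hook are illustrative names, not HBase API):

import java.io.IOException;
import java.util.List;

// Generic stand-in for the execOperationWithResult(...) calls above.
final class ObserverDispatchSketch<O> {

  @FunctionalInterface
  interface Hook<O, R> {
    R call(O observer, R current) throws IOException;
  }

  private final List<O> observers;

  ObserverDispatchSketch(List<O> observers) {
    this.observers = observers;
  }

  // Threads 'initial' through every observer; the last return value wins,
  // mirroring setResult(oserver.xxx(ctx, ..., getResult())) in the diff.
  <R> R withResult(R initial, Hook<O, R> hook) throws IOException {
    R result = initial;
    for (O observer : observers) {
      result = hook.call(observer, result);
    }
    return result;
  }
}

With O standing in for RegionObserver, the preScannerOpen hook above is essentially withResult(null, (obs, cur) -> obs.preScannerOpen(ctx, scan, cur)); a non-null result is what tells the region server to bypass its default scanner creation.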

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
index f729c99..0a32350 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
@@ -54,757 +54,756 @@
 046import 
io.netty.channel.ChannelPipeline;
 047import io.netty.channel.EventLoop;
 048import 
io.netty.channel.SimpleChannelInboundHandler;
-049import 
io.netty.channel.socket.nio.NioSocketChannel;
-050import 
io.netty.handler.codec.protobuf.ProtobufDecoder;
-051import 
io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-052import 
io.netty.handler.timeout.IdleStateEvent;
-053import 
io.netty.handler.timeout.IdleStateHandler;
-054import io.netty.util.concurrent.Future;
-055import 
io.netty.util.concurrent.FutureListener;
-056import 
io.netty.util.concurrent.Promise;
-057
-058import java.io.IOException;
-059import 
java.lang.reflect.InvocationTargetException;
-060import java.lang.reflect.Method;
-061import java.util.ArrayList;
-062import java.util.EnumSet;
-063import java.util.List;
-064import java.util.concurrent.TimeUnit;
-065
-066import org.apache.commons.logging.Log;
-067import 
org.apache.commons.logging.LogFactory;
-068import 
org.apache.hadoop.conf.Configuration;
-069import 
org.apache.hadoop.crypto.CryptoProtocolVersion;
-070import 
org.apache.hadoop.crypto.Encryptor;
-071import org.apache.hadoop.fs.CreateFlag;
-072import org.apache.hadoop.fs.FileSystem;
-073import 
org.apache.hadoop.fs.FileSystemLinkResolver;
-074import org.apache.hadoop.fs.Path;
-075import 
org.apache.hadoop.fs.UnresolvedLinkException;
-076import 
org.apache.hadoop.fs.permission.FsPermission;
-077import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-078import 
org.apache.hadoop.hbase.client.ConnectionUtils;
-079import 
org.apache.hadoop.hbase.util.CancelableProgressable;
-080import 
org.apache.hadoop.hbase.util.FSUtils;
-081import 
org.apache.hadoop.hdfs.DFSClient;
-082import 
org.apache.hadoop.hdfs.DFSOutputStream;
-083import 
org.apache.hadoop.hdfs.DistributedFileSystem;
-084import 
org.apache.hadoop.hdfs.protocol.ClientProtocol;
-085import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-086import 
org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-087import 
org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-088import 
org.apache.hadoop.hdfs.protocol.LocatedBlock;
-089import 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-090import 
org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
-091import 
org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
-092import 
org.apache.hadoop.hdfs.protocol.datatransfer.Op;
-093import 
org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
-094import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
-095import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
-096import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
-097import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
-098import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
-099import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
-100import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
-101import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-102import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
-103import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
-104import 
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-105import 
org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-106import 
org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
-107import 
org.apache.hadoop.io.EnumSetWritable;
-108import 
org.apache.hadoop.ipc.RemoteException;
-109import org.apache.hadoop.net.NetUtils;
-110import 
org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
-111import 
org.apache.hadoop.security.token.Token;
-112import 
org.apache.hadoop.util.DataChecksum;
-113
-114/**
-115 * Helper class for implementing {@link 
FanOutOneBlockAsyncDFSOutput}.
-116 */
-117@InterfaceAudience.Private
-118public final class 
FanOutOneBlockAsyncDFSOutputHelper {
+049import 
io.netty.handler.codec.protobuf.ProtobufDecoder;
+050import 
io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
+051import 

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
index 16c0042..71844ce 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
@@ -126,2499 +126,2543 @@
 118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
 119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
 120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse;
-151import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
-152import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
-153import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
-154import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
-155import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
-156import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
-157import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
-158import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
-159import 

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/devapidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html
 
b/devapidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html
index a5aadc9..05a7362 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":9,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":42,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":9,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":42,"i56":42,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":9,"i67":10,"i68":10,"i69":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":9,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":42,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":9,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":42,"i57":42,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":9,"i68":10,"i69":10,"i70":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -398,243 +398,249 @@ implements 
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+getValue(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String key)
+Getter for accessing the metadata associated with the 
key.
+
+
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapBytes,Bytes
 getValues()
 Getter for fetching an unmodifiable values
 map.
 
 
-
+
 private http://docs.oracle.com/javase/8/docs/api/java/lang/StringBuilder.html?is-external=true;
 title="class or interface in java.lang">StringBuilder
 getValues(boolean printDefaults)
 
-
+
 boolean
 hasColumnFamily(byte[] familyName)
 Checks to see if this table contains the given column 
family
 
 
-
+
 boolean
 hasCoprocessor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
 java.lang">String classNameToMatch)
 Check if the table has an attached co-processor represented 
by the name
  className
 
 
-
+
 int
 hashCode()
 
-
+
 boolean
 hasRegionMemstoreReplication()
 
-
+
 boolean
 hasSerialReplicationScope()
 Return true if there are at least one cf whose replication 
scope is
  serial.
 
 
-
+
 boolean
 isCompactionEnabled()
 Check if the compaction enable flag of the table is 
true.
 
 
-
+
 boolean
 isMetaRegion()
 Checks if this table is  hbase:meta  
region.
 
 
-
+
 boolean
 isMetaTable()
 Checks if the table is a hbase:meta table
 
 
-
+
 boolean
 isNormalizationEnabled()
 Check if normalization enable flag of the table is 
true.
 
 
-
+
 boolean
 isReadOnly()
 Check if the readOnly flag of the table is set.
 
 
-
+
 TableDescriptorBuilder.ModifyableTableDescriptor
 modifyColumnFamily(ColumnFamilyDescriptor family)
 Modifies the existing column family.
 
 
-
+
 private static TableDescriptor
 parseFrom(byte[] bytes)
 
-
+
 private TableDescriptorBuilder.ModifyableTableDescriptor
 putColumnFamily(ColumnFamilyDescriptor family)
 
-
+
 ColumnFamilyDescriptor
 removeColumnFamily(byte[] column)
 Removes the ColumnFamilyDescriptor with name specified by 
the parameter column
  from the table descriptor
 
 
-
+
 TableDescriptorBuilder.ModifyableTableDescriptor
 removeConfiguration(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String key)
 Remove a config setting represented by the key from the
  configuration
 map
 
 
-
+
 void
 removeCoprocessor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String className)
 Remove a coprocessor from those set on 
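As a rough usage sketch of the read-side accessors in this summary table: the method names are taken from the table itself, but whether they all sit on the public TableDescriptor interface at this exact snapshot (the builder API was still moving) is an assumption, so treat the helper below as illustrative.

import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper that only reads a descriptor through the accessors listed above.
public final class DescribeTable {

  public static void print(TableDescriptor htd) {
    System.out.println("read only?      " + htd.isReadOnly());
    System.out.println("compaction on?  " + htd.isCompactionEnabled());
    System.out.println("has 'info' cf?  " + htd.hasColumnFamily(Bytes.toBytes("info")));
    System.out.println("MAX_FILESIZE    " + htd.getValue("MAX_FILESIZE"));
  }
}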

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/pseudo-distributed.html
--
diff --git a/pseudo-distributed.html b/pseudo-distributed.html
index e0add29..af1386c 100644
--- a/pseudo-distributed.html
+++ b/pseudo-distributed.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase   
 Running Apache HBase (TM) in pseudo-distributed mode
@@ -313,7 +313,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-05
+  Last Published: 
2017-07-07
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/replication.html
--
diff --git a/replication.html b/replication.html
index d055220..7ca9a72 100644
--- a/replication.html
+++ b/replication.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Apache HBase (TM) Replication
@@ -308,7 +308,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-05
+  Last Published: 
2017-07-07
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/resources.html
--
diff --git a/resources.html b/resources.html
index 81c2036..dc9d422 100644
--- a/resources.html
+++ b/resources.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Other Apache HBase (TM) Resources
 
@@ -336,7 +336,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-05
+  Last Published: 
2017-07-07
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/source-repository.html
--
diff --git a/source-repository.html b/source-repository.html
index 55ed893..77af510 100644
--- a/source-repository.html
+++ b/source-repository.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Source Code Management
 
@@ -304,7 +304,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-05
+  Last Published: 
2017-07-07
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/sponsors.html
--
diff --git a/sponsors.html b/sponsors.html
index d021ef2..5d10b39 100644
--- a/sponsors.html
+++ b/sponsors.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Apache HBase™ Sponsors
 
@@ -338,7 +338,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-05
+  Last Published: 
2017-07-07
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/supportingprojects.html
--
diff --git a/supportingprojects.html b/supportingprojects.html
index f852a1b..1ba351c 100644
--- a/supportingprojects.html
+++ b/supportingprojects.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Supporting Projects
 
@@ -525,7 +525,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-05
+  Last Published: 
2017-07-07
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/team-list.html
--
diff --git a/team-list.html b/team-list.html
index 3a3b35e..7507e1a 100644
--- a/team-list.html
+++ b/team-list.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Team
 
@@ -675,7 +675,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-05
+  Last Published: 
2017-07-07
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/testdevapidocs/allclasses-frame.html

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/regionserver/MutableSegment.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MutableSegment.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MutableSegment.html
index 7f4301e..fb3286e 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MutableSegment.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MutableSegment.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -202,11 +202,15 @@ extends getMinTimestamp()
 
 
+protected long
+indexEntrySize()
+
+
 boolean
 shouldSeek(Scan scan,
   long oldestUnexpiredTS)
 
-
+
 void
 upsert(Cell cell,
   long readpoint,
@@ -218,7 +222,7 @@ extends Segment
-close,
 compare,
 compareRows,
 decScannerCount,
 dump,
 getCellLength,
 getCellsCount,
 getCell
 Set, getComparator,
 getFirstAfter,
 getMemStoreLAB,
 getMinSequenceId,
 getScanner,
 getScanner,
 getScanners,
 getTimeRangeTracker,
 headSet, heapSize,
 heapSizeChange,
 incScannerCount,
 incSize,
 internalAdd,
 isEmpty,
 isTagsPresent,
 iterator, keySize,
 last,
 maybeCloneWithAllocator,
 setCellSet,
 tailSet,
 toString,
 
 updateMetaInfo
+close,
 compare,
 compareRows,
 decScannerCount,
 dump,
 getCellLength,
 getCellsCount,
 getCell
 Set, getComparator,
 getFirstAfter,
 getMemStoreLAB,
 getMinSequenceId,
 getScanner,
 getScanner,
 getScanners,
 getTimeRangeTracker,
 headSet, heapSize,
 heapSizeChange,
 incScannerCount,
 incSize,
 internalAdd,
 isEmpty,
 isTagsPresent,
 iterator, keySize,
 last,
 maybeCloneWithAllocator,
 setCellSet,
 tailSet,
 toString,
 
 updateMetaInfo, updateMetaInfo
 
 
 
@@ -283,7 +287,7 @@ extends 
 
 add
-public void add(Cell cell,
+public void add(Cell cell,
 boolean mslabUsed,
 MemstoreSize memstoreSize)
 Adds the given cell into the segment
@@ -301,7 +305,7 @@ extends 
 
 upsert
-public void upsert(Cell cell,
+public void upsert(Cell cell,
long readpoint,
MemstoreSize memstoreSize)
 
@@ -312,7 +316,7 @@ extends 
 
 first
-Cell first()
+Cell first()
 Returns the first cell in the segment
 
 Returns:
@@ -326,7 +330,7 @@ extends 
 
 shouldSeek
-public boolean shouldSeek(Scan scan,
+public boolean shouldSeek(Scan scan,
   long oldestUnexpiredTS)
 
 Specified by:
@@ -337,16 +341,29 @@ extends 
 
 
-
+
 
 getMinTimestamp
-public long getMinTimestamp()
+public long getMinTimestamp()
 
 Specified by:
getMinTimestamp in
 class Segment
 
 
 
+
+
+
+
+
+indexEntrySize
+protected long indexEntrySize()
+
+Specified by:
+indexEntrySize in
 class Segment
+
+
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
index 70560ae..2b3129c 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":6,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":6,"i31":10,"i32":10,"i33":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":6,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":6,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":6,"i32":10,"i33":10,"i34":10,"i35":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],4:["t3","Abstract 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -332,39 +332,43 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
+protected abstract long
+indexEntrySize()
+
+
 protected void
 internalAdd(Cell cell,
boolean mslabUsed,
MemstoreSize memstoreSize)
 
-
+
 boolean
 isEmpty()
 
-
+
 boolean
 isTagsPresent()
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
 title="class or interface in java.util">IteratorCell
 iterator()
 
-
+
 long
 keySize()
 
-
+
 Cell
 last()
 
-
+
 Cell
 maybeCloneWithAllocator(Cell cell)
 If the segment has a memory allocator the cell is being 
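The new indexEntrySize() methods in this hunk give every concrete segment a way to report the per-cell overhead of whatever index structure it uses, so heap accounting can charge a cell for both its payload and its index entry. A hedged sketch of that idea (the class names and the 40-byte constant are illustrative, not the values HBase uses):

// Illustrative only: mirrors the shape of Segment/MutableSegment, not their code.
abstract class SegmentSketch {

  // Per-entry overhead of the concrete segment's cell index.
  protected abstract long indexEntrySize();

  // Heap delta when one cell of the given size is added to the index.
  protected long heapSizeChange(long cellSize) {
    return cellSize + indexEntrySize();
  }
}

class SkipListSegmentSketch extends SegmentSketch {
  private static final long SKIP_LIST_ENTRY_OVERHEAD = 40L; // assumed value, for illustration

  @Override
  protected long indexEntrySize() {
    return SKIP_LIST_ENTRY_OVERHEAD;
  }
}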

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
index c895448..545d4da 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
@@ -1294,425 +1294,426 @@
 1286  }
 1287
 1288  // We normalize locality to be a 
score between 0 and 1.0 representing how good it
-1289  // is compared to how good it 
could be
-1290  locality /= bestLocality;
-1291}
-1292
-1293@Override
-1294protected void regionMoved(int 
region, int oldServer, int newServer) {
-1295  int oldEntity = type == 
LocalityType.SERVER ? oldServer : cluster.serverIndexToRackIndex[oldServer];
-1296  int newEntity = type == 
LocalityType.SERVER ? newServer : cluster.serverIndexToRackIndex[newServer];
-1297  if (this.services == null) {
-1298return;
-1299  }
-1300  double localityDelta = 
getWeightedLocality(region, newEntity) - getWeightedLocality(region, 
oldEntity);
-1301  double normalizedDelta = 
localityDelta / bestLocality;
-1302  locality += normalizedDelta;
-1303}
-1304
-1305@Override
-1306double cost() {
-1307  return 1 - locality;
-1308}
-1309
-1310private int 
getMostLocalEntityForRegion(int region) {
-1311  return 
cluster.getOrComputeRegionsToMostLocalEntities(type)[region];
-1312}
-1313
-1314private double 
getWeightedLocality(int region, int entity) {
-1315  return 
cluster.getOrComputeWeightedLocality(region, entity, type);
-1316}
-1317
-1318  }
-1319
-1320  static class 
ServerLocalityCostFunction extends LocalityBasedCostFunction {
-1321
-1322private static final String 
LOCALITY_COST_KEY = "hbase.master.balancer.stochastic.localityCost";
-1323private static final float 
DEFAULT_LOCALITY_COST = 25;
-1324
-1325
ServerLocalityCostFunction(Configuration conf, MasterServices srv) {
-1326  super(
-1327  conf,
-1328  srv,
-1329  LocalityType.SERVER,
-1330  LOCALITY_COST_KEY,
-1331  DEFAULT_LOCALITY_COST
-1332  );
-1333}
-1334
-1335@Override
-1336int regionIndexToEntityIndex(int 
region) {
-1337  return 
cluster.regionIndexToServerIndex[region];
-1338}
-1339  }
-1340
-1341  static class RackLocalityCostFunction 
extends LocalityBasedCostFunction {
-1342
-1343private static final String 
RACK_LOCALITY_COST_KEY = "hbase.master.balancer.stochastic.rackLocalityCost";
-1344private static final float 
DEFAULT_RACK_LOCALITY_COST = 15;
-1345
-1346public 
RackLocalityCostFunction(Configuration conf, MasterServices services) {
-1347  super(
-1348  conf,
-1349  services,
-1350  LocalityType.RACK,
-1351  RACK_LOCALITY_COST_KEY,
-1352  DEFAULT_RACK_LOCALITY_COST
-1353  );
-1354}
-1355
-1356@Override
-1357int regionIndexToEntityIndex(int 
region) {
-1358  return 
cluster.getRackForRegion(region);
-1359}
-1360  }
-1361
-1362  /**
-1363   * Base class that allows writing cost 
functions from rolling average of some
-1364   * number from RegionLoad.
-1365   */
-1366  abstract static class 
CostFromRegionLoadFunction extends CostFunction {
-1367
-1368private ClusterStatus clusterStatus 
= null;
-1369private Map<String, 
Deque<BalancerRegionLoad>> loads = null;
-1370private double[] stats = null;
-1371
CostFromRegionLoadFunction(Configuration conf) {
-1372  super(conf);
-1373}
-1374
-1375void setClusterStatus(ClusterStatus 
status) {
-1376  this.clusterStatus = status;
-1377}
-1378
-1379void setLoads(Map<String, 
Deque<BalancerRegionLoad>> l) {
-1380  this.loads = l;
-1381}
-1382
-1383@Override
-1384double cost() {
-1385  if (clusterStatus == null || loads 
== null) {
-1386return 0;
-1387  }
-1388
-1389  if (stats == null || stats.length 
!= cluster.numServers) {
-1390stats = new 
double[cluster.numServers];
-1391  }
-1392
-1393  for (int i =0; i < 
stats.length; i++) {
-1394//Cost this server has from 
RegionLoad
-1395long cost = 0;
-1396
-1397// for every region on this 
server get the rl
-1398for(int 
regionIndex:cluster.regionsPerServer[i]) {
-1399  
Collection<BalancerRegionLoad> regionLoadList =  
cluster.regionLoads[regionIndex];
-1400
-1401  // Now if we found a region 
load get the type of cost that was requested.
-1402   
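The CostFromRegionLoadFunction family described above reduces a per-server statistic to a single score in [0, 1], where 0 is a perfectly balanced cluster and 1 is the worst possible skew. A self-contained sketch of that contract (the scaling is illustrative, not the balancer's exact formula):

// Illustrative cost function: scales the spread of a per-server statistic into [0, 1].
final class SpreadCostSketch {

  double cost(double[] perServerLoad) {
    int n = perServerLoad.length;
    if (n <= 1) {
      return 0;
    }
    double total = 0;
    for (double v : perServerLoad) {
      total += v;
    }
    double mean = total / n;
    double observed = 0;
    for (double v : perServerLoad) {
      observed += Math.abs(v - mean);
    }
    // Worst case: the entire load sits on one server and the others are empty.
    double worst = 2 * (total - mean);
    return worst == 0 ? 0 : Math.min(1.0, observed / worst);
  }
}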

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
index 01496d6..dc12c09 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
@@ -48,2406 +48,2267 @@
 040
 041import io.netty.util.Timeout;
 042import io.netty.util.TimerTask;
-043import java.util.stream.Stream;
-044import org.apache.commons.io.IOUtils;
-045import org.apache.commons.logging.Log;
-046import 
org.apache.commons.logging.LogFactory;
-047import 
org.apache.hadoop.hbase.HRegionInfo;
-048import 
org.apache.hadoop.hbase.HRegionLocation;
-049import 
org.apache.hadoop.hbase.MetaTableAccessor;
-050import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-051import 
org.apache.hadoop.hbase.NotServingRegionException;
-052import 
org.apache.hadoop.hbase.ProcedureInfo;
-053import 
org.apache.hadoop.hbase.RegionLocations;
-054import 
org.apache.hadoop.hbase.ServerName;
-055import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-056import 
org.apache.hadoop.hbase.HConstants;
-057import 
org.apache.hadoop.hbase.TableExistsException;
-058import 
org.apache.hadoop.hbase.TableName;
-059import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-060import 
org.apache.hadoop.hbase.TableNotDisabledException;
-061import 
org.apache.hadoop.hbase.TableNotEnabledException;
-062import 
org.apache.hadoop.hbase.TableNotFoundException;
-063import 
org.apache.hadoop.hbase.UnknownRegionException;
-064import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-065import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-066import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-067import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-068import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-069import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-070import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-071import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-072import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-073import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-074import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-075import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-076import 
org.apache.hadoop.hbase.replication.ReplicationException;
-077import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-078import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-079import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-080import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-081import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-082import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-100import 

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
index 6de986f..c895448 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
@@ -26,1592 +26,1693 @@
 018package 
org.apache.hadoop.hbase.master.balancer;
 019
 020import java.util.ArrayDeque;
-021import java.util.Arrays;
-022import java.util.Collection;
-023import java.util.Deque;
-024import java.util.HashMap;
-025import java.util.LinkedList;
-026import java.util.List;
-027import java.util.Map;
-028import java.util.Map.Entry;
-029import java.util.Random;
-030
-031import org.apache.commons.logging.Log;
-032import 
org.apache.commons.logging.LogFactory;
-033import 
org.apache.hadoop.conf.Configuration;
-034import 
org.apache.hadoop.hbase.ClusterStatus;
-035import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-036import 
org.apache.hadoop.hbase.HConstants;
-037import 
org.apache.hadoop.hbase.HRegionInfo;
-038import 
org.apache.hadoop.hbase.RegionLoad;
-039import 
org.apache.hadoop.hbase.ServerLoad;
-040import 
org.apache.hadoop.hbase.ServerName;
-041import 
org.apache.hadoop.hbase.TableName;
-042import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-043import 
org.apache.hadoop.hbase.master.MasterServices;
-044import 
org.apache.hadoop.hbase.master.RegionPlan;
-045import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
-046import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
-047import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
-048import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
-049import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-050import 
org.apache.hadoop.hbase.util.Bytes;
-051import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-052
-053import com.google.common.collect.Lists;
-054
-055/**
-056 * <p>This is a best effort load 
balancer. Given a Cost function F(C) =&gt; x It will
-057 * randomly try and mutate the cluster to 
Cprime. If F(Cprime) &lt; F(C) then the
-058 * new cluster state becomes the plan. It 
includes cost functions to compute the cost of:</p>
-059 * <ul>
-060 * <li>Region Load</li>
-061 * <li>Table Load</li>
-062 * <li>Data Locality</li>
-063 * <li>Memstore Sizes</li>
-064 * <li>Storefile Sizes</li>
-065 * </ul>
-066 *
-067 *
-068 * <p>Every cost function returns a 
number between 0 and 1 inclusive; where 0 is the lowest cost
-069 * best solution, and 1 is the highest 
possible cost and the worst solution.  The computed costs are
-070 * scaled by their respective 
multipliers:</p>
+021import java.util.ArrayList;
+022import java.util.Arrays;
+023import java.util.Collection;
+024import java.util.Collections;
+025import java.util.Deque;
+026import java.util.HashMap;
+027import java.util.LinkedList;
+028import java.util.List;
+029import java.util.Map;
+030import java.util.Map.Entry;
+031import java.util.Random;
+032
+033import org.apache.commons.logging.Log;
+034import 
org.apache.commons.logging.LogFactory;
+035import 
org.apache.hadoop.conf.Configuration;
+036import 
org.apache.hadoop.hbase.ClusterStatus;
+037import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
+038import 
org.apache.hadoop.hbase.HConstants;
+039import 
org.apache.hadoop.hbase.HRegionInfo;
+040import 
org.apache.hadoop.hbase.RegionLoad;
+041import 
org.apache.hadoop.hbase.ServerLoad;
+042import 
org.apache.hadoop.hbase.ServerName;
+043import 
org.apache.hadoop.hbase.TableName;
+044import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+045import 
org.apache.hadoop.hbase.master.MasterServices;
+046import 
org.apache.hadoop.hbase.master.RegionPlan;
+047import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
+048import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
+049import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
+050import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
+051import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
+052import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
+053import 
org.apache.hadoop.hbase.util.Bytes;
+054import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+055
+056import com.google.common.base.Optional;
+057import com.google.common.collect.Lists;
+058
+059/**
+060 * <p>This is a best effort load 
balancer. Given a Cost function F(C) =&gt; x 
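The javadoc above describes a random-mutation search: propose a small change to the cluster, keep it only if the weighted sum of cost functions goes down. A hedged, self-contained sketch of that loop with the cluster state, mutation and cost function abstracted away (these names are placeholders, not the balancer's real types):

import java.util.function.Function;
import java.util.function.UnaryOperator;

final class GreedyDescentSketch {

  static <S> S search(S initial, UnaryOperator<S> randomMutation,
      Function<S, Double> cost, int maxSteps) {
    S plan = initial;
    double best = cost.apply(plan);
    for (int step = 0; step < maxSteps; step++) {
      S candidate = randomMutation.apply(plan); // e.g. move or swap one randomly chosen region
      double c = cost.apply(candidate);         // weighted sum of cost functions, each in [0, 1]
      if (c < best) {                           // F(C') < F(C): adopt the mutated cluster
        plan = candidate;
        best = c;
      }
    }
    return plan;
  }
}

In the real balancer the search is additionally bounded by a time budget, and the accepted state is then turned into a list of region moves.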

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.html
index bc4042f..20bb545 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipperThread.html
@@ -62,286 +62,309 @@
 054public class 
ReplicationSourceShipperThread extends Thread {
 055  private static final Log LOG = 
LogFactory.getLog(ReplicationSourceShipperThread.class);
 056
-057  protected final Configuration conf;
-058  protected final String walGroupId;
-059  protected final 
PriorityBlockingQueue<Path> queue;
-060  protected final 
ReplicationSourceInterface source;
-061
-062  // Last position in the log that we 
sent to ZooKeeper
-063  protected long lastLoggedPosition = 
-1;
-064  // Path of the current log
-065  protected volatile Path currentPath;
-066  // Indicates whether this particular 
worker is running
-067  private boolean workerRunning = true;
-068  protected 
ReplicationSourceWALReaderThread entryReader;
-069
-070  // How long should we sleep for each 
retry
-071  protected final long sleepForRetries;
-072  // Maximum number of retries before 
taking bold actions
-073  protected final int 
maxRetriesMultiplier;
-074
-075  // Use guava cache to set ttl for each 
key
-076  private final LoadingCache<String, 
Boolean> canSkipWaitingSet = CacheBuilder.newBuilder()
-077  .expireAfterAccess(1, 
TimeUnit.DAYS).build(
-078  new CacheLoader<String, 
Boolean>() {
-079@Override
-080public Boolean load(String key) 
throws Exception {
-081  return false;
-082}
-083  }
-084  );
-085
-086  public 
ReplicationSourceShipperThread(Configuration conf, String walGroupId,
-087  PriorityBlockingQueue<Path> 
queue, ReplicationSourceInterface source) {
-088this.conf = conf;
-089this.walGroupId = walGroupId;
-090this.queue = queue;
-091this.source = source;
-092this.sleepForRetries =
-093
this.conf.getLong("replication.source.sleepforretries", 1000);// 1 second
-094this.maxRetriesMultiplier =
-095
this.conf.getInt("replication.source.maxretriesmultiplier", 300); // 5 minutes 
@ 1 sec per
-096  }
-097
-098  @Override
-099  public void run() {
-100// Loop until we close down
-101while (isActive()) {
-102  int sleepMultiplier = 1;
-103  // Sleep until replication is 
enabled again
-104  if (!source.isPeerEnabled()) {
-105if (sleepForRetries("Replication 
is disabled", sleepMultiplier)) {
-106  sleepMultiplier++;
-107}
-108continue;
-109  }
-110
-111  while (entryReader == null) {
-112if (sleepForRetries("Replication 
WAL entry reader thread not initialized",
-113  sleepMultiplier)) {
+057  // Hold the state of a replication 
worker thread
+058  public enum WorkerState {
+059RUNNING,
+060STOPPED,
+061FINISHED,  // The worker is done 
processing a recovered queue
+062  }
+063
+064  protected final Configuration conf;
+065  protected final String walGroupId;
+066  protected final 
PriorityBlockingQueue<Path> queue;
+067  protected final 
ReplicationSourceInterface source;
+068
+069  // Last position in the log that we 
sent to ZooKeeper
+070  protected long lastLoggedPosition = 
-1;
+071  // Path of the current log
+072  protected volatile Path currentPath;
+073  // Current state of the worker thread
+074  private WorkerState state;
+075  protected 
ReplicationSourceWALReaderThread entryReader;
+076
+077  // How long should we sleep for each 
retry
+078  protected final long sleepForRetries;
+079  // Maximum number of retries before 
taking bold actions
+080  protected final int 
maxRetriesMultiplier;
+081
+082  // Use guava cache to set ttl for each 
key
+083  private final LoadingCache<String, 
Boolean> canSkipWaitingSet = CacheBuilder.newBuilder()
+084  .expireAfterAccess(1, 
TimeUnit.DAYS).build(
+085  new CacheLoader<String, 
Boolean>() {
+086@Override
+087public Boolean load(String key) 
throws Exception {
+088  return false;
+089}
+090  }
+091  );
+092
+093  public 
ReplicationSourceShipperThread(Configuration conf, String walGroupId,
+094  PriorityBlockingQueue<Path> 
queue, ReplicationSourceInterface source) {
+095this.conf = conf;
+096this.walGroupId = walGroupId;
+097this.queue = queue;
+098this.source = source;
+099this.sleepForRetries =
+100
this.conf.getLong("replication.source.sleepforretries", 1000);// 1 second
+101
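The worker's run() loop above leans on sleepForRetries(...) plus a caller-maintained multiplier, so repeated failures back off instead of spinning. A hedged sketch of that pattern, with field names mirroring the diff but the helper itself only illustrative:

// Illustrative retry/sleep helper in the spirit of ReplicationSourceShipperThread.
final class RetrySleeperSketch {

  private final long sleepForRetries = 1000L;   // replication.source.sleepforretries
  private final int maxRetriesMultiplier = 300; // replication.source.maxretriesmultiplier

  // Sleeps base * multiplier and tells the caller whether it may keep growing the multiplier.
  boolean sleepForRetries(String reason, int sleepMultiplier) {
    try {
      Thread.sleep(sleepForRetries * sleepMultiplier);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    return sleepMultiplier < maxRetriesMultiplier;
  }
}

The new WorkerState enum (RUNNING, STOPPED, FINISHED) replaces the old boolean workerRunning flag, so a worker that has drained a recovered queue can signal that it is finished rather than merely stopped.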

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index 7961a62..a6f1e30 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i
 109":10,"i110":10,"i111":10,"i112":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i
 109":10,"i110":10,"i111":10,"i112":10,"i113":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -523,128 +523,135 @@ implements getNumRegionsOpened()
 
 
+http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
+getOrderedRegions(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<HRegionInfo> regions)
+Used when assigning regions, this method will put system 
regions in
+ front of user regions
+
+
+
 private MasterProcedureEnv
 getProcedureEnvironment()
 
-
+
 private MasterProcedureScheduler
 getProcedureScheduler()
 
-
+
 HRegionInfo
 getRegionInfo(byte[] regionName)
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<RegionStates.RegionStateNode>
 getRegionsInTransition()
 
-
+
 RegionStates
 getRegionStates()
 
-
+
 RegionStateStore
 getRegionStateStore()
 
-
+
 Pairhttp://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integer,http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integer
 getReopenStatus(TableName tableName)
 Used by the client (via master) to identify if all regions 
have the schema updates
 
 
-
+
 int
 getServerVersion(ServerName serverName)
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapServerName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
 getSnapShotOfAssignment(http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">Collection<HRegionInfo> regions)
 
-
+
 (package private) TableStateManager
 getTableStateManager()
 
-
+
 private void
 handleRegionOverStuckWarningThreshold(HRegionInfo regionInfo)
 
-
+
 boolean
 hasRegionsInTransition()
 
-
+
 boolean
 isCarryingMeta(ServerName serverName)
 
-
+
 private boolean
 isCarryingRegion(ServerNameserverName,
 

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
 
b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
index de61f5f..743742a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
@@ -4,7 +4,7 @@
 
 
 
-ConnectionImplementation.ServerErrorTracker (Apache HBase 
2.0.0-SNAPSHOT API)
+ConnectionImplementation.ServerErrorTracker (Apache HBase 
3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
-ConnectionImplementation (Apache HBase 2.0.0-SNAPSHOT API)
+ConnectionImplementation (Apache HBase 3.0.0-SNAPSHOT API)