[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
index d8556be..d6a6b28 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
@@ -56,365 +56,371 @@
 048 * The AssignmentManager will notify this procedure when the RS completes
 049 * the operation and reports the transitioned state
 050 * (see the Assign and Unassign class for more detail).
-051 * <p>Procedures move from the REGION_TRANSITION_QUEUE state when they are
-052 * first submitted, to the REGION_TRANSITION_DISPATCH state when the request
-053 * to remote server is sent and the Procedure is suspended waiting on external
-054 * event to be woken again. Once the external event is triggered, Procedure
-055 * moves to the REGION_TRANSITION_FINISH state.
-056 *
-057 * <p>NOTE: {@link AssignProcedure} and {@link UnassignProcedure} should not be thought of
-058 * as being asymmetric, at least currently.
-059 * <ul>
-060 * <li>{@link AssignProcedure} moves through all the above described states and implements methods
-061 * associated with each while {@link UnassignProcedure} starts at state
-062 * REGION_TRANSITION_DISPATCH and state REGION_TRANSITION_QUEUE is not supported.</li>
-063 *
-064 * <li>When any step in {@link AssignProcedure} fails, failure handler
-065 * AssignProcedure#handleFailure(MasterProcedureEnv, RegionStateNode) re-attempts the
-066 * assignment by setting the procedure state to REGION_TRANSITION_QUEUE and forces
-067 * assignment to a different target server by setting {@link AssignProcedure#forceNewPlan}. When
-068 * the number of attempts reach threshold configuration 'hbase.assignment.maximum.attempts',
-069 * the procedure is aborted. For {@link UnassignProcedure}, similar re-attempts are
-070 * intentionally not implemented. It is a 'one shot' procedure.
-071 * </li>
-072 * </ul>
-073 *
-074 * <p>TODO: Considering it is a priority doing all we can to get make a region available as soon as possible,
-075 * re-attempting with any target makes sense if specified target fails in case of
-076 * {@link AssignProcedure}. For {@link UnassignProcedure}, if communication with RS fails,
-077 * similar re-attempt makes little sense (what should be different from previous attempt?). Also it
-078 * could be complex with current implementation of
-079 * {@link RegionTransitionProcedure#execute(MasterProcedureEnv)} and {@link UnassignProcedure}.
-080 * We have made a choice of keeping {@link UnassignProcedure} simple, where the procedure either
-081 * succeeds or fails depending on communication with RS. As parent will have broader context, parent
-082 * can better handle the failed instance of {@link UnassignProcedure}. Similar simplicity for
-083 * {@link AssignProcedure} is desired and should be explored/ discussed further.
-084 */
-085@InterfaceAudience.Private
-086public abstract class RegionTransitionProcedure
-087extends Procedure<MasterProcedureEnv>
-088implements TableProcedureInterface,
-089  RemoteProcedure<MasterProcedureEnv, ServerName> {
-090  private static final Log LOG = LogFactory.getLog(RegionTransitionProcedure.class);
-091
-092  protected final AtomicBoolean aborted = new AtomicBoolean(false);
-093
-094  private RegionTransitionState transitionState =
-095  RegionTransitionState.REGION_TRANSITION_QUEUE;
-096  private HRegionInfo regionInfo;
-097  private volatile boolean lock = false;
-098
-099  public RegionTransitionProcedure() {
-100// Required by the Procedure framework to create the procedure on replay
-101super();
+051 *
+052 * <p>Procedures move from the REGION_TRANSITION_QUEUE state when they are
+053 * first submitted, to the REGION_TRANSITION_DISPATCH state when the request
+054 * to remote server is sent and the Procedure is suspended waiting on external
+055 * event to be woken again. Once the external event is triggered, Procedure
+056 * moves to the REGION_TRANSITION_FINISH state.
+057 *
+058 * <p>NOTE: {@link AssignProcedure} and {@link UnassignProcedure} should not be thought of
+059 * as being asymmetric, at least currently.
+060 * <ul>
+061 * <li>{@link AssignProcedure} moves through all the above described states and implements methods
+062 * associated with each while {@link UnassignProcedure} starts at state
+063 * REGION_TRANSITION_DISPATCH and state REGION_TRANSITION_QUEUE is not supported.</li>
+064 *
+065 * <li>When any step in {@link AssignProcedure} fails, failure handler
+066 * AssignProcedure#handleFailure(MasterProcedureEnv, 
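The class comment above is the clearest description of the assignment state machine in this batch, so a compact illustration may help readers skimming the diff. The sketch below is not the HBase implementation; it only restates the REGION_TRANSITION_QUEUE to REGION_TRANSITION_DISPATCH to REGION_TRANSITION_FINISH flow from the javadoc, and every name apart from the three state constants is hypothetical.

  // Illustrative sketch only: restates the state flow documented above.
  enum RegionTransitionState { REGION_TRANSITION_QUEUE, REGION_TRANSITION_DISPATCH, REGION_TRANSITION_FINISH }

  final class TransitionFlowSketch {
    private RegionTransitionState state = RegionTransitionState.REGION_TRANSITION_QUEUE;

    /** Drives one step; returns false once the region has reached its target state. */
    boolean step() {
      switch (state) {
        case REGION_TRANSITION_QUEUE:
          // newly submitted (or re-queued after a failed assign): pick a target server
          state = RegionTransitionState.REGION_TRANSITION_DISPATCH;
          return true;
        case REGION_TRANSITION_DISPATCH:
          // request sent to the RegionServer; the real procedure suspends here until the
          // AssignmentManager wakes it with the reported transition
          state = RegionTransitionState.REGION_TRANSITION_FINISH;
          return true;
        case REGION_TRANSITION_FINISH:
          // apply the final state (update meta / in-memory maps) and stop
          return false;
        default:
          throw new IllegalStateException(String.valueOf(state));
      }
    }
  }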

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
index 25c4fe1..1bff997 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
@@ -395,13 +395,13 @@ service.
 
 
 boolean
-HTable.exists(Get get)
+Table.exists(Get get)
 Test for the existence of columns in the table, as specified by the Get.
 
 
 
 boolean
-Table.exists(Get get)
+HTable.exists(Get get)
 Test for the existence of columns in the table, as specified by the Get.
 
 
@@ -416,16 +416,6 @@ service.
 HTableWrapper.exists(Get get)
 
 
-CompletableFuture<Result>
-RawAsyncTableImpl.get(Get get)
-
-
-Result
-HTable.get(Get get)
-Extracts certain cells from a given row.
-
-
-
 Result
 Table.get(Get get)
 Extracts certain cells from a given row.
@@ -437,6 +427,16 @@ service.
 
 
 CompletableFuture<Result>
+RawAsyncTableImpl.get(Get get)
+
+
+Result
+HTable.get(Get get)
+Extracts certain cells from a given row.
+
+
+
+CompletableFuture<Result>
 AsyncTableBase.get(Get get)
 Extracts certain cells from a given row.
 
@@ -479,13 +479,13 @@ service.
 
 
 boolean[]
-HTable.existsAll(List<Get> gets)
+Table.existsAll(List<Get> gets)
 Test for the existence of columns in the table, as specified by the Gets.
 
 
 
 boolean[]
-Table.existsAll(List<Get> gets)
+HTable.existsAll(List<Get> gets)
 Test for the existence of columns in the table, as specified by the Gets.
 
 
@@ -500,16 +500,6 @@ service.
 HTableWrapper.existsAll(List<Get> gets)
 
 
-List<CompletableFuture<Result>>
-RawAsyncTableImpl.get(List<Get> gets)
-
-
-Result[]
-HTable.get(List<Get> gets)
-Extracts certain cells from the given rows, in batch.
-
-
-
 Result[]
 Table.get(List<Get> gets)
 Extracts certain cells from the given rows, in batch.
@@ -521,6 +511,16 @@ service.
 
 
 List<CompletableFuture<Result>>
+RawAsyncTableImpl.get(List<Get> gets)
+
+
+Result[]
+HTable.get(List<Get> gets)
+Extracts certain cells from the given rows, in batch.
+
+
+
+List<CompletableFuture<Result>>
 AsyncTableBase.get(List<Get> gets)
 Extracts certain cells from the given rows, in batch.
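For readers decoding the flattened "Uses of Get" table above: the synchronous Table calls it lists (exists, get, and the batched List of Get overload) are used as sketched below, while AsyncTableBase and RawAsyncTableImpl return CompletableFuture of Result instead of blocking. The table name, column names, and configuration in the sketch are placeholders.

  import java.io.IOException;
  import java.util.Arrays;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Get;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class GetUsageSketch {
    public static void main(String[] args) throws IOException {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Table table = conn.getTable(TableName.valueOf("my_table"))) {
        Get get = new Get(Bytes.toBytes("row1"));
        get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));

        boolean present = table.exists(get);              // existence check, no value transfer
        Result single = table.get(get);                   // single-row read
        Result[] batch = table.get(Arrays.asList(         // one Result per Get, in request order
            get, new Get(Bytes.toBytes("row2"))));
      }
    }
  }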
 


[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/export_control.html
--
diff --git a/export_control.html b/export_control.html
index 69e4cac..d8d6f1b 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Export Control
@@ -336,7 +336,7 @@ for more details.
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-30
+  Last Published: 
2017-07-31
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/hbase-annotations/checkstyle.html
--
diff --git a/hbase-annotations/checkstyle.html 
b/hbase-annotations/checkstyle.html
index 7bb6f8d..5f6e1ca 100644
--- a/hbase-annotations/checkstyle.html
+++ b/hbase-annotations/checkstyle.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-07-30
+Last Published: 2017-07-31
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/hbase-annotations/dependencies.html
--
diff --git a/hbase-annotations/dependencies.html 
b/hbase-annotations/dependencies.html
index 397180e..ab4e6ab 100644
--- a/hbase-annotations/dependencies.html
+++ b/hbase-annotations/dependencies.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-07-30
+Last Published: 2017-07-31
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/hbase-annotations/dependency-convergence.html
--
diff --git a/hbase-annotations/dependency-convergence.html 
b/hbase-annotations/dependency-convergence.html
index 2b1d18e..b95e812 100644
--- a/hbase-annotations/dependency-convergence.html
+++ b/hbase-annotations/dependency-convergence.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-07-30
+Last Published: 2017-07-31
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/hbase-annotations/dependency-info.html
--
diff --git a/hbase-annotations/dependency-info.html 
b/hbase-annotations/dependency-info.html
index 3a23107..72af473 100644
--- a/hbase-annotations/dependency-info.html
+++ b/hbase-annotations/dependency-info.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-07-30
+Last Published: 2017-07-31
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/hbase-annotations/dependency-management.html
--
diff --git a/hbase-annotations/dependency-management.html 
b/hbase-annotations/dependency-management.html
index 3539977..8f94a7e 100644
--- a/hbase-annotations/dependency-management.html
+++ b/hbase-annotations/dependency-management.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
   

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
index c9a18a3..c80f6d8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
@@ -2492,2617 +2492,2627 @@
 2484  return;
 2485}
 2486  }
-2487  
errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, "Region "
-2488  + descriptiveName + " is a 
split parent in META, in HDFS, "
-2489  + "and not deployed on any 
region server. This could be transient, "
-2490  + "consider to run the catalog 
janitor first!");
-2491  if (shouldFixSplitParents()) {
-2492setShouldRerun();
-2493resetSplitParent(hbi);
-2494  }
-2495} else if (inMeta && !inHdfs && !isDeployed) {
-2496  
errors.reportError(ERROR_CODE.NOT_IN_HDFS_OR_DEPLOYED, "Region "
-2497  + descriptiveName + " found in 
META, but not in HDFS "
-2498  + "or deployed on any region 
server.");
-2499  if (shouldFixMeta()) {
-2500deleteMetaRegion(hbi);
-2501  }
-2502} else if (inMeta && !inHdfs && isDeployed) {
-2503  
errors.reportError(ERROR_CODE.NOT_IN_HDFS, "Region " + descriptiveName
-2504  + " found in META, but not in 
HDFS, " +
-2505  "and deployed on " + 
Joiner.on(", ").join(hbi.deployedOn));
-2506  // We treat HDFS as ground truth.  
Any information in meta is transient
-2507  // and equivalent data can be 
regenerated.  So, lets unassign and remove
-2508  // these problems from META.
-2509  if (shouldFixAssignments()) {
-2510errors.print("Trying to fix 
unassigned region...");
-2511undeployRegions(hbi);
-2512  }
-2513  if (shouldFixMeta()) {
-2514// wait for it to complete
-2515deleteMetaRegion(hbi);
-2516  }
-2517} else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) {
-2518  
errors.reportError(ERROR_CODE.NOT_DEPLOYED, "Region " + descriptiveName
-2519  + " not deployed on any region 
server.");
-2520  tryAssignmentRepair(hbi, "Trying 
to fix unassigned region...");
-2521} else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) {
-2522  
errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED,
-2523  "Region " + descriptiveName + 
" should not be deployed according " +
-2524  "to META, but is deployed on " 
+ Joiner.on(", ").join(hbi.deployedOn));
-2525  if (shouldFixAssignments()) {
-2526errors.print("Trying to close 
the region " + descriptiveName);
-2527setShouldRerun();
-2528
HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, 
hbi.deployedOn);
-2529  }
-2530} else if (inMeta && inHdfs && isMultiplyDeployed) {
-2531  
errors.reportError(ERROR_CODE.MULTI_DEPLOYED, "Region " + descriptiveName
-2532  + " is listed in hbase:meta on 
region server " + hbi.metaEntry.regionServer
-2533  + " but is multiply assigned 
to region servers " +
-2534  Joiner.on(", 
").join(hbi.deployedOn));
-2535  // If we are trying to fix the 
errors
-2536  if (shouldFixAssignments()) {
-2537errors.print("Trying to fix 
assignment error...");
-2538setShouldRerun();
-2539
HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, 
hbi.deployedOn);
-2540  }
-2541} else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
-2542  
errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, "Region "
-2543  + descriptiveName + " listed 
in hbase:meta on region server " +
-2544  hbi.metaEntry.regionServer + " 
but found on region server " +
-2545  hbi.deployedOn.get(0));
-2546  // If we are trying to fix the 
errors
-2547  if (shouldFixAssignments()) {
-2548errors.print("Trying to fix 
assignment error...");
-2549setShouldRerun();
-2550
HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, 
hbi.deployedOn);
-2551
HBaseFsckRepair.waitUntilAssigned(admin, hbi.getHdfsHRI());
-2552  }
-2553} else {
-2554  
errors.reportError(ERROR_CODE.UNKNOWN, "Region " + descriptiveName +
-2555  " is in an unforeseen state:" 
+
-2556  " inMeta=" + inMeta +
-2557  " inHdfs=" + inHdfs +
-2558  " isDeployed=" + isDeployed 
+
-2559  " isMultiplyDeployed=" + 
isMultiplyDeployed +
-2560  " deploymentMatchesMeta=" + 
deploymentMatchesMeta +
-2561  " shouldBeDeployed=" + 
shouldBeDeployed);
-2562}
-2563  }
-2564
-2565  /**
-2566   * Checks tables 
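The hunk above is hard to follow with the boolean operators stripped, so here is a condensed restatement of the branch structure it shows. It is a sketch, not the real HBaseFsck method: it returns the ERROR_CODE name instead of calling errors.reportError(...), reduces the repair calls to comments, and omits the earlier LINGERING_SPLIT_PARENT branch.

  // Sketch: which ERROR_CODE each (inMeta, inHdfs, isDeployed, ...) combination maps to,
  // per the excerpt above.
  class RegionConsistencySketch {
    static String classify(boolean inMeta, boolean inHdfs, boolean isDeployed,
        boolean shouldBeDeployed, boolean isMultiplyDeployed, boolean deploymentMatchesMeta) {
      if (inMeta && !inHdfs && !isDeployed) {
        return "NOT_IN_HDFS_OR_DEPLOYED";        // repair: deleteMetaRegion(hbi)
      } else if (inMeta && !inHdfs && isDeployed) {
        return "NOT_IN_HDFS";                    // HDFS is ground truth: undeployRegions + deleteMetaRegion
      } else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) {
        return "NOT_DEPLOYED";                   // repair: tryAssignmentRepair(hbi, ...)
      } else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) {
        return "SHOULD_NOT_BE_DEPLOYED";         // repair: fixMultiAssignment(...)
      } else if (inMeta && inHdfs && isMultiplyDeployed) {
        return "MULTI_DEPLOYED";                 // repair: fixMultiAssignment(...)
      } else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
        return "SERVER_DOES_NOT_MATCH_META";     // repair: fixMultiAssignment + waitUntilAssigned
      } else {
        return "UNKNOWN";                        // unforeseen combination, reported verbatim
      }
    }
  }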

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.html
index 45cb66f..6b18754 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/MajorCompactionScanQueryMatcher.html
@@ -39,54 +39,55 @@
 031public class 
MajorCompactionScanQueryMatcher extends DropDeletesCompactionScanQueryMatcher 
{
 032
 033  public 
MajorCompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes,
-034  long readPointToUse, long 
earliestPutTs, long oldestUnexpiredTS, long now) {
-035super(scanInfo, deletes, 
readPointToUse, earliestPutTs, oldestUnexpiredTS, now);
-036  }
-037
-038  @Override
-039  public MatchCode match(Cell cell) 
throws IOException {
-040MatchCode returnCode = 
preCheck(cell);
-041if (returnCode != null) {
-042  return returnCode;
-043}
-044long timestamp = 
cell.getTimestamp();
-045long mvccVersion = 
cell.getSequenceId();
-046byte typeByte = cell.getTypeByte();
-047
-048// The delete logic is pretty 
complicated now.
-049// This is corroborated by the 
following:
-050// 1. The store might be instructed 
to keep deleted rows around.
-051// 2. A scan can optionally see past 
a delete marker now.
-052// 3. If deleted rows are kept, we 
have to find out when we can
-053// remove the delete markers.
-054// 4. Family delete markers are 
always first (regardless of their TS)
-055// 5. Delete markers should not be 
counted as version
-056// 6. Delete markers affect puts of 
the *same* TS
-057// 7. Delete marker need to be 
version counted together with puts
-058// they affect
-059//
-060if (CellUtil.isDelete(typeByte)) {
-061  if (mvccVersion > maxReadPointToTrackVersions) {
-062// We can not drop this delete 
marker yet, and also we should not use this delete marker to
-063// mask any cell yet.
-064return MatchCode.INCLUDE;
-065  }
-066  trackDelete(cell);
-067  returnCode = tryDropDelete(cell);
-068  if (returnCode != null) {
-069return returnCode;
-070  }
-071} else {
-072  returnCode = checkDeleted(deletes, 
cell);
-073  if (returnCode != null) {
-074return returnCode;
-075  }
-076}
-077// Skip checking column since we do 
not remove column during compaction.
-078return columns.checkVersions(cell, 
timestamp, typeByte,
-079  mvccVersion > maxReadPointToTrackVersions);
-080  }
-081}
+034  ColumnTracker columns, long 
readPointToUse, long earliestPutTs, long oldestUnexpiredTS,
+035  long now) {
+036super(scanInfo, deletes, columns, 
readPointToUse, earliestPutTs, oldestUnexpiredTS, now);
+037  }
+038
+039  @Override
+040  public MatchCode match(Cell cell) 
throws IOException {
+041MatchCode returnCode = 
preCheck(cell);
+042if (returnCode != null) {
+043  return returnCode;
+044}
+045long timestamp = 
cell.getTimestamp();
+046long mvccVersion = 
cell.getSequenceId();
+047byte typeByte = cell.getTypeByte();
+048
+049// The delete logic is pretty 
complicated now.
+050// This is corroborated by the 
following:
+051// 1. The store might be instructed 
to keep deleted rows around.
+052// 2. A scan can optionally see past 
a delete marker now.
+053// 3. If deleted rows are kept, we 
have to find out when we can
+054// remove the delete markers.
+055// 4. Family delete markers are 
always first (regardless of their TS)
+056// 5. Delete markers should not be 
counted as version
+057// 6. Delete markers affect puts of 
the *same* TS
+058// 7. Delete marker need to be 
version counted together with puts
+059// they affect
+060//
+061if (CellUtil.isDelete(typeByte)) {
+062  if (mvccVersion > maxReadPointToTrackVersions) {
+063// We can not drop this delete 
marker yet, and also we should not use this delete marker to
+064// mask any cell yet.
+065return MatchCode.INCLUDE;
+066  }
+067  trackDelete(cell);
+068  returnCode = tryDropDelete(cell);
+069  if (returnCode != null) {
+070return returnCode;
+071  }
+072} else {
+073  returnCode = checkDeleted(deletes, 
cell);
+074  if (returnCode != null) {
+075return returnCode;
+076  }
+077}
+078// Skip checking column since we do 
not remove column during compaction.
+079return columns.checkVersions(cell, 
timestamp, typeByte,
+080  mvccVersion  

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
index afd9ccc..904b921 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
@@ -30,1916 +30,1984 @@
 022import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
 023
 024import java.io.IOException;
-025import java.util.ArrayList;
-026import java.util.Arrays;
-027import java.util.HashSet;
-028import java.util.Iterator;
-029import java.util.List;
-030import java.util.Map;
-031import java.util.Set;
-032import 
java.util.concurrent.atomic.AtomicBoolean;
-033import 
java.util.concurrent.atomic.AtomicInteger;
-034import 
java.util.concurrent.atomic.AtomicLong;
-035import java.util.stream.Collectors;
-036import java.util.stream.Stream;
-037import 
java.util.concurrent.ConcurrentHashMap;
-038import 
java.util.concurrent.CopyOnWriteArrayList;
-039import java.util.concurrent.DelayQueue;
-040import java.util.concurrent.TimeUnit;
-041
-042import org.apache.commons.logging.Log;
-043import 
org.apache.commons.logging.LogFactory;
-044import 
org.apache.hadoop.conf.Configuration;
-045import 
org.apache.hadoop.hbase.HConstants;
-046import 
org.apache.hadoop.hbase.ProcedureInfo;
-047import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-048import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-049import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
-050import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
-051import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-052import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-053import 
org.apache.hadoop.hbase.procedure2.util.DelayedUtil;
-054import 
org.apache.hadoop.hbase.procedure2.util.DelayedUtil.DelayedWithTimeout;
-055import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-056import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-057import 
org.apache.hadoop.hbase.security.User;
-058import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-059import 
org.apache.hadoop.hbase.util.NonceKey;
-060import 
org.apache.hadoop.hbase.util.Pair;
-061import 
org.apache.hadoop.hbase.util.Threads;
-062
-063/**
-064 * Thread Pool that executes the submitted procedures.
-065 * The executor has a ProcedureStore associated.
-066 * Each operation is logged and on restart the pending procedures are resumed.
-067 *
-068 * Unless the Procedure code throws an error (e.g. invalid user input)
-069 * the procedure will complete (at some point in time). On restart the pending
-070 * procedures are resumed and the ones that failed will be rolled back.
-071 *
-072 * The user can add procedures to the executor via submitProcedure(proc),
-073 * check for the finished state via isFinished(procId)
-074 * and get the result via getResult(procId).
-075 */
-076@InterfaceAudience.Private
-077@InterfaceStability.Evolving
-078public class ProcedureExecutor<TEnvironment> {
-079  private static final Log LOG = 
LogFactory.getLog(ProcedureExecutor.class);
-080
-081  public static final String 
CHECK_OWNER_SET_CONF_KEY = "hbase.procedure.check.owner.set";
-082  private static final boolean 
DEFAULT_CHECK_OWNER_SET = false;
-083
-084  public static final String 
WORKER_KEEP_ALIVE_TIME_CONF_KEY =
-085  
"hbase.procedure.worker.keep.alive.time.msec";
-086  private static final long 
DEFAULT_WORKER_KEEP_ALIVE_TIME = Long.MAX_VALUE;
-087
-088  Testing testing = null;
-089  public static class Testing {
-090protected boolean killIfSuspended = 
false;
-091protected boolean 
killBeforeStoreUpdate = false;
-092protected boolean 
toggleKillBeforeStoreUpdate = false;
-093
-094protected boolean 
shouldKillBeforeStoreUpdate() {
-095  final boolean kill = 
this.killBeforeStoreUpdate;
-096  if 
(this.toggleKillBeforeStoreUpdate) {
-097this.killBeforeStoreUpdate = 
!kill;
-098LOG.warn("Toggle KILL before 
store update to: " + this.killBeforeStoreUpdate);
-099  }
-100  return kill;
-101}
-102
-103protected boolean 
shouldKillBeforeStoreUpdate(final boolean isSuspended) {
-104  return (isSuspended && !killIfSuspended) ? false : shouldKillBeforeStoreUpdate();
-105}
-106  }
-107
-108  public interface 
ProcedureExecutorListener {
-109void procedureLoaded(long procId);
-110void procedureAdded(long procId);
-111void procedureFinished(long 
procId);
-112  }
-113
-114  /**
-115   * Internal cleaner that removes the 
completed procedure results after a TTL.
-116   * NOTE: This is a special case handled 
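The class comment spells out the lifecycle (submitProcedure, isFinished, getResult); a minimal polling sketch of that lifecycle follows. It is illustrative only, assuming the method shapes implied by the comment: the concrete result type is left opaque, and real callers would use the ProcedureExecutorListener shown above rather than busy-waiting.

  import org.apache.hadoop.hbase.procedure2.Procedure;
  import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

  public class ProcedureLifecycleSketch {
    // Minimal sketch of the submit -> poll isFinished -> getResult lifecycle named above.
    static <TEnvironment> Object runAndWait(ProcedureExecutor<TEnvironment> executor,
        Procedure<TEnvironment> proc) throws InterruptedException {
      long procId = executor.submitProcedure(proc);   // logged to the associated ProcedureStore
      while (!executor.isFinished(procId)) {
        Thread.sleep(100);                            // simple poll; a listener avoids busy-waiting
      }
      return executor.getResult(procId);              // completed result or failure information
    }
  }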

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
index a945b54..422c076 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
@@ -92,3454 +92,3410 @@
 084import 
org.apache.hadoop.hbase.client.VersionInfoUtil;
 085import 
org.apache.hadoop.hbase.conf.ConfigurationObserver;
 086import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-087import 
org.apache.hadoop.hbase.exceptions.MergeRegionException;
-088import 
org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
-089import 
org.apache.hadoop.hbase.exceptions.ScannerResetException;
-090import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-091import 
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-092import 
org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
-093import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-094import 
org.apache.hadoop.hbase.ipc.PriorityFunction;
-095import 
org.apache.hadoop.hbase.ipc.QosPriority;
-096import 
org.apache.hadoop.hbase.ipc.RpcCallContext;
-097import 
org.apache.hadoop.hbase.ipc.RpcCallback;
-098import 
org.apache.hadoop.hbase.ipc.RpcServer;
-099import 
org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
-100import 
org.apache.hadoop.hbase.ipc.RpcServerFactory;
-101import 
org.apache.hadoop.hbase.ipc.RpcServerInterface;
-102import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-103import 
org.apache.hadoop.hbase.ipc.ServerRpcController;
-104import 
org.apache.hadoop.hbase.master.MasterRpcServices;
-105import 
org.apache.hadoop.hbase.quotas.ActivePolicyEnforcement;
-106import 
org.apache.hadoop.hbase.quotas.OperationQuota;
-107import 
org.apache.hadoop.hbase.quotas.QuotaUtil;
-108import 
org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
-109import 
org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-110import 
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
-111import 
org.apache.hadoop.hbase.quotas.SpaceViolationPolicyEnforcement;
-112import 
org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
-113import 
org.apache.hadoop.hbase.regionserver.Leases.Lease;
-114import 
org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
-115import 
org.apache.hadoop.hbase.regionserver.Region.Operation;
-116import 
org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
-117import 
org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
-118import 
org.apache.hadoop.hbase.regionserver.handler.OpenPriorityRegionHandler;
-119import 
org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
-120import 
org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-121import 
org.apache.hadoop.hbase.security.Superusers;
-122import 
org.apache.hadoop.hbase.security.User;
-123import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-124import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
-125import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-126import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-127import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
-128import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-143import 

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
index 41de383..e1f7fd7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
@@ -384,976 +384,984 @@
 376return new 
ModifyableColumnFamilyDescriptor(desc);
 377  }
 378
-379  private 
ColumnFamilyDescriptorBuilder(final byte[] name) {
-380this.desc = new 
ModifyableColumnFamilyDescriptor(name);
+379  public static ColumnFamilyDescriptor 
of(String name) {
+380return of(Bytes.toBytes(name));
 381  }
 382
-383  private 
ColumnFamilyDescriptorBuilder(final ColumnFamilyDescriptor desc) {
-384this.desc = new 
ModifyableColumnFamilyDescriptor(desc);
+383  public static ColumnFamilyDescriptor 
of(byte[] name) {
+384return newBuilder(name).build();
 385  }
 386
-387  /**
-388   * @param desc The table descriptor to 
serialize
-389   * @return This instance serialized 
with pb with pb magic prefix
-390   */
-391  public static byte[] 
toByteArray(ColumnFamilyDescriptor desc) {
-392if (desc instanceof 
ModifyableColumnFamilyDescriptor) {
-393  return 
((ModifyableColumnFamilyDescriptor) desc).toByteArray();
-394}
-395return new 
ModifyableColumnFamilyDescriptor(desc).toByteArray();
-396  }
-397
-398  public ColumnFamilyDescriptor build() 
{
-399return new 
ModifyableColumnFamilyDescriptor(desc);
-400  }
-401
-402  public ColumnFamilyDescriptorBuilder 
removeConfiguration(String key) {
-403desc.removeConfiguration(key);
-404return this;
-405  }
-406
-407  public ColumnFamilyDescriptorBuilder 
setBlockCacheEnabled(boolean value) {
-408desc.setBlockCacheEnabled(value);
-409return this;
-410  }
-411
-412  public ColumnFamilyDescriptorBuilder 
setBlocksize(int value) {
-413desc.setBlocksize(value);
-414return this;
-415  }
-416
-417  public ColumnFamilyDescriptorBuilder 
setBloomFilterType(final BloomType value) {
-418desc.setBloomFilterType(value);
-419return this;
-420  }
-421
-422  public ColumnFamilyDescriptorBuilder 
setCacheBloomsOnWrite(boolean value) {
-423desc.setCacheBloomsOnWrite(value);
-424return this;
-425  }
-426
-427  public ColumnFamilyDescriptorBuilder 
setCacheDataInL1(boolean value) {
-428desc.setCacheDataInL1(value);
-429return this;
-430  }
-431
-432  public ColumnFamilyDescriptorBuilder 
setCacheDataOnWrite(boolean value) {
-433desc.setCacheDataOnWrite(value);
-434return this;
-435  }
-436
-437  public ColumnFamilyDescriptorBuilder 
setCacheIndexesOnWrite(final boolean value) {
-438desc.setCacheIndexesOnWrite(value);
-439return this;
-440  }
-441
-442  public ColumnFamilyDescriptorBuilder 
setCompactionCompressionType(Compression.Algorithm value) {
-443
desc.setCompactionCompressionType(value);
-444return this;
-445  }
-446
-447  public ColumnFamilyDescriptorBuilder 
setCompressTags(boolean value) {
-448desc.setCompressTags(value);
-449return this;
-450  }
-451
-452  public ColumnFamilyDescriptorBuilder 
setCompressionType(Compression.Algorithm value) {
-453desc.setCompressionType(value);
-454return this;
-455  }
-456
-457  public ColumnFamilyDescriptorBuilder 
setConfiguration(final String key, final String value) {
-458desc.setConfiguration(key, value);
-459return this;
-460  }
-461
-462  public ColumnFamilyDescriptorBuilder 
setDFSReplication(short value) {
-463desc.setDFSReplication(value);
-464return this;
-465  }
-466
-467  public ColumnFamilyDescriptorBuilder 
setDataBlockEncoding(DataBlockEncoding value) {
-468desc.setDataBlockEncoding(value);
-469return this;
-470  }
-471
-472  public ColumnFamilyDescriptorBuilder 
setEncryptionKey(final byte[] value) {
-473desc.setEncryptionKey(value);
-474return this;
-475  }
-476
-477  public ColumnFamilyDescriptorBuilder 
setEncryptionType(String value) {
-478desc.setEncryptionType(value);
-479return this;
-480  }
-481
-482  public ColumnFamilyDescriptorBuilder 
setEvictBlocksOnClose(boolean value) {
-483desc.setEvictBlocksOnClose(value);
-484return this;
-485  }
-486
-487  public ColumnFamilyDescriptorBuilder 
setInMemory(final boolean value) {
-488desc.setInMemory(value);
-489return this;
-490  }
-491
-492  public ColumnFamilyDescriptorBuilder 
setInMemoryCompaction(final MemoryCompactionPolicy value) {
-493desc.setInMemoryCompaction(value);
-494return this;

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
index 0c07a2f..c90d203 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
@@ -34,553 +34,554 @@
 026import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
 027import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 028
-029import 
com.google.common.annotations.VisibleForTesting;
-030
-031import io.netty.buffer.ByteBuf;
-032import 
io.netty.buffer.ByteBufAllocator;
-033import io.netty.channel.Channel;
-034import 
io.netty.channel.ChannelHandler.Sharable;
-035import 
io.netty.channel.ChannelHandlerContext;
-036import io.netty.channel.EventLoop;
-037import 
io.netty.channel.SimpleChannelInboundHandler;
-038import 
io.netty.handler.codec.protobuf.ProtobufDecoder;
-039import 
io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-040import 
io.netty.handler.timeout.IdleStateEvent;
-041import 
io.netty.handler.timeout.IdleStateHandler;
-042import io.netty.util.concurrent.Future;
-043import 
io.netty.util.concurrent.Promise;
-044import 
io.netty.util.concurrent.PromiseCombiner;
-045
-046import java.io.IOException;
-047import java.nio.ByteBuffer;
-048import java.util.ArrayDeque;
-049import java.util.Collection;
-050import java.util.Collections;
-051import java.util.Deque;
-052import java.util.IdentityHashMap;
-053import java.util.List;
-054import java.util.Set;
-055import 
java.util.concurrent.CompletableFuture;
-056import java.util.concurrent.TimeUnit;
-057import java.util.function.Supplier;
-058
-059import 
org.apache.hadoop.conf.Configuration;
-060import 
org.apache.hadoop.crypto.Encryptor;
-061import org.apache.hadoop.fs.Path;
-062import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-063import 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose;
-064import 
org.apache.hadoop.hbase.util.CancelableProgressable;
-065import 
org.apache.hadoop.hbase.util.FSUtils;
-066import 
org.apache.hadoop.hdfs.DFSClient;
-067import 
org.apache.hadoop.hdfs.DistributedFileSystem;
-068import 
org.apache.hadoop.hdfs.protocol.ClientProtocol;
-069import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-070import 
org.apache.hadoop.hdfs.protocol.LocatedBlock;
-071import 
org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
-072import 
org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
-073import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
-074import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-075import 
org.apache.hadoop.util.DataChecksum;
-076
-077/**
-078 * An asynchronous HDFS output stream implementation which fans out data to datanode and only
-079 * supports writing file with only one block.
-080 * <p>
-081 * Use the createOutput method in {@link FanOutOneBlockAsyncDFSOutputHelper} to create. The mainly
-082 * usage of this class is implementing WAL, so we only expose a little HDFS configurations in the
-083 * method. And we place it here under util package because we want to make it independent of WAL
-084 * implementation thus easier to move it to HDFS project finally.
-085 * <p>
-086 * Note that, all connections to datanode will run in the same {@link EventLoop} which means we only
-087 * need one thread here. But be careful, we do some blocking operations in {@link #close()} and
-088 * {@link #recoverAndClose(CancelableProgressable)} methods, so do not call them inside
-089 * {@link EventLoop}. And for {@link #write(byte[])} {@link #write(byte[], int, int)},
-090 * {@link #buffered()} and {@link #flush(boolean)}, if you call them outside {@link EventLoop},
-091 * there will be an extra context-switch.
-092 * <p>
-093 * Advantages compare to DFSOutputStream:
-094 * <ol>
-095 * <li>The fan out mechanism. This will reduce the latency.</li>
-096 * <li>The asynchronous WAL could also run in the same EventLoop, we could just call write and flush
-097 * inside the EventLoop thread, so generally we only have one thread to do all the things.</li>
-098 * <li>Fail-fast when connection to datanode error. The WAL implementation could open new writer
-099 * ASAP.</li>
-100 * <li>We could benefit from netty's ByteBuf management mechanism.</li>
-101 * </ol>
-102 */
-103@InterfaceAudience.Private
-104public class FanOutOneBlockAsyncDFSOutput 
implements AsyncFSOutput {
-105
-106  // The MAX_PACKET_SIZE is 16MB but it 
include the header size and 
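Because the excerpt truncates before the implementation, here is a sketch of the calling pattern the javadoc above prescribes. It assumes an already-created instance "out" (obtained from FanOutOneBlockAsyncDFSOutputHelper.createOutput, whose parameter list is not shown here), a netty EventLoop, and a payload byte array; only the methods the javadoc itself names are used, and flush's return value is ignored since it varies across versions.

  import java.io.IOException;
  import io.netty.channel.EventLoop;
  import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput;

  public class AsyncOutputUsageSketch {
    static void writeThenClose(FanOutOneBlockAsyncDFSOutput out, EventLoop eventLoop, byte[] payload)
        throws IOException {
      eventLoop.execute(() -> {
        out.write(payload);   // write()/buffered()/flush() are cheapest on the EventLoop thread
        out.flush(false);     // the javadoc's flush(boolean); passing true also syncs the block
      });
      // close() and recoverAndClose(...) block on I/O, so never call them on the EventLoop.
      out.close();            // in real code, only after the queued writes have flushed
    }
  }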

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
index e1fbce4..873e17f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
@@ -1089,497 +1089,498 @@
 1081}
 1082  }
1083  Map<String, AtomicLong> actualReadTableLatency = regionSink.getReadLatencyMap();
-1084  for (String tableName : 
this.configuredReadTableTimeouts.keySet()) {
-1085if 
(actualReadTableLatency.containsKey(tableName)) {
-1086  Long actual = 
actualReadTableLatency.get(tableName).longValue();
-1087  Long configured = 
this.configuredReadTableTimeouts.get(tableName);
-1088  LOG.info("Read operation 
for " + tableName + " took " + actual +
-1089" ms. The configured 
read timeout was " + configured + " ms.");
-1090  if (actual > configured) {
-1091LOG.error("Read 
operation for " + tableName + " exceeded the configured read timeout.");
-1092  }
-1093} else {
-1094  LOG.error("Read operation 
for " + tableName + " failed!");
-1095}
-1096  }
-1097  if (this.writeSniffing) {
-1098String writeTableStringName 
= this.writeTableName.getNameAsString();
-1099long actualWriteLatency = 
regionSink.getWriteLatency().longValue();
-1100LOG.info("Write operation 
for " + writeTableStringName + " took " + actualWriteLatency + " ms. The 
configured write timeout was " +
-1101  
this.configuredWriteTableTimeout + " ms.");
-1102// Check that the writeTable 
write operation latency does not exceed the configured timeout.
-1103if (actualWriteLatency > this.configuredWriteTableTimeout) {
-1104  LOG.error("Write operation 
for " + writeTableStringName + " exceeded the configured write timeout.");
-1105}
-1106  }
-1107} catch (Exception e) {
-1108  LOG.error("Run regionMonitor 
failed", e);
-1109  this.errorCode = 
ERROR_EXIT_CODE;
-1110}
-  }
-1112  this.done = true;
-1113}
-1114
-1115private String[] 
generateMonitorTables(String[] monitorTargets) throws IOException {
-1116  String[] returnTables = null;
-1117
-1118  if (this.useRegExp) {
-1119Pattern pattern = null;
-1120HTableDescriptor[] tds = null;
-1121Set<String> tmpTables = new TreeSet<>();
-1122try {
-1123  if (LOG.isDebugEnabled()) {
-1124
LOG.debug(String.format("reading list of tables"));
-1125  }
-1126  tds = 
this.admin.listTables(pattern);
-1127  if (tds == null) {
-1128tds = new 
HTableDescriptor[0];
-1129  }
-1130  for (String monitorTarget : 
monitorTargets) {
-1131pattern = 
Pattern.compile(monitorTarget);
-1132for (HTableDescriptor td : 
tds) {
-1133  if 
(pattern.matcher(td.getNameAsString()).matches()) {
-1134
tmpTables.add(td.getNameAsString());
-1135  }
-1136}
-1137  }
-1138} catch (IOException e) {
-1139  LOG.error("Communicate with 
admin failed", e);
-1140  throw e;
-1141}
-1142
-1143if (tmpTables.size() > 0) {
-1144  returnTables = 
tmpTables.toArray(new String[tmpTables.size()]);
-1145} else {
-1146  String msg = "No HTable found, 
tablePattern:" + Arrays.toString(monitorTargets);
-1147  LOG.error(msg);
-1148  this.errorCode = 
INIT_ERROR_EXIT_CODE;
-1149  throw new 
TableNotFoundException(msg);
-1150}
-1151  } else {
-1152returnTables = monitorTargets;
-1153  }
-1154
-1155  return returnTables;
-1156}
-1157
-1158/*
-1159 * canary entry point to monitor all 
the tables.
-1160 */
-1161private List<Future<Void>> sniff(TaskType taskType, RegionStdOutSink regionSink) throws Exception {
-1162  if (LOG.isDebugEnabled()) {
-1163LOG.debug(String.format("reading 
list of tables"));
-1164  }
-1165  List<Future<Void>> taskFutures = new LinkedList<>();
-1166  for (HTableDescriptor table : 
admin.listTables()) {
-1167if (admin.isTableEnabled(table.getTableName())
-1168 && (!table.getTableName().equals(writeTableName))) {
-1169  AtomicLong readLatency = 
regionSink.initializeAndGetReadLatencyForTable(table.getNameAsString());
-1170  
taskFutures.addAll(Canary.sniff(admin, sink, table, executor, taskType, 
this.rawScanEnabled, readLatency));
-1171}
-1172  }
-1173  return 

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html
index feb42ea..4bd98f4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html
@@ -185,4189 +185,4266 @@
 177import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
 178import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
 179import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
-180import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
-181import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
-182import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
-183import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
-184import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-185import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
-186import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
-187import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
-188import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-189import 
org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-190import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-191import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-192import 
org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-193import 
org.apache.hadoop.hbase.util.Addressing;
-194import 
org.apache.hadoop.hbase.util.Bytes;
-195import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-196import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-197import 
org.apache.hadoop.hbase.util.Pair;
-198import 
org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-199import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-200import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-201import 
org.apache.hadoop.ipc.RemoteException;
-202import 
org.apache.hadoop.util.StringUtils;
-203import 
org.apache.zookeeper.KeeperException;
-204
-205import 
com.google.common.annotations.VisibleForTesting;
-206import com.google.protobuf.Descriptors;
-207import com.google.protobuf.Message;
-208import 
com.google.protobuf.RpcController;
-209import java.util.stream.Collectors;
-210
-211/**
-212 * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that
-213 * this is an HBase-internal class as defined in
-214 * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
-215 * There are no guarantees for backwards source / binary compatibility and methods or class can
-216 * change or go away without deprecation.
-217 * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead of constructing
-218 * an HBaseAdmin directly.
-219 *
-220 * <p>Connection should be an <i>unmanaged</i> connection obtained via
-221 * {@link ConnectionFactory#createConnection(Configuration)}
-222 *
-223 * @see ConnectionFactory
-224 * @see Connection
-225 * @see Admin
-226 */
-227@InterfaceAudience.Private
-228@InterfaceStability.Evolving
-229public class HBaseAdmin implements Admin {
-230  private static final Log LOG = 
LogFactory.getLog(HBaseAdmin.class);
-231
-232  private static final String 
ZK_IDENTIFIER_PREFIX =  "hbase-admin-on-";
+180import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
+181import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
+182import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
+183import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
+184import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
+185import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
+186import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+187import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
+188import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
+189import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
+190import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
+191import 
org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
+192import 
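The javadoc above is the operative guidance in this hunk: obtain an Admin through a Connection rather than instantiating HBaseAdmin. A minimal sketch of that pattern (the table name is a placeholder):

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class AdminUsageSketch {
    public static void main(String[] args) throws IOException {
      Configuration conf = HBaseConfiguration.create();
      // unmanaged connection, as the javadoc requires; never "new HBaseAdmin(...)"
      try (Connection connection = ConnectionFactory.createConnection(conf);
           Admin admin = connection.getAdmin()) {
        boolean exists = admin.tableExists(TableName.valueOf("my_table"));
      }
    }
  }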

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.MyListHook.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.MyListHook.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.MyListHook.html
new file mode 100644
index 000..6c65fd1
--- /dev/null
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.MyListHook.html
@@ -0,0 +1,1810 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/*
+002 *
+003 * Licensed to the Apache Software 
Foundation (ASF) under one
+004 * or more contributor license 
agreements.  See the NOTICE file
+005 * distributed with this work for 
additional information
+006 * regarding copyright ownership.  The 
ASF licenses this file
+007 * to you under the Apache License, 
Version 2.0 (the
+008 * "License"); you may not use this file 
except in compliance
+009 * with the License.  You may obtain a 
copy of the License at
+010 *
+011 * 
http://www.apache.org/licenses/LICENSE-2.0
+012 *
+013 * Unless required by applicable law or 
agreed to in writing, software
+014 * distributed under the License is 
distributed on an "AS IS" BASIS,
+015 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+016 * See the License for the specific 
language governing permissions and
+017 * limitations under the License.
+018 */
+019
+020package org.apache.hadoop.hbase.regionserver;
+021
+022import static org.junit.Assert.assertEquals;
+023import static org.junit.Assert.assertFalse;
+024import static org.junit.Assert.assertNull;
+025import static org.junit.Assert.assertTrue;
+026import static org.mockito.Matchers.any;
+027import static org.mockito.Mockito.spy;
+028import static org.mockito.Mockito.times;
+029import static org.mockito.Mockito.verify;
+030
+031import java.io.IOException;
+032import java.lang.ref.SoftReference;
+033import java.security.PrivilegedExceptionAction;
+034import java.util.ArrayList;
+035import java.util.Arrays;
+036import java.util.Collection;
+037import java.util.Collections;
+038import java.util.Iterator;
+039import java.util.List;
+040import java.util.ListIterator;
+041import java.util.NavigableSet;
+042import java.util.TreeSet;
+043import java.util.concurrent.ConcurrentSkipListSet;
+044import java.util.concurrent.CountDownLatch;
+045import java.util.concurrent.ExecutorService;
+046import java.util.concurrent.Executors;
+047import java.util.concurrent.TimeUnit;
+048import java.util.concurrent.atomic.AtomicBoolean;
+049import java.util.concurrent.atomic.AtomicInteger;
+050import java.util.function.Consumer;
+051
+052import org.apache.commons.logging.Log;
+053import org.apache.commons.logging.LogFactory;
+054import org.apache.hadoop.conf.Configuration;
+055import org.apache.hadoop.fs.FSDataOutputStream;
+056import org.apache.hadoop.fs.FileStatus;
+057import org.apache.hadoop.fs.FileSystem;
+058import org.apache.hadoop.fs.FilterFileSystem;
+059import org.apache.hadoop.fs.LocalFileSystem;
+060import org.apache.hadoop.fs.Path;
+061import org.apache.hadoop.fs.permission.FsPermission;
+062import org.apache.hadoop.hbase.Cell;
+063import org.apache.hadoop.hbase.CellComparator;
+064import org.apache.hadoop.hbase.CellUtil;
+065import org.apache.hadoop.hbase.HBaseConfiguration;
+066import org.apache.hadoop.hbase.HBaseTestingUtility;
+067import org.apache.hadoop.hbase.HColumnDescriptor;
+068import org.apache.hadoop.hbase.HConstants;
+069import org.apache.hadoop.hbase.HRegionInfo;
+070import org.apache.hadoop.hbase.HTableDescriptor;
+071import org.apache.hadoop.hbase.KeyValue;
+072import org.apache.hadoop.hbase.MemoryCompactionPolicy;
+073import org.apache.hadoop.hbase.TableName;
+074import org.apache.hadoop.hbase.client.Get;
+075import org.apache.hadoop.hbase.client.Scan;
+076import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+077import org.apache.hadoop.hbase.io.compress.Compression;
+078import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+079import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+080import org.apache.hadoop.hbase.io.hfile.HFile;
+081import org.apache.hadoop.hbase.io.hfile.HFileContext;
+082import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+083import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+084import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
+085import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
+086import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
+087import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
+088import org.apache.hadoop.hbase.security.User;
+089import org.apache.hadoop.hbase.testclassification.MediumTests;
+090import org.apache.hadoop.hbase.testclassification.RegionServerTests;

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html
index 25d6b70..635798d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html
@@ -1234,540 +1234,541 @@
 1226   * @param result the result returned by the append
 1227   * @throws IOException if an error occurred on the coprocessor
 1228   */
-1229  public void postAppend(final Append append, final Result result) throws IOException {
-1230    execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
-1231      @Override
-1232      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1233          throws IOException {
-1234        oserver.postAppend(ctx, append, result);
-1235      }
-1236    });
-1237  }
-1238
-1239  /**
-1240   * @param increment increment object
-1241   * @param result the result returned by postIncrement
-1242   * @throws IOException if an error occurred on the coprocessor
-1243   */
-1244  public Result postIncrement(final Increment increment, Result result) throws IOException {
-1245    return execOperationWithResult(result,
-1246        coprocessors.isEmpty() ? null : new RegionOperationWithResult<Result>() {
-1247      @Override
-1248      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1249          throws IOException {
-1250        setResult(oserver.postIncrement(ctx, increment, getResult()));
-1251      }
-1252    });
-1253  }
-1254
-1255  /**
-1256   * @param scan the Scan specification
-1257   * @return scanner id to return to client if default operation should be
-1258   * bypassed, null otherwise
-1259   * @exception IOException Exception
-1260   */
-1261  public RegionScanner preScannerOpen(final Scan scan) throws IOException {
-1262    return execOperationWithResult(true, null,
-1263        coprocessors.isEmpty() ? null : new RegionOperationWithResult<RegionScanner>() {
-1264      @Override
-1265      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1266          throws IOException {
-1267        setResult(oserver.preScannerOpen(ctx, scan, getResult()));
-1268      }
-1269    });
-1270  }
-1271
-1272  /**
-1273   * See
-1274   * {@link RegionObserver#preStoreScannerOpen(ObserverContext,
-1275   *    Store, Scan, NavigableSet, KeyValueScanner)}
-1276   */
-1277  public KeyValueScanner preStoreScannerOpen(final Store store, final Scan scan,
-1278      final NavigableSet<byte[]> targetCols, final long readPt) throws IOException {
-1279    return execOperationWithResult(null,
-1280        coprocessors.isEmpty() ? null : new RegionOperationWithResult<KeyValueScanner>() {
-1281      @Override
-1282      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1283          throws IOException {
-1284        setResult(oserver.preStoreScannerOpen(ctx, store, scan, targetCols, getResult(), readPt));
-1285      }
-1286    });
-1287  }
-1288
-1289  /**
-1290   * @param scan the Scan specification
-1291   * @param s the scanner
-1292   * @return the scanner instance to use
-1293   * @exception IOException Exception
-1294   */
-1295  public RegionScanner postScannerOpen(final Scan scan, RegionScanner s) throws IOException {
-1296    return execOperationWithResult(s,
-1297        coprocessors.isEmpty() ? null : new RegionOperationWithResult<RegionScanner>() {
-1298      @Override
-1299      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1300          throws IOException {
-1301        setResult(oserver.postScannerOpen(ctx, scan, getResult()));
-1302      }
-1303    });
-1304  }
-1305
-1306  /**
-1307   * @param s the scanner
-1308   * @param results the result set returned by the region server
-1309   * @param limit the maximum number of results to return
-1310   * @return 'has next' indication to client if bypassing default behavior, or
-1311   * null otherwise
-1312   * @exception IOException Exception
-1313   */
-1314  public Boolean preScannerNext(final InternalScanner s,
-1315      final List<Result> results, final int limit) throws IOException {
-1316    return execOperationWithResult(true, false,
-1317        coprocessors.isEmpty() ? null : new RegionOperationWithResult<Boolean>() {
-1318      @Override
-1319      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1320          throws 
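The host methods above all follow the same pattern: wrap the hook invocation in a RegionOperation or RegionOperationWithResult and thread the previous hook's result through getResult()/setResult(). For context, a minimal sketch of an observer on the receiving end of that dispatch, assuming the 1.x/2.0-alpha-era RegionObserver signatures; the class name and log message are illustrative only, not part of the diff:

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

// Illustrative observer invoked by RegionCoprocessorHost.postIncrement(...) shown above.
public class IncrementAuditObserver extends BaseRegionObserver {
  private static final Log LOG = LogFactory.getLog(IncrementAuditObserver.class);

  @Override
  public Result postIncrement(ObserverContext<RegionCoprocessorEnvironment> ctx,
      Increment increment, Result result) throws IOException {
    // Whatever is returned here is fed back through setResult(), so the next
    // observer in the chain (and ultimately the client) sees it as the result.
    LOG.debug("postIncrement on region "
        + ctx.getEnvironment().getRegionInfo().getRegionNameAsString());
    return result;
  }
}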

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater.html
index f729c99..0a32350 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater.html
@@ -54,757 +54,756 @@
 046import 
io.netty.channel.ChannelPipeline;
 047import io.netty.channel.EventLoop;
 048import 
io.netty.channel.SimpleChannelInboundHandler;
-049import 
io.netty.channel.socket.nio.NioSocketChannel;
-050import 
io.netty.handler.codec.protobuf.ProtobufDecoder;
-051import 
io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-052import 
io.netty.handler.timeout.IdleStateEvent;
-053import 
io.netty.handler.timeout.IdleStateHandler;
-054import io.netty.util.concurrent.Future;
-055import 
io.netty.util.concurrent.FutureListener;
-056import 
io.netty.util.concurrent.Promise;
-057
-058import java.io.IOException;
-059import 
java.lang.reflect.InvocationTargetException;
-060import java.lang.reflect.Method;
-061import java.util.ArrayList;
-062import java.util.EnumSet;
-063import java.util.List;
-064import java.util.concurrent.TimeUnit;
-065
-066import org.apache.commons.logging.Log;
-067import 
org.apache.commons.logging.LogFactory;
-068import 
org.apache.hadoop.conf.Configuration;
-069import 
org.apache.hadoop.crypto.CryptoProtocolVersion;
-070import 
org.apache.hadoop.crypto.Encryptor;
-071import org.apache.hadoop.fs.CreateFlag;
-072import org.apache.hadoop.fs.FileSystem;
-073import 
org.apache.hadoop.fs.FileSystemLinkResolver;
-074import org.apache.hadoop.fs.Path;
-075import 
org.apache.hadoop.fs.UnresolvedLinkException;
-076import 
org.apache.hadoop.fs.permission.FsPermission;
-077import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-078import 
org.apache.hadoop.hbase.client.ConnectionUtils;
-079import 
org.apache.hadoop.hbase.util.CancelableProgressable;
-080import 
org.apache.hadoop.hbase.util.FSUtils;
-081import 
org.apache.hadoop.hdfs.DFSClient;
-082import 
org.apache.hadoop.hdfs.DFSOutputStream;
-083import 
org.apache.hadoop.hdfs.DistributedFileSystem;
-084import 
org.apache.hadoop.hdfs.protocol.ClientProtocol;
-085import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-086import 
org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-087import 
org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-088import 
org.apache.hadoop.hdfs.protocol.LocatedBlock;
-089import 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-090import 
org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
-091import 
org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
-092import 
org.apache.hadoop.hdfs.protocol.datatransfer.Op;
-093import 
org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
-094import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
-095import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
-096import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
-097import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
-098import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
-099import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
-100import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
-101import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-102import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
-103import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
-104import 
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-105import 
org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-106import 
org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
-107import 
org.apache.hadoop.io.EnumSetWritable;
-108import 
org.apache.hadoop.ipc.RemoteException;
-109import org.apache.hadoop.net.NetUtils;
-110import 
org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
-111import 
org.apache.hadoop.security.token.Token;
-112import 
org.apache.hadoop.util.DataChecksum;
-113
-114/**
-115 * Helper class for implementing {@link 
FanOutOneBlockAsyncDFSOutput}.
-116 */
-117@InterfaceAudience.Private
-118public final class 
FanOutOneBlockAsyncDFSOutputHelper {
+049import 
io.netty.handler.codec.protobuf.ProtobufDecoder;
+050import 
io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
+051import 

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
index 16c0042..71844ce 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
@@ -126,2499 +126,2543 @@
 118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
 119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
 120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse;
-151import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
-152import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
-153import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
-154import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
-155import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
-156import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
-157import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
-158import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
-159import 

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
index f403a65..1a2a97a 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i
 109":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i
 
109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class RawAsyncHBaseAdmin
+public class RawAsyncHBaseAdmin
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements AsyncAdmin
 The implementation of AsyncAdmin.
@@ -708,11 +708,29 @@ implements 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">Boolean
+isCatalogJanitorOn()
+Query on the catalog janitor state.
+
+
+
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">Boolean
+isCleanerChoreOn()
+Query the current state of the cleaner chore.
+
+
+
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">Boolean
 isMasterInMaintenanceMode()
 Check whether master is in maintenance mode
 
 
-
+
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">Boolean
+isNormalizerOn()
+Query the current state of the region normalizer
+
+
+
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 
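The three additions above (isCatalogJanitorOn, isCleanerChoreOn, isNormalizerOn) are plain CompletableFuture<Boolean> reads of master state. A rough usage sketch follows; how the AsyncAdmin itself is obtained (normally from an AsyncConnection) varies by release, so it is taken as a parameter here, and only the is*On() calls are taken from the diff:

import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.hbase.client.AsyncAdmin;

public class MasterSwitches {
  static void print(AsyncAdmin admin) throws Exception {
    // Each call returns immediately with a future; get() blocks until the master replies.
    CompletableFuture<Boolean> normalizer = admin.isNormalizerOn();
    CompletableFuture<Boolean> janitor = admin.isCatalogJanitorOn();
    CompletableFuture<Boolean> cleaner = admin.isCleanerChoreOn();
    System.out.println("normalizer on:      " + normalizer.get());
    System.out.println("catalog janitor on: " + janitor.get());
    System.out.println("cleaner chore on:   " + cleaner.get());
  }
}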

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/hbase-archetypes/hbase-archetype-builder/project-summary.html
--
diff --git a/hbase-archetypes/hbase-archetype-builder/project-summary.html 
b/hbase-archetypes/hbase-archetype-builder/project-summary.html
index 281c21c..1af1c27 100644
--- a/hbase-archetypes/hbase-archetype-builder/project-summary.html
+++ b/hbase-archetypes/hbase-archetype-builder/project-summary.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-07-05
+Last Published: 2017-07-07
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Archetype builder

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/hbase-archetypes/hbase-archetype-builder/source-repository.html
--
diff --git a/hbase-archetypes/hbase-archetype-builder/source-repository.html 
b/hbase-archetypes/hbase-archetype-builder/source-repository.html
index ca63feb..352cb47 100644
--- a/hbase-archetypes/hbase-archetype-builder/source-repository.html
+++ b/hbase-archetypes/hbase-archetype-builder/source-repository.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-07-05
+Last Published: 2017-07-07
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Archetype builder

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/hbase-archetypes/hbase-archetype-builder/team-list.html
--
diff --git a/hbase-archetypes/hbase-archetype-builder/team-list.html 
b/hbase-archetypes/hbase-archetype-builder/team-list.html
index 9fa9de3..c3ae076 100644
--- a/hbase-archetypes/hbase-archetype-builder/team-list.html
+++ b/hbase-archetypes/hbase-archetype-builder/team-list.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-07-05
+Last Published: 2017-07-07
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Archetype builder

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/hbase-archetypes/hbase-client-project/checkstyle.html
--
diff --git a/hbase-archetypes/hbase-client-project/checkstyle.html 
b/hbase-archetypes/hbase-client-project/checkstyle.html
index 22f32ec..e6d2e77 100644
--- a/hbase-archetypes/hbase-client-project/checkstyle.html
+++ b/hbase-archetypes/hbase-client-project/checkstyle.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-07-05
+Last Published: 2017-07-07
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Exemplar for 
hbase-client archetype

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/hbase-archetypes/hbase-client-project/dependencies.html
--
diff --git a/hbase-archetypes/hbase-client-project/dependencies.html 
b/hbase-archetypes/hbase-client-project/dependencies.html
index 09a161e..95fef83 100644
--- a/hbase-archetypes/hbase-client-project/dependencies.html
+++ b/hbase-archetypes/hbase-client-project/dependencies.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-07-05
+Last Published: 2017-07-07
   | Version: 

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html
index 5916f4e..14ed003 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":9,"i6":9,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":9,"i6":9,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -130,7 +130,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
  method accesses the read-only copy more than once it makes a local copy of it
  to ensure it accesses the same copy.
 
- The methods getVersionedList(), getVersionedTail(), and flattenYoungestSegment() are also
+ The methods getVersionedList(), getVersionedTail(), and flattenOneSegment() are also
  protected by a lock since they need to have a consistent (atomic) view of the pipeline list
  and version number.
 
@@ -217,7 +217,8 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 boolean
-flattenYoungestSegment(long requesterVersion)
+flattenOneSegment(long requesterVersion,
+ CompactingMemStore.IndexType idxType)
 If the caller holds the current version, go over the pipeline and try to flatten each
 segment.
 
@@ -267,10 +268,15 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 pushHead(MutableSegmentsegment)
 
 
+private void
+replaceAtIndex(intidx,
+  ImmutableSegmentnewSegment)
+
+
 long
 size()
 
-
+
 boolean
 swap(VersionedSegmentsListversionedList,
 ImmutableSegmentsegment,
@@ -279,13 +285,13 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Swaps the versioned list at the tail of the pipeline with a 
new segment.
 
 
-
+
 private void
 swapSuffix(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends Segmentsuffix,
   ImmutableSegmentsegment,
   booleancloseSegmentsInSuffix)
 
-
+
 private boolean
 validateSuffixList(http://docs.oracle.com/javase/8/docs/api/java/util/LinkedList.html?is-external=true;
 title="class or interface in java.util">LinkedListImmutableSegmentsuffix)
 
@@ -473,13 +479,14 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 private staticlonggetSegmentsKeySize(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends Segmentlist)
 
 
-
+
 
 
 
 
-flattenYoungestSegment
-public boolean flattenYoungestSegment(long requesterVersion)
+flattenOneSegment
+public boolean flattenOneSegment(long requesterVersion,
+ CompactingMemStore.IndexType idxType)
 If the caller holds the current version, go over the pipeline and try to flatten each
 segment. Flattening replaces a ConcurrentSkipListMap-based CellSet with a CellArrayMap-based one.
 Flattening of a segment that is not initially based on a ConcurrentSkipListMap has no effect.
@@ -496,7 +503,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 isEmpty
-publicbooleanisEmpty()
+publicbooleanisEmpty()
 
 
 
@@ -505,7 +512,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getSegments
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends SegmentgetSegments()
+publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends SegmentgetSegments()
 
 
 
@@ -514,7 +521,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 size
-publiclongsize()
+publiclongsize()
 
 
 
@@ -523,7 +530,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getMinSequenceId
-publiclonggetMinSequenceId()
+publiclonggetMinSequenceId()
 
 
 
@@ -532,7 +539,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getTailSize
-publicMemstoreSizegetTailSize()
+publicMemstoreSizegetTailSize()
 
 
 
@@ -541,7 +548,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getPipelineSize
-publicMemstoreSizegetPipelineSize()
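The renamed flattenOneSegment and the new replaceAtIndex helper both rely on the versioning scheme described above: a caller first reads the pipeline version, and the mutation is applied under the lock only if that version is still current. A stripped-down sketch of that pattern, with illustrative names rather than the real CompactionPipeline types:

import java.util.LinkedList;
import java.util.List;

public class VersionedPipeline<S> {
  private final LinkedList<S> pipeline = new LinkedList<>();
  private long version = 0;

  public synchronized List<S> snapshot() {
    return new LinkedList<>(pipeline);   // read-only copy handed to readers
  }

  public synchronized long getVersion() {
    return version;
  }

  /** In-place flatten/replace, a no-op if the pipeline changed since the caller read it. */
  public synchronized boolean replaceHeadIfCurrent(long requesterVersion, S flattened) {
    if (requesterVersion != version || pipeline.isEmpty()) {
      return false;                      // someone else mutated the pipeline; give up
    }
    pipeline.set(0, flattened);          // mirrors replaceAtIndex(): version is kept
    return true;
  }

  public synchronized void push(S segment) {
    pipeline.addFirst(segment);
    version++;                           // structural change invalidates old versions
  }
}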

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
index c895448..545d4da 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
@@ -1294,425 +1294,426 @@
 1286  }
 1287
 1288  // We normalize locality to be a 
score between 0 and 1.0 representing how good it
-1289  // is compared to how good it 
could be
-1290  locality /= bestLocality;
-1291}
-1292
-1293@Override
-1294protected void regionMoved(int 
region, int oldServer, int newServer) {
-1295  int oldEntity = type == 
LocalityType.SERVER ? oldServer : cluster.serverIndexToRackIndex[oldServer];
-1296  int newEntity = type == 
LocalityType.SERVER ? newServer : cluster.serverIndexToRackIndex[newServer];
-1297  if (this.services == null) {
-1298return;
-1299  }
-1300  double localityDelta = 
getWeightedLocality(region, newEntity) - getWeightedLocality(region, 
oldEntity);
-1301  double normalizedDelta = 
localityDelta / bestLocality;
-1302  locality += normalizedDelta;
-1303}
-1304
-1305@Override
-1306double cost() {
-1307  return 1 - locality;
-1308}
-1309
-1310private int 
getMostLocalEntityForRegion(int region) {
-1311  return 
cluster.getOrComputeRegionsToMostLocalEntities(type)[region];
-1312}
-1313
-1314private double 
getWeightedLocality(int region, int entity) {
-1315  return 
cluster.getOrComputeWeightedLocality(region, entity, type);
-1316}
-1317
-1318  }
-1319
-1320  static class 
ServerLocalityCostFunction extends LocalityBasedCostFunction {
-1321
-1322private static final String 
LOCALITY_COST_KEY = "hbase.master.balancer.stochastic.localityCost";
-1323private static final float 
DEFAULT_LOCALITY_COST = 25;
-1324
-1325
ServerLocalityCostFunction(Configuration conf, MasterServices srv) {
-1326  super(
-1327  conf,
-1328  srv,
-1329  LocalityType.SERVER,
-1330  LOCALITY_COST_KEY,
-1331  DEFAULT_LOCALITY_COST
-1332  );
-1333}
-1334
-1335@Override
-1336int regionIndexToEntityIndex(int 
region) {
-1337  return 
cluster.regionIndexToServerIndex[region];
-1338}
-1339  }
-1340
-1341  static class RackLocalityCostFunction 
extends LocalityBasedCostFunction {
-1342
-1343private static final String 
RACK_LOCALITY_COST_KEY = "hbase.master.balancer.stochastic.rackLocalityCost";
-1344private static final float 
DEFAULT_RACK_LOCALITY_COST = 15;
-1345
-1346public 
RackLocalityCostFunction(Configuration conf, MasterServices services) {
-1347  super(
-1348  conf,
-1349  services,
-1350  LocalityType.RACK,
-1351  RACK_LOCALITY_COST_KEY,
-1352  DEFAULT_RACK_LOCALITY_COST
-1353  );
-1354}
-1355
-1356@Override
-1357int regionIndexToEntityIndex(int 
region) {
-1358  return 
cluster.getRackForRegion(region);
-1359}
-1360  }
-1361
-1362  /**
-1363   * Base class the allows writing costs 
functions from rolling average of some
-1364   * number from RegionLoad.
-1365   */
-1366  abstract static class 
CostFromRegionLoadFunction extends CostFunction {
-1367
-1368private ClusterStatus clusterStatus 
= null;
-1369private MapString, 
DequeBalancerRegionLoad loads = null;
-1370private double[] stats = null;
-1371
CostFromRegionLoadFunction(Configuration conf) {
-1372  super(conf);
-1373}
-1374
-1375void setClusterStatus(ClusterStatus 
status) {
-1376  this.clusterStatus = status;
-1377}
-1378
-1379void setLoads(MapString, 
DequeBalancerRegionLoad l) {
-1380  this.loads = l;
-1381}
-1382
-1383@Override
-1384double cost() {
-1385  if (clusterStatus == null || loads 
== null) {
-1386return 0;
-1387  }
-1388
-1389  if (stats == null || stats.length 
!= cluster.numServers) {
-1390stats = new 
double[cluster.numServers];
-1391  }
-1392
-1393  for (int i =0; i  
stats.length; i++) {
-1394//Cost this server has from 
RegionLoad
-1395long cost = 0;
-1396
-1397// for every region on this 
server get the rl
-1398for(int 
regionIndex:cluster.regionsPerServer[i]) {
-1399  
CollectionBalancerRegionLoad regionLoadList =  
cluster.regionLoads[regionIndex];
-1400
-1401  // Now if we found a region 
load get the type of cost that was requested.
-1402  if 
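The locality cost function above keeps a running, normalized locality score and adjusts it incrementally in regionMoved() rather than recomputing it from scratch for every candidate move. A simplified sketch of that bookkeeping, using plain arrays instead of the balancer's Cluster structures (names here are illustrative):

public class LocalityCostSketch {
  private final double[][] weightedLocality; // [region][server], precomputed elsewhere
  private final double bestLocality;         // sum of each region's best achievable locality
  private double locality;                   // current normalized locality in [0, 1]

  public LocalityCostSketch(double[][] weightedLocality, int[] regionToServer) {
    this.weightedLocality = weightedLocality;
    double best = 0, current = 0;
    for (int region = 0; region < weightedLocality.length; region++) {
      best += max(weightedLocality[region]);
      current += weightedLocality[region][regionToServer[region]];
    }
    this.bestLocality = best == 0 ? 1 : best;
    this.locality = current / bestLocality;
  }

  /** Incremental update, mirroring regionMoved() in the diff above. */
  public void regionMoved(int region, int oldServer, int newServer) {
    double delta = weightedLocality[region][newServer] - weightedLocality[region][oldServer];
    locality += delta / bestLocality;
  }

  /** 0 is best (perfect locality), 1 is worst, as the balancer's cost contract requires. */
  public double cost() {
    return 1 - locality;
  }

  private static double max(double[] values) {
    double m = 0;
    for (double v : values) {
      m = Math.max(m, v);
    }
    return m;
  }
}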

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
index 01496d6..dc12c09 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
@@ -48,2406 +48,2267 @@
 040
 041import io.netty.util.Timeout;
 042import io.netty.util.TimerTask;
-043import java.util.stream.Stream;
-044import org.apache.commons.io.IOUtils;
-045import org.apache.commons.logging.Log;
-046import 
org.apache.commons.logging.LogFactory;
-047import 
org.apache.hadoop.hbase.HRegionInfo;
-048import 
org.apache.hadoop.hbase.HRegionLocation;
-049import 
org.apache.hadoop.hbase.MetaTableAccessor;
-050import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-051import 
org.apache.hadoop.hbase.NotServingRegionException;
-052import 
org.apache.hadoop.hbase.ProcedureInfo;
-053import 
org.apache.hadoop.hbase.RegionLocations;
-054import 
org.apache.hadoop.hbase.ServerName;
-055import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-056import 
org.apache.hadoop.hbase.HConstants;
-057import 
org.apache.hadoop.hbase.TableExistsException;
-058import 
org.apache.hadoop.hbase.TableName;
-059import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-060import 
org.apache.hadoop.hbase.TableNotDisabledException;
-061import 
org.apache.hadoop.hbase.TableNotEnabledException;
-062import 
org.apache.hadoop.hbase.TableNotFoundException;
-063import 
org.apache.hadoop.hbase.UnknownRegionException;
-064import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-065import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-066import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-067import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-068import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-069import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-070import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-071import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-072import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-073import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-074import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-075import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-076import 
org.apache.hadoop.hbase.replication.ReplicationException;
-077import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-078import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-079import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-080import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-081import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-082import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-100import 

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
index 6de986f..c895448 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
@@ -26,1592 +26,1693 @@
 018package 
org.apache.hadoop.hbase.master.balancer;
 019
 020import java.util.ArrayDeque;
-021import java.util.Arrays;
-022import java.util.Collection;
-023import java.util.Deque;
-024import java.util.HashMap;
-025import java.util.LinkedList;
-026import java.util.List;
-027import java.util.Map;
-028import java.util.Map.Entry;
-029import java.util.Random;
-030
-031import org.apache.commons.logging.Log;
-032import 
org.apache.commons.logging.LogFactory;
-033import 
org.apache.hadoop.conf.Configuration;
-034import 
org.apache.hadoop.hbase.ClusterStatus;
-035import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-036import 
org.apache.hadoop.hbase.HConstants;
-037import 
org.apache.hadoop.hbase.HRegionInfo;
-038import 
org.apache.hadoop.hbase.RegionLoad;
-039import 
org.apache.hadoop.hbase.ServerLoad;
-040import 
org.apache.hadoop.hbase.ServerName;
-041import 
org.apache.hadoop.hbase.TableName;
-042import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-043import 
org.apache.hadoop.hbase.master.MasterServices;
-044import 
org.apache.hadoop.hbase.master.RegionPlan;
-045import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
-046import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
-047import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
-048import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
-049import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-050import 
org.apache.hadoop.hbase.util.Bytes;
-051import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-052
-053import com.google.common.collect.Lists;
-054
-055/**
-056 * <p>This is a best effort load balancer. Given a Cost function F(C) => x It will
-057 * randomly try and mutate the cluster to Cprime. If F(Cprime) < F(C) then the
-058 * new cluster state becomes the plan. It includes costs functions to compute the cost of:</p>
-059 * <ul>
-060 * <li>Region Load</li>
-061 * <li>Table Load</li>
-062 * <li>Data Locality</li>
-063 * <li>Memstore Sizes</li>
-064 * <li>Storefile Sizes</li>
-065 * </ul>
-066 *
-067 *
-068 * <p>Every cost function returns a number between 0 and 1 inclusive; where 0 is the lowest cost
-069 * best solution, and 1 is the highest possible cost and the worst solution.  The computed costs are
-070 * scaled by their respective multipliers:</p>
+021import java.util.ArrayList;
+022import java.util.Arrays;
+023import java.util.Collection;
+024import java.util.Collections;
+025import java.util.Deque;
+026import java.util.HashMap;
+027import java.util.LinkedList;
+028import java.util.List;
+029import java.util.Map;
+030import java.util.Map.Entry;
+031import java.util.Random;
+032
+033import org.apache.commons.logging.Log;
+034import 
org.apache.commons.logging.LogFactory;
+035import 
org.apache.hadoop.conf.Configuration;
+036import 
org.apache.hadoop.hbase.ClusterStatus;
+037import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
+038import 
org.apache.hadoop.hbase.HConstants;
+039import 
org.apache.hadoop.hbase.HRegionInfo;
+040import 
org.apache.hadoop.hbase.RegionLoad;
+041import 
org.apache.hadoop.hbase.ServerLoad;
+042import 
org.apache.hadoop.hbase.ServerName;
+043import 
org.apache.hadoop.hbase.TableName;
+044import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+045import 
org.apache.hadoop.hbase.master.MasterServices;
+046import 
org.apache.hadoop.hbase.master.RegionPlan;
+047import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
+048import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
+049import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
+050import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
+051import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
+052import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
+053import 
org.apache.hadoop.hbase.util.Bytes;
+054import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+055
+056import com.google.common.base.Optional;
+057import 
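The class comment above describes the whole algorithm: propose a random mutation of the region assignment, evaluate a weighted sum of cost functions that each return a value in [0, 1], and keep the mutation only if the total cost drops. A compact sketch of that loop, using stand-in types rather than the balancer's real CostFunction and Cluster classes:

import java.util.List;
import java.util.Random;

interface CostFunction {
  double cost(int[] regionToServer);   // must return a value in [0, 1]
  double multiplier();                 // relative weight of this cost
}

public class StochasticSearchSketch {
  private final Random random = new Random();

  public int[] improve(int[] assignment, List<CostFunction> costs, int servers, int steps) {
    int[] best = assignment.clone();
    double bestCost = totalCost(best, costs);
    for (int i = 0; i < steps; i++) {
      int[] candidate = best.clone();
      // Mutate: move one randomly chosen region to a randomly chosen server.
      candidate[random.nextInt(candidate.length)] = random.nextInt(servers);
      double candidateCost = totalCost(candidate, costs);
      if (candidateCost < bestCost) {   // accept only strict improvements
        best = candidate;
        bestCost = candidateCost;
      }
    }
    return best;
  }

  private static double totalCost(int[] assignment, List<CostFunction> costs) {
    double total = 0;
    for (CostFunction f : costs) {
      total += f.multiplier() * f.cost(assignment);
    }
    return total;
  }
}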

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
index da61859..4fa61f8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
@@ -366,598 +366,607 @@
 358return this.oldsources;
 359  }
 360
-361  @VisibleForTesting
-362  ListString getAllQueues() {
-363return 
replicationQueues.getAllQueues();
-364  }
-365
-366  void preLogRoll(Path newLog) throws 
IOException {
-367recordLog(newLog);
-368String logName = newLog.getName();
-369String logPrefix = 
AbstractFSWALProvider.getWALPrefixFromWALName(logName);
-370synchronized (latestPaths) {
-371  IteratorPath iterator = 
latestPaths.iterator();
-372  while (iterator.hasNext()) {
-373Path path = iterator.next();
-374if 
(path.getName().contains(logPrefix)) {
-375  iterator.remove();
-376  break;
-377}
-378  }
-379  this.latestPaths.add(newLog);
-380}
-381  }
-382
-383  /**
-384   * Check and enqueue the given log to 
the correct source. If there's still no source for the
-385   * group to which the given log 
belongs, create one
-386   * @param logPath the log path to check 
and enqueue
-387   * @throws IOException
-388   */
-389  private void recordLog(Path logPath) 
throws IOException {
-390String logName = logPath.getName();
-391String logPrefix = 
AbstractFSWALProvider.getWALPrefixFromWALName(logName);
-392// update replication queues on ZK
-393// synchronize on replicationPeers to 
avoid adding source for the to-be-removed peer
-394synchronized (replicationPeers) {
-395  for (String id : 
replicationPeers.getConnectedPeerIds()) {
-396try {
-397  
this.replicationQueues.addLog(id, logName);
-398} catch (ReplicationException e) 
{
-399  throw new IOException("Cannot 
add log to replication queue"
-400  + " when creating a new 
source, queueId=" + id + ", filename=" + logName, e);
-401}
-402  }
-403}
-404// update walsById map
-405synchronized (walsById) {
-406  for (Map.EntryString, 
MapString, SortedSetString entry : 
this.walsById.entrySet()) {
-407String peerId = entry.getKey();
-408MapString, 
SortedSetString walsByPrefix = entry.getValue();
-409boolean existingPrefix = false;
-410for (Map.EntryString, 
SortedSetString walsEntry : walsByPrefix.entrySet()) {
-411  SortedSetString wals = 
walsEntry.getValue();
-412  if (this.sources.isEmpty()) {
-413// If there's no slaves, 
don't need to keep the old wals since
-414// we only consider the last 
one when a new slave comes in
-415wals.clear();
-416  }
-417  if 
(logPrefix.equals(walsEntry.getKey())) {
-418wals.add(logName);
-419existingPrefix = true;
-420  }
-421}
-422if (!existingPrefix) {
-423  // The new log belongs to a new 
group, add it into this peer
-424  LOG.debug("Start tracking logs 
for wal group " + logPrefix + " for peer " + peerId);
-425  SortedSetString wals = 
new TreeSet();
-426  wals.add(logName);
-427  walsByPrefix.put(logPrefix, 
wals);
-428}
-429  }
-430}
-431  }
-432
-433  void postLogRoll(Path newLog) throws 
IOException {
-434// This only updates the sources we 
own, not the recovered ones
-435for (ReplicationSourceInterface 
source : this.sources) {
-436  source.enqueueLog(newLog);
-437}
-438  }
-439
-440  @VisibleForTesting
-441  public AtomicLong getTotalBufferUsed() 
{
-442return totalBufferUsed;
-443  }
-444
-445  /**
-446   * Factory method to create a 
replication source
-447   * @param conf the configuration to 
use
-448   * @param fs the file system to use
-449   * @param manager the manager to use
-450   * @param server the server object for 
this region server
-451   * @param peerId the id of the peer 
cluster
-452   * @return the created source
-453   * @throws IOException
-454   */
-455  protected ReplicationSourceInterface 
getReplicationSource(final Configuration conf,
-456  final FileSystem fs, final 
ReplicationSourceManager manager,
-457  final ReplicationQueues 
replicationQueues, final ReplicationPeers replicationPeers,
-458  final Server server, final String 
peerId, final UUID clusterId,
-459  final ReplicationPeerConfig 
peerConfig, final ReplicationPeer replicationPeer)
-460  throws 
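recordLog() above maintains walsById, a map from peer id to the WAL file names still queued for that peer, grouped by WAL prefix, and preLogRoll()/postLogRoll() keep it in step with log rolls. A minimal sketch of that bookkeeping with illustrative names (this is not the ReplicationSourceManager API, only the shape of the data structure visible in the diff):

import java.util.HashMap;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;

public class WalTrackerSketch {
  // peerId -> (wal prefix -> wal file names still queued for that peer)
  private final Map<String, Map<String, SortedSet<String>>> walsById = new HashMap<>();

  public synchronized void addPeer(String peerId) {
    walsById.putIfAbsent(peerId, new HashMap<>());
  }

  /** Called on log roll: enqueue the new WAL for every known peer. */
  public synchronized void recordLog(String walName, String walPrefix) {
    for (Map.Entry<String, Map<String, SortedSet<String>>> entry : walsById.entrySet()) {
      entry.getValue()
          .computeIfAbsent(walPrefix, p -> new TreeSet<>())
          .add(walName);
    }
  }

  /** Called once a WAL has been fully shipped to a peer. */
  public synchronized void logShipped(String peerId, String walPrefix, String walName) {
    Map<String, SortedSet<String>> byPrefix = walsById.get(peerId);
    if (byPrefix != null && byPrefix.containsKey(walPrefix)) {
      byPrefix.get(walPrefix).remove(walName);
    }
  }
}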

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
index a65f001..08990df 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
@@ -342,8 +342,8 @@
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
 org.apache.hadoop.hbase.ipc.BufferCallBeforeInitHandler.BufferCallAction
-org.apache.hadoop.hbase.ipc.CallEvent.Type
 org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceFactoryImpl.SourceStorage
+org.apache.hadoop.hbase.ipc.CallEvent.Type
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
index 86fce54..80447fa 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
@@ -1336,7 +1336,7 @@ implements HRegionServer
-abort,
 addToMovedRegions,
 addToOnlineRegions,
 checkFileSystem,
 cleanMovedRegions,
 closeAllRegions,
 closeAndOfflineRegion
 ForSplitOrMerge, closeRegion,
 constructRegionServer,
 convertThrowableToIOE,
 createClusterConnection,
 createRegionLoad,
 createRegionServerS
 tatusStub, createRegionServerStatusStub,
 execRegionServerService,
 getCacheConfig,
 getChoreService,
 getClusterConnection,
 getClusterId,
 getCompactionPressure, getCompactionRequester,
 getCompactSplitThread,
 getConfiguration,
 getConfigurationManager,
 getConnection,
 getCoordinatedStateManager,
 getExecutorService,
 getFavoredNodesForRegion,
 getFileSystem,
 getFlushPressure,
 getFlushRequester,
 getFlushThroughputController,
 getFromOnlineRegions,
 getHeapMemoryManager,
 getInfoServer, getLastSequenceId,
 getLeases,
 getMasterAddressTracker,
 getMetaTableLocator,
 getMetrics,
 getMostLoadedRegions,
 getNonceManager,
 getNumberOfOnlineRegions, getOnlineRegion,
 getOnlineRegions,
 getOnlineRegions,
 getOnlineRegionsLocalContext,
 getOnlineTables,
 getRecoveringRegions,
 getRegion,
 getRegionBlockLocations,
 getRegionByEncodedName,
 getRegionByEncodedName,
 getRegionServerAccounting,
 getRegionServerCoprocessorHost,
 getRegionServerCoprocessors,
 getRegionServerMetrics,
 getRegionServerRpcQuotaManager,
 getRegionServerSpaceQuotaManager,
 getRegionsInTransitionInRS,
 getReplicationSourceService,
 getRootDir,
 getRpcServer,
 getRSRpcServices,
 getSecureBulkLoadManager, getStartcode,
 getThreadWakeFrequency,
 getWAL,
 getWALFileSystem,
 getWalRoller,
 getWALRootDir,
 getWALs,
 handleReportForDutyResponse,
 initializeMemStoreChunkCreator,
 isAborted,
 isOnline,
 isStopped,
 isStopping,
 kill,
 movedRegionCleanerPeriod,
 onConfigurationChange,
 postOpenDeployTasks,
 postOpenDeployTasks,
 regionLock,
 removeFromOnlineRegions,
 reportRegionSizesForQuotas, reportRegionStateTransition,
 reportRegionStateTransition,
 reportRegionStateTransition,
 run,
 sendShutdownInterrupt,
 setInitLatch,
 setupClusterConnection,
 shouldUseThisHostnameInstead,
 stop,
 toString,
 tryRegionServerReport,
 unassign, updateConfiguration,
 updateRegionFavoredNodesMapping,
 waitForServerOnline,
 walRollRequestFinished
+abort,
 addToMovedRegions,
 addToOnlineRegions,
 checkFileSystem,
 cleanMovedRegions,
 closeAllRegions,
 closeAndOfflineRegion
 ForSplitOrMerge, closeRegion,
 constructRegionServer,
 convertThrowableToIOE,
 createClusterConnection,
 createRegionLoad,
 createRegionServerS
 tatusStub, createRegionServerStatusStub,
 execRegionServerService,
 getCacheConfig,
 getChoreService,
 getClusterConnection,
 getClusterId,
 getCompactionPressure, getCompactionRequester,
 getCompactSplitThread,
 getConfiguration,
 getConfigurationManager,
 getConnection,
 getCoordinatedStateManager,
 getExecutorService,
 getFavoredNodesForRegion,
 getFileSystem,
 getFlushPressure,
 getFlushRequester,
 getFlushThroughputController,
 getFromOnlineRegions,
 getHeapMemoryManager,
 getInfoServer, getLastSequenceId,
 getLeases,
 getMasterAddressTracker,
 getMetaTableLocator,
 getMetrics,
 getMostLoadedRegions,
 getNonceManager,
 getNumberOfOnlineRegions, getOnlineRegion,
 getOnlineRegions,
 getOnlineRegions,
 getOnlineRegionsLocalContext,
 getOnlineTables,
 getRecoveringRegions,
 getRegion,
 

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/devapidocs/org/apache/hadoop/hbase/classification/tools/class-use/StabilityOptions.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/classification/tools/class-use/StabilityOptions.html
 
b/devapidocs/org/apache/hadoop/hbase/classification/tools/class-use/StabilityOptions.html
index ed31027..b038f42 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/classification/tools/class-use/StabilityOptions.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/classification/tools/class-use/StabilityOptions.html
@@ -4,7 +4,7 @@
 
 
 
-Uses of Class 
org.apache.hadoop.hbase.classification.tools.StabilityOptions (Apache HBase 
2.0.0-SNAPSHOT API)
+Uses of Class 
org.apache.hadoop.hbase.classification.tools.StabilityOptions (Apache HBase 
3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
-org.apache.hadoop.hbase.classification.tools (Apache HBase 
2.0.0-SNAPSHOT API)
+org.apache.hadoop.hbase.classification.tools (Apache HBase 
3.0.0-SNAPSHOT API)