[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignProcedure.html
index 7d33235..4e2ac53 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignProcedure.html
@@ -35,324 +35,335 @@
 027import 
org.apache.commons.logging.LogFactory;
 028import 
org.apache.hadoop.hbase.HRegionInfo;
 029import 
org.apache.hadoop.hbase.ServerName;
-030import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-031import 
org.apache.hadoop.hbase.client.RetriesExhaustedException;
-032import 
org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-033import 
org.apache.hadoop.hbase.master.RegionState.State;
-034import 
org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
-035import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-036import 
org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher.RegionOpenOperation;
-037import 
org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
-038import 
org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
-039import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation;
-040import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-041import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData;
-042import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState;
-043import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-044
-045/**
-046 * Procedure that describe the assignment 
of a single region.
-047 * There can only be one 
RegionTransitionProcedure per region running at a time
-048 * since each procedure takes a lock on 
the region.
-049 *
-050 * <p>The Assign starts by pushing 
the "assign" operation to the AssignmentManager
-051 * and then will go in a "waiting" 
state.
-052 * The AM will batch the "assign" 
requests and ask the Balancer where to put
-053 * the region (the various policies will 
be respected: retain, round-robin, random).
-054 * Once the AM and the balancer have 
found a place for the region the procedure
-055 * will be resumed and an "open region" 
request will be placed in the Remote Dispatcher
-056 * queue, and the procedure once again 
will go in a "waiting state".
-057 * The Remote Dispatcher will batch the 
various requests for that server and
-058 * they will be sent to the RS for 
execution.
-059 * The RS will complete the open 
operation by calling master.reportRegionStateTransition().
-060 * The AM will intercept the transition 
report, and notify the procedure.
-061 * The procedure will finish the 
assignment by publishing to new state on meta
-062 * or it will retry the assignment.
-063 *
-064 * <p>This procedure does not 
rollback when beyond the first
-065 * REGION_TRANSITION_QUEUE step; it will 
press on trying to assign in the face of
-066 * failure. Should we ignore rollback 
calls to Assign/Unassign then? Or just
-067 * remove rollback here?
-068 */
-069@InterfaceAudience.Private
-070public class AssignProcedure extends 
RegionTransitionProcedure {
-071  private static final Log LOG = 
LogFactory.getLog(AssignProcedure.class);
-072
-073  private boolean forceNewPlan = false;
-074
-075  /**
-076   * Gets set as desired target on move, 
merge, etc., when we want to go to a particular server.
-077   * We may not be able to respect this 
request but will try. When it is NOT set, then we ask
-078   * the balancer to assign. This value 
is used below in startTransition to set regionLocation if
-079   * non-null. Setting regionLocation in 
regionServerNode is how we override balancer setting
-080   * destination.
-081   */
-082  protected volatile ServerName 
targetServer;
-083
-084  public AssignProcedure() {
-085// Required by the Procedure 
framework to create the procedure on replay
-086super();
-087  }
-088
-089  public AssignProcedure(final 
HRegionInfo regionInfo) {
-090this(regionInfo, false);
-091  }
-092
-093  public AssignProcedure(final 
HRegionInfo regionInfo, final boolean forceNewPlan) {
-094super(regionInfo);
-095this.forceNewPlan = forceNewPlan;
-096this.targetServer = null;
-097  }
-098
-099  public AssignProcedure(final 
HRegionInfo regionInfo, final ServerName destinationServer) {
-100super(regionInfo);
-101this.forceNewPlan = false;
-102this.targetServer = 
destinationServer;
-103  }
-104
-105  @Override
-106  public TableOperationType 
getTableOperationType() {
-107return 
TableOperationType.REGION_ASSIGN;
-108  }
-109
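The class comment above walks through the assignment flow (the AssignmentManager batches "assign" requests, the balancer picks a location, the Remote Dispatcher sends the open-region call, and the RS reports back via reportRegionStateTransition). Below is a minimal sketch of how the three constructors shown in this diff differ, assuming a master-side ProcedureExecutor<MasterProcedureEnv> and an HRegionInfo are already available; submitProcedure is the entry point named in the ProcedureExecutor javadoc, and everything else here is illustrative.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.assignment.AssignProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

public class AssignSketch {
  // Illustrative only: procExec and regionInfo come from a running master in practice.
  static long assign(ProcedureExecutor<MasterProcedureEnv> procExec,
      HRegionInfo regionInfo, ServerName preferredServer) {
    AssignProcedure proc = (preferredServer == null)
        ? new AssignProcedure(regionInfo)                   // let the balancer pick a location
        : new AssignProcedure(regionInfo, preferredServer); // move/merge case: try this server
    // new AssignProcedure(regionInfo, true) would instead force a fresh plan from the balancer.
    return procExec.submitProcedure(proc);                  // the procedure then waits on the AM
  }
}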

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
index f719c05..ee8518f 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
@@ -800,14 +800,6 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 private Connection
-BackupAdminImpl.conn
-
-
-(package private) Connection
-BackupCommands.Command.conn
-
-
-private Connection
 RestoreTablesClient.conn
 
 
@@ -815,9 +807,17 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 TableBackupClient.conn
 
 
+(package private) Connection
+BackupCommands.Command.conn
+
+
 protected Connection
 BackupManager.conn
 
+
+private Connection
+BackupAdminImpl.conn
+
 
 private Connection
 BackupSystemTable.connection
@@ -1102,13 +1102,13 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 
-private Connection
-RegionServerCallable.connection
-
-
 (package private) Connection
 ConnectionImplementation.MasterServiceState.connection
 
+
+private Connection
+RegionServerCallable.connection
+
 
 
 
@@ -1153,20 +1153,20 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 
+Connection
+Admin.getConnection()
+
+
 (package private) Connection
 RegionAdminServiceCallable.getConnection()
 
-
+
 protected Connection
 HTable.getConnection()
 INTERNAL Used by unit tests and tools to do 
low-level
  manipulations.
 
 
-
-Connection
-Admin.getConnection()
-
 
 Connection
 HBaseAdmin.getConnection()
@@ -1438,11 +1438,11 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 private Connection
-HRegionPartitioner.connection
+TableInputFormatBase.connection
 
 
 private Connection
-TableInputFormatBase.connection
+HRegionPartitioner.connection
 
 
 
@@ -1475,23 +1475,23 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 
-(package private) Connection
-MultiTableOutputFormat.MultiTableRecordWriter.connection
-
-
-private Connection
-HRegionPartitioner.connection
-
-
 private Connection
 TableOutputFormat.TableRecordWriter.connection
 
 
+(package private) Connection
+MultiTableOutputFormat.MultiTableRecordWriter.connection
+
+
 private Connection
 TableInputFormatBase.connection
 The underlying Connection 
of the table.
 
 
+
+private Connection
+HRegionPartitioner.connection
+
 
 (package private) Connection
 SyncTable.SyncMapper.sourceConnection
@@ -1615,15 +1615,15 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 private Connection
-RegionPlacementMaintainer.connection
+CatalogJanitor.connection
 
 
 private Connection
-CatalogJanitor.connection
+SnapshotOfRegionAssignmentFromMeta.connection
 
 
 private Connection
-SnapshotOfRegionAssignmentFromMeta.connection
+RegionPlacementMaintainer.connection
 
 
 
@@ -1718,31 +1718,31 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 private Connection
-QuotaObserverChore.conn
+TableQuotaSnapshotStore.conn
 
 
 private Connection
-QuotaObserverChore.TablesWithQuotas.conn
+SpaceQuotaRefresherChore.conn
 
 
 private Connection
-SnapshotQuotaObserverChore.conn
+NamespaceQuotaSnapshotStore.conn
 
 
 private Connection
-NamespaceQuotaSnapshotStore.conn
+SnapshotQuotaObserverChore.conn
 
 
 private Connection
-TableQuotaSnapshotStore.conn
+QuotaObserverChore.conn
 
 
 private Connection
-TableSpaceQuotaSnapshotNotifier.conn
+QuotaObserverChore.TablesWithQuotas.conn
 
 
 private Connection
-SpaceQuotaRefresherChore.conn
+TableSpaceQuotaSnapshotNotifier.conn
 
 
 private Connection
@@ -2144,11 +2144,11 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 private Connection
-ReplicationSourceManager.connection
+HFileReplicator.connection
 
 
 private Connection
-HFileReplicator.connection
+ReplicationSourceManager.connection
 
 
 private Connection
@@ -2164,13 +2164,13 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 
-Connection
-ReplicationSourceManager.getConnection()
-
-
 private Connection
 ReplicationSink.getConnection()
 
+
+Connection
+ReplicationSourceManager.getConnection()
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/client/class-use/ConnectionConfiguration.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/ConnectionConfiguration.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/ConnectionConfiguration.html
index 705d13a..ffc7872 100644
--- 

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
index a691301..9a8f45d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
@@ -165,3337 +165,3323 @@
 157import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
 158import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
 159import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-160import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-161import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-162import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-163import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-165import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-166import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
-167import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
-168import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry;
-169import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest;
-170import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse;
-171import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-172import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action;
-173import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
-174import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath;
-175import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
-176import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
-177import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadResponse;
-178import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-179import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Condition;
-180import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-181import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-182import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest;
-183import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetResponse;
-184import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRegionLoadStats;
-185import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest;
-186import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiResponse;
-187import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest;
-188import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateResponse;
-189import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
-190import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
-191import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
-192import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadResponse;
-193import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction;
-194import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult;
-195import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException;
-196import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
-197import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
-198import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-199import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
-200import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair;
-201import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameInt64Pair;
-202import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
-203import 

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
index c9a18a3..c80f6d8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
@@ -2492,2617 +2492,2627 @@
 2484  return;
 2485}
 2486  }
-2487  
errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, "Region "
-2488  + descriptiveName + " is a 
split parent in META, in HDFS, "
-2489  + "and not deployed on any 
region server. This could be transient, "
-2490  + "consider to run the catalog 
janitor first!");
-2491  if (shouldFixSplitParents()) {
-2492setShouldRerun();
-2493resetSplitParent(hbi);
-2494  }
-2495    } else if (inMeta && !inHdfs && !isDeployed) {
-2496  
errors.reportError(ERROR_CODE.NOT_IN_HDFS_OR_DEPLOYED, "Region "
-2497  + descriptiveName + " found in 
META, but not in HDFS "
-2498  + "or deployed on any region 
server.");
-2499  if (shouldFixMeta()) {
-2500deleteMetaRegion(hbi);
-2501  }
-2502    } else if (inMeta && !inHdfs && isDeployed) {
-2503  
errors.reportError(ERROR_CODE.NOT_IN_HDFS, "Region " + descriptiveName
-2504  + " found in META, but not in 
HDFS, " +
-2505  "and deployed on " + 
Joiner.on(", ").join(hbi.deployedOn));
-2506  // We treat HDFS as ground truth.  
Any information in meta is transient
-2507  // and equivalent data can be 
regenerated.  So, lets unassign and remove
-2508  // these problems from META.
-2509  if (shouldFixAssignments()) {
-2510errors.print("Trying to fix 
unassigned region...");
-2511undeployRegions(hbi);
-2512  }
-2513  if (shouldFixMeta()) {
-2514// wait for it to complete
-2515deleteMetaRegion(hbi);
-2516  }
-2517    } else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) {
-2518  
errors.reportError(ERROR_CODE.NOT_DEPLOYED, "Region " + descriptiveName
-2519  + " not deployed on any region 
server.");
-2520  tryAssignmentRepair(hbi, "Trying 
to fix unassigned region...");
-2521    } else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) {
-2522  
errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED,
-2523  "Region " + descriptiveName + 
" should not be deployed according " +
-2524  "to META, but is deployed on " 
+ Joiner.on(", ").join(hbi.deployedOn));
-2525  if (shouldFixAssignments()) {
-2526errors.print("Trying to close 
the region " + descriptiveName);
-2527setShouldRerun();
-2528
HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, 
hbi.deployedOn);
-2529  }
-2530    } else if (inMeta && inHdfs && isMultiplyDeployed) {
-2531  
errors.reportError(ERROR_CODE.MULTI_DEPLOYED, "Region " + descriptiveName
-2532  + " is listed in hbase:meta on 
region server " + hbi.metaEntry.regionServer
-2533  + " but is multiply assigned 
to region servers " +
-2534  Joiner.on(", 
").join(hbi.deployedOn));
-2535  // If we are trying to fix the 
errors
-2536  if (shouldFixAssignments()) {
-2537errors.print("Trying to fix 
assignment error...");
-2538setShouldRerun();
-2539
HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, 
hbi.deployedOn);
-2540  }
-2541    } else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
-2542  
errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, "Region "
-2543  + descriptiveName + " listed 
in hbase:meta on region server " +
-2544  hbi.metaEntry.regionServer + " 
but found on region server " +
-2545  hbi.deployedOn.get(0));
-2546  // If we are trying to fix the 
errors
-2547  if (shouldFixAssignments()) {
-2548errors.print("Trying to fix 
assignment error...");
-2549setShouldRerun();
-2550
HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, 
hbi.deployedOn);
-2551
HBaseFsckRepair.waitUntilAssigned(admin, hbi.getHdfsHRI());
-2552  }
-2553} else {
-2554  
errors.reportError(ERROR_CODE.UNKNOWN, "Region " + descriptiveName +
-2555  " is in an unforeseen state:" 
+
-2556  " inMeta=" + inMeta +
-2557  " inHdfs=" + inHdfs +
-2558  " isDeployed=" + isDeployed 
+
-2559  " isMultiplyDeployed=" + 
isMultiplyDeployed +
-2560  " deploymentMatchesMeta=" + 
deploymentMatchesMeta +
-2561  " shouldBeDeployed=" + 
shouldBeDeployed);
-2562}
-2563  }
-2564
-2565  /**
-2566   * Checks tables integrity. Goes over 
all 

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.html
index 5584401..811053c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/DropDeletesCompactionScanQueryMatcher.html
@@ -61,34 +61,35 @@
 053  protected final long earliestPutTs;
 054
 055  protected 
DropDeletesCompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker 
deletes,
-056  long readPointToUse, long 
earliestPutTs, long oldestUnexpiredTS, long now) {
-057super(scanInfo, deletes, 
readPointToUse, oldestUnexpiredTS, now);
-058this.timeToPurgeDeletes = 
scanInfo.getTimeToPurgeDeletes();
-059this.earliestPutTs = earliestPutTs;
-060  }
-061
-062  protected final MatchCode 
tryDropDelete(Cell cell) {
-063long timestamp = 
cell.getTimestamp();
-064// If it is not the time to drop the 
delete marker, just return
-065    if (timeToPurgeDeletes > 0 && now - timestamp <= timeToPurgeDeletes) {
-066  return MatchCode.INCLUDE;
-067}
-068    if (keepDeletedCells == KeepDeletedCells.TRUE
-069        || (keepDeletedCells == KeepDeletedCells.TTL && timestamp >= oldestUnexpiredTS)) {
-070  // If keepDeletedCell is true, or 
the delete marker is not expired yet, we should include it
-071  // in version counting to see if we 
can drop it. The only exception is that, we can make
-072  // sure that no put is older than 
this delete marker. And under this situation, all later
-073  // cells of this column(must be 
delete markers) can be skipped.
-074      if (timestamp < earliestPutTs) {
-075return 
columns.getNextRowOrNextColumn(cell);
-076  } else {
-077return null;
-078  }
-079} else {
-080  return MatchCode.SKIP;
-081}
-082  }
-083}
+056  ColumnTracker columns, long 
readPointToUse, long earliestPutTs, long oldestUnexpiredTS,
+057  long now) {
+058super(scanInfo, deletes, columns, 
readPointToUse, oldestUnexpiredTS, now);
+059this.timeToPurgeDeletes = 
scanInfo.getTimeToPurgeDeletes();
+060this.earliestPutTs = earliestPutTs;
+061  }
+062
+063  protected final MatchCode 
tryDropDelete(Cell cell) {
+064long timestamp = 
cell.getTimestamp();
+065// If it is not the time to drop the 
delete marker, just return
+066    if (timeToPurgeDeletes > 0 && now - timestamp <= timeToPurgeDeletes) {
+067  return MatchCode.INCLUDE;
+068}
+069    if (keepDeletedCells == KeepDeletedCells.TRUE
+070        || (keepDeletedCells == KeepDeletedCells.TTL && timestamp >= oldestUnexpiredTS)) {
+071  // If keepDeletedCell is true, or 
the delete marker is not expired yet, we should include it
+072  // in version counting to see if we 
can drop it. The only exception is that, we can make
+073  // sure that no put is older than 
this delete marker. And under this situation, all later
+074  // cells of this column(must be 
delete markers) can be skipped.
+075      if (timestamp < earliestPutTs) {
+076return 
columns.getNextRowOrNextColumn(cell);
+077  } else {
+078return null;
+079  }
+080} else {
+081  return MatchCode.SKIP;
+082}
+083  }
+084}
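Below is a self-contained paraphrase of the tryDropDelete() decision shown in both halves of the diff, with plain longs and an enum standing in for Cell, ColumnTracker and MatchCode; the names here are illustrative and are not the matcher's real API.

public class DropDeleteSketch {
  enum Decision { INCLUDE, SKIP_ENTIRE_COLUMN, COUNT_VERSIONS, SKIP }

  static Decision tryDropDelete(long timestamp, long now, long timeToPurgeDeletes,
      boolean keepDeletedCellsTrue, boolean keepDeletedCellsTtl, long oldestUnexpiredTS,
      long earliestPutTs) {
    // Too young to purge: the delete marker is kept as-is.
    if (timeToPurgeDeletes > 0 && now - timestamp <= timeToPurgeDeletes) {
      return Decision.INCLUDE;
    }
    if (keepDeletedCellsTrue || (keepDeletedCellsTtl && timestamp >= oldestUnexpiredTS)) {
      // Retained markers still take part in version counting, unless no put can be older
      // than the marker, in which case the rest of the column can be skipped outright.
      return (timestamp < earliestPutTs) ? Decision.SKIP_ENTIRE_COLUMN : Decision.COUNT_VERSIONS;
    }
    return Decision.SKIP; // expired and not retained: drop it during compaction
  }
}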
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.html
new file mode 100644
index 000..4eb968f
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.html
@@ -0,0 +1,114 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to 

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
index afd9ccc..904b921 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
@@ -30,1916 +30,1984 @@
 022import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
 023
 024import java.io.IOException;
-025import java.util.ArrayList;
-026import java.util.Arrays;
-027import java.util.HashSet;
-028import java.util.Iterator;
-029import java.util.List;
-030import java.util.Map;
-031import java.util.Set;
-032import 
java.util.concurrent.atomic.AtomicBoolean;
-033import 
java.util.concurrent.atomic.AtomicInteger;
-034import 
java.util.concurrent.atomic.AtomicLong;
-035import java.util.stream.Collectors;
-036import java.util.stream.Stream;
-037import 
java.util.concurrent.ConcurrentHashMap;
-038import 
java.util.concurrent.CopyOnWriteArrayList;
-039import java.util.concurrent.DelayQueue;
-040import java.util.concurrent.TimeUnit;
-041
-042import org.apache.commons.logging.Log;
-043import 
org.apache.commons.logging.LogFactory;
-044import 
org.apache.hadoop.conf.Configuration;
-045import 
org.apache.hadoop.hbase.HConstants;
-046import 
org.apache.hadoop.hbase.ProcedureInfo;
-047import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-048import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-049import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
-050import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
-051import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-052import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-053import 
org.apache.hadoop.hbase.procedure2.util.DelayedUtil;
-054import 
org.apache.hadoop.hbase.procedure2.util.DelayedUtil.DelayedWithTimeout;
-055import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-056import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-057import 
org.apache.hadoop.hbase.security.User;
-058import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-059import 
org.apache.hadoop.hbase.util.NonceKey;
-060import 
org.apache.hadoop.hbase.util.Pair;
-061import 
org.apache.hadoop.hbase.util.Threads;
-062
-063/**
-064 * Thread Pool that executes the 
submitted procedures.
-065 * The executor has a ProcedureStore 
associated.
-066 * Each operation is logged and on 
restart the pending procedures are resumed.
-067 *
-068 * Unless the Procedure code throws an 
error (e.g. invalid user input)
-069 * the procedure will complete (at some 
point in time), On restart the pending
-070 * procedures are resumed and the once 
failed will be rolledback.
-071 *
-072 * The user can add procedures to the 
executor via submitProcedure(proc)
-073 * check for the finished state via 
isFinished(procId)
-074 * and get the result via 
getResult(procId)
-075 */
-076@InterfaceAudience.Private
-077@InterfaceStability.Evolving
-078public class ProcedureExecutor<TEnvironment> {
-079  private static final Log LOG = 
LogFactory.getLog(ProcedureExecutor.class);
-080
-081  public static final String 
CHECK_OWNER_SET_CONF_KEY = "hbase.procedure.check.owner.set";
-082  private static final boolean 
DEFAULT_CHECK_OWNER_SET = false;
-083
-084  public static final String 
WORKER_KEEP_ALIVE_TIME_CONF_KEY =
-085  
"hbase.procedure.worker.keep.alive.time.msec";
-086  private static final long 
DEFAULT_WORKER_KEEP_ALIVE_TIME = Long.MAX_VALUE;
-087
-088  Testing testing = null;
-089  public static class Testing {
-090protected boolean killIfSuspended = 
false;
-091protected boolean 
killBeforeStoreUpdate = false;
-092protected boolean 
toggleKillBeforeStoreUpdate = false;
-093
-094protected boolean 
shouldKillBeforeStoreUpdate() {
-095  final boolean kill = 
this.killBeforeStoreUpdate;
-096  if 
(this.toggleKillBeforeStoreUpdate) {
-097this.killBeforeStoreUpdate = 
!kill;
-098LOG.warn("Toggle KILL before 
store update to: " + this.killBeforeStoreUpdate);
-099  }
-100  return kill;
-101}
-102
-103protected boolean 
shouldKillBeforeStoreUpdate(final boolean isSuspended) {
-104      return (isSuspended && !killIfSuspended) ? false : shouldKillBeforeStoreUpdate();
-105}
-106  }
-107
-108  public interface 
ProcedureExecutorListener {
-109void procedureLoaded(long procId);
-110void procedureAdded(long procId);
-111void procedureFinished(long 
procId);
-112  }
-113
-114  /**
-115   * Internal cleaner that removes the 
completed procedure 
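The class comment above names the three calls that make up the executor's public lifecycle: submitProcedure(proc), isFinished(procId) and getResult(procId). Below is a minimal sketch of that lifecycle, assuming a started ProcedureExecutor and a concrete Procedure are available; the polling loop and sleep interval are illustrative, and getResult's exact return type varies across versions.

import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

public class ProcedureSubmitSketch {
  static <TEnv> void runAndWait(ProcedureExecutor<TEnv> executor, Procedure<TEnv> proc)
      throws InterruptedException {
    long procId = executor.submitProcedure(proc); // persisted to the ProcedureStore first
    while (!executor.isFinished(procId)) {        // pending procedures are resumed on restart
      Thread.sleep(100);
    }
    executor.getResult(procId);                   // exposes the outcome (result or failure)
  }
}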

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
index a945b54..422c076 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
@@ -92,3454 +92,3410 @@
 084import 
org.apache.hadoop.hbase.client.VersionInfoUtil;
 085import 
org.apache.hadoop.hbase.conf.ConfigurationObserver;
 086import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-087import 
org.apache.hadoop.hbase.exceptions.MergeRegionException;
-088import 
org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
-089import 
org.apache.hadoop.hbase.exceptions.ScannerResetException;
-090import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-091import 
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-092import 
org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
-093import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-094import 
org.apache.hadoop.hbase.ipc.PriorityFunction;
-095import 
org.apache.hadoop.hbase.ipc.QosPriority;
-096import 
org.apache.hadoop.hbase.ipc.RpcCallContext;
-097import 
org.apache.hadoop.hbase.ipc.RpcCallback;
-098import 
org.apache.hadoop.hbase.ipc.RpcServer;
-099import 
org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
-100import 
org.apache.hadoop.hbase.ipc.RpcServerFactory;
-101import 
org.apache.hadoop.hbase.ipc.RpcServerInterface;
-102import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-103import 
org.apache.hadoop.hbase.ipc.ServerRpcController;
-104import 
org.apache.hadoop.hbase.master.MasterRpcServices;
-105import 
org.apache.hadoop.hbase.quotas.ActivePolicyEnforcement;
-106import 
org.apache.hadoop.hbase.quotas.OperationQuota;
-107import 
org.apache.hadoop.hbase.quotas.QuotaUtil;
-108import 
org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
-109import 
org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-110import 
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
-111import 
org.apache.hadoop.hbase.quotas.SpaceViolationPolicyEnforcement;
-112import 
org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
-113import 
org.apache.hadoop.hbase.regionserver.Leases.Lease;
-114import 
org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
-115import 
org.apache.hadoop.hbase.regionserver.Region.Operation;
-116import 
org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
-117import 
org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
-118import 
org.apache.hadoop.hbase.regionserver.handler.OpenPriorityRegionHandler;
-119import 
org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
-120import 
org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-121import 
org.apache.hadoop.hbase.security.Superusers;
-122import 
org.apache.hadoop.hbase.security.User;
-123import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-124import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
-125import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-126import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-127import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
-128import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-143import 

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.html
index 0683e6c..91fe6e6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.html
@@ -40,188 +40,190 @@
 032 * number of versions, compression 
settings, etc.
 033 *
 034 * It is used as input when creating a 
table or adding a column.
-035 */
-036@InterfaceAudience.Public
-037public interface ColumnFamilyDescriptor 
{
-038
-039  @InterfaceAudience.Private
-040  static final 
ComparatorColumnFamilyDescriptor COMPARATOR
-041= (ColumnFamilyDescriptor lhs, 
ColumnFamilyDescriptor rhs) - {
-042int result = 
Bytes.compareTo(lhs.getName(), rhs.getName());
-043if (result != 0) {
-044  return result;
-045}
-046// punt on comparison for ordering, 
just calculate difference.
-047result = lhs.getValues().hashCode() - 
rhs.getValues().hashCode();
-048if (result != 0) {
-049  return result;
-050}
-051return 
lhs.getConfiguration().hashCode() - rhs.getConfiguration().hashCode();
-052  };
-053
-054  /**
-055   * @return The storefile/hfile 
blocksize for this column family.
-056   */
-057  int getBlocksize();
-058  /**
-059   * @return bloom filter type used for 
new StoreFiles in ColumnFamily
-060   */
-061  BloomType getBloomFilterType();
-062
-063  /**
-064   * @return Compression type setting.
-065   */
-066  Compression.Algorithm 
getCompactionCompressionType();
-067  /**
-068   * @return Compression type setting.
-069   */
-070  Compression.Algorithm 
getCompressionType();
-071  /**
-072   * @return an unmodifiable map.
-073   */
-074  Map<String, String> getConfiguration();
-075  /**
-076   * @param key the key whose associated 
value is to be returned
-077   * @return accessing the configuration 
value by key.
-078   */
-079  String getConfigurationValue(String 
key);
-080  /**
-081   * @return replication factor set for 
this CF
-082   */
-083  short getDFSReplication();
-084  /**
-085   * @return the data block encoding 
algorithm used in block cache and
-086   * optionally on disk
-087   */
-088  DataBlockEncoding 
getDataBlockEncoding();
-089  /**
-090   * @return Return the raw crypto key 
attribute for the family, or null if not set
-091   */
-092  byte[] getEncryptionKey();
-093
-094  /**
-095   * @return Return the encryption 
algorithm in use by this family
-096   */
-097  String getEncryptionType();
-098  /**
-099   * @return in-memory compaction policy 
if set for the cf. Returns null if no policy is set for
-100   *  for this column family
-101   */
-102  MemoryCompactionPolicy 
getInMemoryCompaction();
-103  /**
-104   * @return return the 
KeepDeletedCells
-105   */
-106  KeepDeletedCells 
getKeepDeletedCells();
-107  /**
-108   * @return maximum number of versions
-109   */
-110  int getMaxVersions();
-111  /**
-112   * @return The minimum number of 
versions to keep.
-113   */
-114  int getMinVersions();
-115  /**
-116   * Get the mob compact partition policy 
for this family
-117   * @return MobCompactPartitionPolicy
-118   */
-119  MobCompactPartitionPolicy 
getMobCompactPartitionPolicy();
-120  /**
-121   * Gets the mob threshold of the 
family.
-122   * If the size of a cell value is 
larger than this threshold, it's regarded as a mob.
-123   * The default threshold is 
1024*100(100K)B.
-124   * @return The mob threshold.
-125   */
-126  long getMobThreshold();
-127  /**
-128   * @return a copy of Name of this 
column family
-129   */
-130  byte[] getName();
-131
-132  /**
-133   * @return Name of this column family
-134   */
-135  String getNameAsString();
-136
-137   /**
-138* @return the scope tag
-139*/
-140  int getScope();
-141  /**
-142   * Not using {@code enum} here because 
HDFS is not using {@code enum} for storage policy, see
-143   * 
org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite for more 
details.
-144   * @return Return the storage policy in 
use by this family
-145   */
-146  String getStoragePolicy();
-147 /**
-148   * @return Time-to-live of cell 
contents, in seconds.
-149   */
-150  int getTimeToLive();
-151  /**
-152   * @param key The key.
-153   * @return A clone value. Null if no 
mapping for the key
-154   */
-155  Bytes getValue(Bytes key);
-156  /**
-157   * @param key The key.
-158   * @return A clone value. Null if no 
mapping for the key
-159   */
-160  byte[] getValue(byte[] key);
-161  /**
-162   * It clone all bytes of all 
elements.
-163   * @return All values
-164   */
-165  Map<Bytes, Bytes> getValues();
-166  /**
-167   * @return True if hfile DATA type 
blocks should 

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.html
index b60a13d..dc03a6e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.html
@@ -35,7 +35,7 @@
 027import 
org.apache.hadoop.hbase.util.Bytes;
 028import 
org.apache.hadoop.hbase.util.CollectionUtils;
 029
-030import com.google.common.collect.Lists;
+030import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 031
 032/**
 033 * Data structure used in the first stage 
of PrefixTree encoding:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.html
index c60167e..48bfd3e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.html
@@ -37,7 +37,7 @@
 029import 
org.apache.hadoop.hbase.util.SimpleMutableByteRange;
 030import 
org.apache.hadoop.hbase.util.Strings;
 031
-032import com.google.common.collect.Lists;
+032import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 033
 034/**
 035 * Individual node in a Trie structure.  
Each node is one of 3 types:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.SplitLogManagerDetails.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.SplitLogManagerDetails.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.SplitLogManagerDetails.html
index 4431781..7a4a5e3 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.SplitLogManagerDetails.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.SplitLogManagerDetails.html
@@ -40,7 +40,7 @@
 032import 
org.apache.hadoop.hbase.master.SplitLogManager.Task;
 033import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 034
-035import 
com.google.common.annotations.VisibleForTesting;
+035import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 036
 037/**
 038 * Coordination for SplitLogManager. It 
creates and works with tasks for split log operations<BR>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.html
index 4431781..7a4a5e3 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.html
@@ -40,7 +40,7 @@
 032import 
org.apache.hadoop.hbase.master.SplitLogManager.Task;
 033import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 034
-035import 
com.google.common.annotations.VisibleForTesting;
+035import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 036
 037/**
 038 * Coordination for SplitLogManager. It 
creates and works with tasks for split log operations<BR>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.SplitTaskDetails.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.SplitTaskDetails.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.SplitTaskDetails.html
index f122431..21bf820 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.SplitTaskDetails.html
+++ 

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.html
index e1fbce4..873e17f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.html
@@ -1089,497 +1089,498 @@
 1081}
 1082  }
 1083      Map<String, AtomicLong> actualReadTableLatency = regionSink.getReadLatencyMap();
-1084  for (String tableName : 
this.configuredReadTableTimeouts.keySet()) {
-1085if 
(actualReadTableLatency.containsKey(tableName)) {
-1086  Long actual = 
actualReadTableLatency.get(tableName).longValue();
-1087  Long configured = 
this.configuredReadTableTimeouts.get(tableName);
-1088  LOG.info("Read operation 
for " + tableName + " took " + actual +
-1089" ms. The configured 
read timeout was " + configured + " ms.");
-1090              if (actual > configured) {
-1091LOG.error("Read 
operation for " + tableName + " exceeded the configured read timeout.");
-1092  }
-1093} else {
-1094  LOG.error("Read operation 
for " + tableName + " failed!");
-1095}
-1096  }
-1097  if (this.writeSniffing) {
-1098String writeTableStringName 
= this.writeTableName.getNameAsString();
-1099long actualWriteLatency = 
regionSink.getWriteLatency().longValue();
-1100LOG.info("Write operation 
for " + writeTableStringName + " took " + actualWriteLatency + " ms. The 
configured write timeout was " +
-1101  
this.configuredWriteTableTimeout + " ms.");
-1102// Check that the writeTable 
write operation latency does not exceed the configured timeout.
-1103        if (actualWriteLatency > this.configuredWriteTableTimeout) {
-1104  LOG.error("Write operation 
for " + writeTableStringName + " exceeded the configured write timeout.");
-1105}
-1106  }
-1107} catch (Exception e) {
-1108  LOG.error("Run regionMonitor 
failed", e);
-1109  this.errorCode = 
ERROR_EXIT_CODE;
-1110}
-  }
-1112  this.done = true;
-1113}
-1114
-1115private String[] 
generateMonitorTables(String[] monitorTargets) throws IOException {
-1116  String[] returnTables = null;
-1117
-1118  if (this.useRegExp) {
-1119Pattern pattern = null;
-1120HTableDescriptor[] tds = null;
-1121        Set<String> tmpTables = new TreeSet<>();
-1122try {
-1123  if (LOG.isDebugEnabled()) {
-1124
LOG.debug(String.format("reading list of tables"));
-1125  }
-1126  tds = 
this.admin.listTables(pattern);
-1127  if (tds == null) {
-1128tds = new 
HTableDescriptor[0];
-1129  }
-1130  for (String monitorTarget : 
monitorTargets) {
-1131pattern = 
Pattern.compile(monitorTarget);
-1132for (HTableDescriptor td : 
tds) {
-1133  if 
(pattern.matcher(td.getNameAsString()).matches()) {
-1134
tmpTables.add(td.getNameAsString());
-1135  }
-1136}
-1137  }
-1138} catch (IOException e) {
-1139  LOG.error("Communicate with 
admin failed", e);
-1140  throw e;
-1141}
-1142
-1143        if (tmpTables.size() > 0) {
-1144  returnTables = 
tmpTables.toArray(new String[tmpTables.size()]);
-1145} else {
-1146  String msg = "No HTable found, 
tablePattern:" + Arrays.toString(monitorTargets);
-1147  LOG.error(msg);
-1148  this.errorCode = 
INIT_ERROR_EXIT_CODE;
-1149  throw new 
TableNotFoundException(msg);
-1150}
-1151  } else {
-1152returnTables = monitorTargets;
-1153  }
-1154
-1155  return returnTables;
-1156}
-1157
-1158/*
-1159 * canary entry point to monitor all 
the tables.
-1160 */
-1161    private List<Future<Void>> sniff(TaskType taskType, RegionStdOutSink regionSink) throws Exception {
-1162  if (LOG.isDebugEnabled()) {
-1163LOG.debug(String.format("reading 
list of tables"));
-1164  }
-1165      List<Future<Void>> taskFutures = new LinkedList<>();
-1166  for (HTableDescriptor table : 
admin.listTables()) {
-1167        if (admin.isTableEnabled(table.getTableName())
-1168            && (!table.getTableName().equals(writeTableName))) {
-1169  AtomicLong readLatency = 
regionSink.initializeAndGetReadLatencyForTable(table.getNameAsString());
-1170  
taskFutures.addAll(Canary.sniff(admin, sink, table, executor, taskType, 
this.rawScanEnabled, readLatency));
-1171}
-1172 

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html
index feb42ea..4bd98f4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html
@@ -185,4189 +185,4266 @@
 177import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
 178import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
 179import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
-180import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
-181import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
-182import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
-183import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
-184import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-185import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
-186import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
-187import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
-188import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-189import 
org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-190import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-191import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-192import 
org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-193import 
org.apache.hadoop.hbase.util.Addressing;
-194import 
org.apache.hadoop.hbase.util.Bytes;
-195import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-196import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-197import 
org.apache.hadoop.hbase.util.Pair;
-198import 
org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-199import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-200import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-201import 
org.apache.hadoop.ipc.RemoteException;
-202import 
org.apache.hadoop.util.StringUtils;
-203import 
org.apache.zookeeper.KeeperException;
-204
-205import 
com.google.common.annotations.VisibleForTesting;
-206import com.google.protobuf.Descriptors;
-207import com.google.protobuf.Message;
-208import 
com.google.protobuf.RpcController;
-209import java.util.stream.Collectors;
-210
-211/**
-212 * HBaseAdmin is no longer a client API. 
It is marked InterfaceAudience.Private indicating that
-213 * this is an HBase-internal class as 
defined in
-214 * 
https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
-215 * There are no guarantees for backwards 
source / binary compatibility and methods or class can
-216 * change or go away without 
deprecation.
-217 * Use {@link Connection#getAdmin()} to 
obtain an instance of {@link Admin} instead of constructing
-218 * an HBaseAdmin directly.
-219 *
-220 * <p>Connection should be an <i>unmanaged</i> connection obtained via
-221 * {@link 
ConnectionFactory#createConnection(Configuration)}
-222 *
-223 * @see ConnectionFactory
-224 * @see Connection
-225 * @see Admin
-226 */
-227@InterfaceAudience.Private
-228@InterfaceStability.Evolving
-229public class HBaseAdmin implements Admin 
{
-230  private static final Log LOG = 
LogFactory.getLog(HBaseAdmin.class);
-231
-232  private static final String 
ZK_IDENTIFIER_PREFIX =  "hbase-admin-on-";
+180import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
+181import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
+182import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
+183import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
+184import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
+185import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
+186import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+187import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
+188import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
+189import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
+190import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
+191import 
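The HBaseAdmin class comment above says not to construct the class directly. Below is a minimal sketch of the recommended path it describes, using ConnectionFactory and Connection#getAdmin(); the listTables() call is just a placeholder operation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AdminUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Unmanaged connection, as the class comment requires; both resources are closed here.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      System.out.println("tables visible to this client: " + admin.listTables().length);
    }
  }
}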

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.MyList.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.MyList.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.MyList.html
index 4196a6c..6c65fd1 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.MyList.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.MyList.html
@@ -114,1476 +114,1636 @@
 106import org.mockito.Mockito;
 107
 108import com.google.common.collect.Lists;
-109
-110/**
-111 * Test class for the Store
-112 */
-113@Category({RegionServerTests.class, 
MediumTests.class})
-114public class TestStore {
-115  private static final Log LOG = 
LogFactory.getLog(TestStore.class);
-116  @Rule public TestName name = new 
TestName();
-117
-118  HStore store;
-119  byte [] table = 
Bytes.toBytes("table");
-120  byte [] family = 
Bytes.toBytes("family");
+109import 
org.apache.hadoop.hbase.filter.Filter;
+110import 
org.apache.hadoop.hbase.filter.FilterBase;
+111import static 
org.junit.Assert.assertEquals;
+112import static 
org.junit.Assert.assertTrue;
+113
+114/**
+115 * Test class for the Store
+116 */
+117@Category({RegionServerTests.class, 
MediumTests.class})
+118public class TestStore {
+119  private static final Log LOG = 
LogFactory.getLog(TestStore.class);
+120  @Rule public TestName name = new 
TestName();
 121
-122  byte [] row = Bytes.toBytes("row");
-123  byte [] row2 = Bytes.toBytes("row2");
-124  byte [] qf1 = Bytes.toBytes("qf1");
-125  byte [] qf2 = Bytes.toBytes("qf2");
-126  byte [] qf3 = Bytes.toBytes("qf3");
-127  byte [] qf4 = Bytes.toBytes("qf4");
-128  byte [] qf5 = Bytes.toBytes("qf5");
-129  byte [] qf6 = Bytes.toBytes("qf6");
-130
-131  NavigableSet<byte[]> qualifiers = new ConcurrentSkipListSet<>(Bytes.BYTES_COMPARATOR);
-132
-133  List<Cell> expected = new ArrayList<>();
-134  List<Cell> result = new ArrayList<>();
-135
-136  long id = System.currentTimeMillis();
-137  Get get = new Get(row);
-138
-139  private HBaseTestingUtility TEST_UTIL = 
new HBaseTestingUtility();
-140  private final String DIR = 
TEST_UTIL.getDataTestDir("TestStore").toString();
-141
+122  HStore store;
+123  byte [] table = 
Bytes.toBytes("table");
+124  byte [] family = 
Bytes.toBytes("family");
+125
+126  byte [] row = Bytes.toBytes("row");
+127  byte [] row2 = Bytes.toBytes("row2");
+128  byte [] qf1 = Bytes.toBytes("qf1");
+129  byte [] qf2 = Bytes.toBytes("qf2");
+130  byte [] qf3 = Bytes.toBytes("qf3");
+131  byte [] qf4 = Bytes.toBytes("qf4");
+132  byte [] qf5 = Bytes.toBytes("qf5");
+133  byte [] qf6 = Bytes.toBytes("qf6");
+134
+135  NavigableSet<byte[]> qualifiers = new ConcurrentSkipListSet<>(Bytes.BYTES_COMPARATOR);
+136
+137  List<Cell> expected = new ArrayList<>();
+138  List<Cell> result = new ArrayList<>();
+139
+140  long id = System.currentTimeMillis();
+141  Get get = new Get(row);
 142
-143  /**
-144   * Setup
-145   * @throws IOException
-146   */
-147  @Before
-148  public void setUp() throws IOException 
{
-149qualifiers.add(qf1);
-150qualifiers.add(qf3);
-151qualifiers.add(qf5);
-152
-153    Iterator<byte[]> iter = qualifiers.iterator();
-154while(iter.hasNext()){
-155  byte [] next = iter.next();
-156  expected.add(new KeyValue(row, 
family, next, 1, (byte[])null));
-157  get.addColumn(family, next);
-158}
-159  }
-160
-161  private void init(String methodName) 
throws IOException {
-162init(methodName, 
TEST_UTIL.getConfiguration());
+143  private HBaseTestingUtility TEST_UTIL = 
new HBaseTestingUtility();
+144  private final String DIR = 
TEST_UTIL.getDataTestDir("TestStore").toString();
+145
+146
+147  /**
+148   * Setup
+149   * @throws IOException
+150   */
+151  @Before
+152  public void setUp() throws IOException 
{
+153qualifiers.add(qf1);
+154qualifiers.add(qf3);
+155qualifiers.add(qf5);
+156
+157    Iterator<byte[]> iter = qualifiers.iterator();
+158while(iter.hasNext()){
+159  byte [] next = iter.next();
+160  expected.add(new KeyValue(row, 
family, next, 1, (byte[])null));
+161  get.addColumn(family, next);
+162}
 163  }
 164
-165  private Store init(String methodName, 
Configuration conf)
-166  throws IOException {
-167HColumnDescriptor hcd = new 
HColumnDescriptor(family);
-168// some of the tests write 4 versions 
and then flush
-169// (with HBASE-4241, lower versions 
are collected on flush)
-170hcd.setMaxVersions(4);
-171return init(methodName, conf, hcd);
-172  }
-173
-174  private Store init(String methodName, 
Configuration conf,
-175  HColumnDescriptor hcd) throws 
IOException {
-176HTableDescriptor htd = new 
HTableDescriptor(TableName.valueOf(table));
-177return init(methodName, conf, htd, 
hcd);
-178  }
-179
-180  private Store 
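The init(...) helpers quoted above boil down to building a column-family descriptor that keeps four versions and wrapping it in a table descriptor. A minimal sketch of that setup, using only the types and values visible in this hunk (illustrative, not copied from the test):

    // Sketch only; mirrors the descriptor setup described by init(...) above.
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
    // Some tests write 4 versions and then flush; keeping 4 avoids the
    // on-flush version collection noted above (HBASE-4241).
    hcd.setMaxVersions(4);
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("table")));
    htd.addFamily(hcd);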

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.EndpointOperationWithResult.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.EndpointOperationWithResult.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.EndpointOperationWithResult.html
index 25d6b70..635798d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.EndpointOperationWithResult.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.EndpointOperationWithResult.html
@@ -1234,540 +1234,541 @@
 1226   * @param result the result returned 
by the append
 1227   * @throws IOException if an error 
occurred on the coprocessor
 1228   */
-1229  public void postAppend(final Append append, final Result result) throws IOException {
-1230    execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
-1231      @Override
-1232      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1233          throws IOException {
-1234        oserver.postAppend(ctx, append, result);
-1235      }
-1236    });
-1237  }
-1238
-1239  /**
-1240   * @param increment increment object
-1241   * @param result the result returned 
by postIncrement
-1242   * @throws IOException if an error 
occurred on the coprocessor
-1243   */
-1244  public Result postIncrement(final Increment increment, Result result) throws IOException {
-1245    return execOperationWithResult(result,
-1246        coprocessors.isEmpty() ? null : new RegionOperationWithResult<Result>() {
-1247      @Override
-1248      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1249          throws IOException {
-1250        setResult(oserver.postIncrement(ctx, increment, getResult()));
-1251      }
-1252    });
-1253  }
-1254
-1255  /**
-1256   * @param scan the Scan 
specification
-1257   * @return scanner id to return to 
client if default operation should be
-1258   * bypassed, null otherwise
-1259   * @exception IOException Exception
-1260   */
-1261  public RegionScanner preScannerOpen(final Scan scan) throws IOException {
-1262    return execOperationWithResult(true, null,
-1263        coprocessors.isEmpty() ? null : new RegionOperationWithResult<RegionScanner>() {
-1264      @Override
-1265      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1266          throws IOException {
-1267        setResult(oserver.preScannerOpen(ctx, scan, getResult()));
-1268      }
-1269    });
-1270  }
-1271
-1272  /**
-1273   * See
-1274   * {@link 
RegionObserver#preStoreScannerOpen(ObserverContext,
-1275   *Store, Scan, NavigableSet, 
KeyValueScanner)}
-1276   */
-1277  public KeyValueScanner preStoreScannerOpen(final Store store, final Scan scan,
-1278      final NavigableSet<byte[]> targetCols, final long readPt) throws IOException {
-1279    return execOperationWithResult(null,
-1280        coprocessors.isEmpty() ? null : new RegionOperationWithResult<KeyValueScanner>() {
-1281      @Override
-1282      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1283          throws IOException {
-1284        setResult(oserver.preStoreScannerOpen(ctx, store, scan, targetCols, getResult(), readPt));
-1285      }
-1286    });
-1287  }
-1288
-1289  /**
-1290   * @param scan the Scan 
specification
-1291   * @param s the scanner
-1292   * @return the scanner instance to 
use
-1293   * @exception IOException Exception
-1294   */
-1295  public RegionScanner postScannerOpen(final Scan scan, RegionScanner s) throws IOException {
-1296    return execOperationWithResult(s,
-1297        coprocessors.isEmpty() ? null : new RegionOperationWithResult<RegionScanner>() {
-1298      @Override
-1299      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1300          throws IOException {
-1301        setResult(oserver.postScannerOpen(ctx, scan, getResult()));
-1302      }
-1303    });
-1304  }
-1305
-1306  /**
-1307   * @param s the scanner
-1308   * @param results the result set 
returned by the region server
-1309   * @param limit the maximum number of 
results to return
-1310   * @return 'has next' indication to 
client if bypassing default behavior, or
-1311   * null otherwise
-1312   * @exception IOException Exception
-1313   */
-1314  public Boolean preScannerNext(final InternalScanner s,
-1315      final List<Result> results, final int limit) throws IOException {
-1316    return execOperationWithResult(true, false,
-1317        coprocessors.isEmpty() ? null : new RegionOperationWithResult<Boolean>() {
-1318      @Override
-1319      public void call(RegionObserver oserver, 
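All of the host methods above follow one dispatch pattern: wrap the observer callback in a RegionOperation (or RegionOperationWithResult), seed it with the current value, run it against every registered observer, and return whatever is left in the result slot. A self-contained sketch of that pattern under simplified types (an illustration, not the actual RegionCoprocessorHost code):

    import java.io.IOException;
    import java.util.List;

    // Simplified illustration of the execOperation/execOperationWithResult
    // dispatch used by the host methods above; not the HBase classes themselves.
    final class HostDispatchSketch {
      abstract static class OperationWithResult<T> {
        private T result;
        void setResult(T r) { this.result = r; }
        T getResult() { return this.result; }
        abstract void call(Object observer) throws IOException;
      }

      static <T> T execOperationWithResult(T seed, List<Object> observers,
          OperationWithResult<T> op) throws IOException {
        if (op == null) {
          return seed;             // no coprocessors loaded: keep the default value
        }
        op.setResult(seed);        // start from the region server's own result
        for (Object observer : observers) {
          op.call(observer);       // each observer may call setResult(...) to override it
        }
        return op.getResult();     // the last override (or the seed) wins
      }
    }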

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
index f729c99..0a32350 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
@@ -54,757 +54,756 @@
 046import 
io.netty.channel.ChannelPipeline;
 047import io.netty.channel.EventLoop;
 048import 
io.netty.channel.SimpleChannelInboundHandler;
-049import 
io.netty.channel.socket.nio.NioSocketChannel;
-050import 
io.netty.handler.codec.protobuf.ProtobufDecoder;
-051import 
io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-052import 
io.netty.handler.timeout.IdleStateEvent;
-053import 
io.netty.handler.timeout.IdleStateHandler;
-054import io.netty.util.concurrent.Future;
-055import 
io.netty.util.concurrent.FutureListener;
-056import 
io.netty.util.concurrent.Promise;
-057
-058import java.io.IOException;
-059import 
java.lang.reflect.InvocationTargetException;
-060import java.lang.reflect.Method;
-061import java.util.ArrayList;
-062import java.util.EnumSet;
-063import java.util.List;
-064import java.util.concurrent.TimeUnit;
-065
-066import org.apache.commons.logging.Log;
-067import 
org.apache.commons.logging.LogFactory;
-068import 
org.apache.hadoop.conf.Configuration;
-069import 
org.apache.hadoop.crypto.CryptoProtocolVersion;
-070import 
org.apache.hadoop.crypto.Encryptor;
-071import org.apache.hadoop.fs.CreateFlag;
-072import org.apache.hadoop.fs.FileSystem;
-073import 
org.apache.hadoop.fs.FileSystemLinkResolver;
-074import org.apache.hadoop.fs.Path;
-075import 
org.apache.hadoop.fs.UnresolvedLinkException;
-076import 
org.apache.hadoop.fs.permission.FsPermission;
-077import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-078import 
org.apache.hadoop.hbase.client.ConnectionUtils;
-079import 
org.apache.hadoop.hbase.util.CancelableProgressable;
-080import 
org.apache.hadoop.hbase.util.FSUtils;
-081import 
org.apache.hadoop.hdfs.DFSClient;
-082import 
org.apache.hadoop.hdfs.DFSOutputStream;
-083import 
org.apache.hadoop.hdfs.DistributedFileSystem;
-084import 
org.apache.hadoop.hdfs.protocol.ClientProtocol;
-085import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-086import 
org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-087import 
org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-088import 
org.apache.hadoop.hdfs.protocol.LocatedBlock;
-089import 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-090import 
org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
-091import 
org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
-092import 
org.apache.hadoop.hdfs.protocol.datatransfer.Op;
-093import 
org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
-094import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
-095import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
-096import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
-097import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
-098import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
-099import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
-100import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
-101import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-102import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
-103import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
-104import 
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-105import 
org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-106import 
org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
-107import 
org.apache.hadoop.io.EnumSetWritable;
-108import 
org.apache.hadoop.ipc.RemoteException;
-109import org.apache.hadoop.net.NetUtils;
-110import 
org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
-111import 
org.apache.hadoop.security.token.Token;
-112import 
org.apache.hadoop.util.DataChecksum;
-113
-114/**
-115 * Helper class for implementing {@link 
FanOutOneBlockAsyncDFSOutput}.
-116 */
-117@InterfaceAudience.Private
-118public final class 
FanOutOneBlockAsyncDFSOutputHelper {
+049import 
io.netty.handler.codec.protobuf.ProtobufDecoder;
+050import 
io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
+051import 

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
index 16c0042..71844ce 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
@@ -126,2499 +126,2543 @@
 118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
 119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
 120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse;
-151import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
-152import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
-153import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
-154import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
-155import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
-156import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
-157import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
-158import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
-159import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
-160import 

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
index 427d86f..68cf5fb 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
@@ -126,7 +126,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private abstract class RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer
+private abstract class RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer
 extends RawAsyncHBaseAdmin.ProcedureBiConsumer
 
 
@@ -248,7 +248,7 @@ extends 
 
 namespaceName
-protected final String namespaceName
+protected final String namespaceName
 
 
 
@@ -265,7 +265,7 @@ extends 
 
 NamespaceProcedureBiConsumer
-NamespaceProcedureBiConsumer(AsyncAdmin admin,
+NamespaceProcedureBiConsumer(AsyncAdmin admin,
                             String namespaceName)
 
 
@@ -283,7 +283,7 @@ extends 
 
 getOperationType
-abstract String getOperationType()
+abstract String getOperationType()
 
 
 
@@ -292,7 +292,7 @@ extends 
 
 getDescription
-String getDescription()
+String getDescription()
 
 
 
@@ -301,7 +301,7 @@ extends 
 
 onFinished
-voidonFinished()
+voidonFinished()
 
 Specified by:
 onFinishedin
 classRawAsyncHBaseAdmin.ProcedureBiConsumer
@@ -314,7 +314,7 @@ extends 
 
 onError
-void onError(Throwable error)
+void onError(Throwable error)
 
 Specified by:
 onErrorin
 classRawAsyncHBaseAdmin.ProcedureBiConsumer

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
index 2a58b36..e1a7afe 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
@@ -121,7 +121,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private abstract class RawAsyncHBaseAdmin.ProcedureBiConsumer
+private abstract class RawAsyncHBaseAdmin.ProcedureBiConsumer
 extends Object
 implements BiConsumer<Void, Throwable>
 
@@ -226,7 +226,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/function/
 
 
 admin
-protected final AsyncAdmin admin
+protected final AsyncAdmin admin
 
 
 
@@ -243,7 +243,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/function/
 
 
 ProcedureBiConsumer
-ProcedureBiConsumer(AsyncAdmin admin)
+ProcedureBiConsumer(AsyncAdmin admin)
 
 
 
@@ -260,7 +260,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/function/
 
 
 onFinished
-abstractvoidonFinished()
+abstractvoidonFinished()
 
 
 
@@ -269,7 +269,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/function/
 
 
 onError
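Taken together, the two class pages above describe a completion callback: the admin's procedure future completes with a (Void, Throwable) pair, and the consumer routes it to onFinished() or onError(Throwable). A stand-alone sketch of that shape (names are illustrative; this is not the HBase class itself):

    import java.util.function.BiConsumer;

    // Illustrative only: mirrors the onFinished()/onError(Throwable) shape the
    // RawAsyncHBaseAdmin.ProcedureBiConsumer pages above document.
    abstract class CompletionConsumer implements BiConsumer<Void, Throwable> {
      abstract void onFinished();
      abstract void onError(Throwable error);

      @Override
      public void accept(Void ignored, Throwable error) {
        if (error != null) {
          onError(error);      // the procedure failed
        } else {
          onFinished();        // the procedure completed normally
        }
      }
    }

A BiConsumer<Void, Throwable> is exactly what a CompletableFuture hands to whenComplete(...), which is presumably how such a consumer gets attached on the async admin side.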

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/src-html/org/apache/hadoop/hbase/util/VersionInfo.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/VersionInfo.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/VersionInfo.html
index cfa3f61..5f84eff 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/VersionInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/VersionInfo.html
@@ -41,85 +41,128 @@
 033public class VersionInfo {
 034  private static final Log LOG = 
LogFactory.getLog(VersionInfo.class.getName());
 035
-036  /**
-037   * Get the hbase version.
-038   * @return the hbase version string, 
eg. "0.6.3-dev"
-039   */
-040  public static String getVersion() {
-041return Version.version;
-042  }
-043
-044  /**
-045   * Get the subversion revision number 
for the root directory
-046   * @return the revision number, eg. 
"451451"
-047   */
-048  public static String getRevision() {
-049return Version.revision;
-050  }
-051
-052  /**
-053   * The date that hbase was compiled.
-054   * @return the compilation date in unix 
date format
-055   */
-056  public static String getDate() {
-057return Version.date;
-058  }
-059
-060  /**
-061   * The user that compiled hbase.
-062   * @return the username of the user
-063   */
-064  public static String getUser() {
-065return Version.user;
-066  }
-067
-068  /**
-069   * Get the subversion URL for the root 
hbase directory.
-070   * @return the url
-071   */
-072  public static String getUrl() {
-073return Version.url;
-074  }
-075
-076  static String[] versionReport() {
-077return new String[] {
-078  "HBase " + getVersion(),
-079  "Source code repository " + 
getUrl() + " revision=" + getRevision(),
-080  "Compiled by " + getUser() + " on " 
+ getDate(),
-081  "From source with checksum " + 
getSrcChecksum()
-082  };
-083  }
-084
-085  /**
-086   * Get the checksum of the source files 
from which Hadoop was compiled.
-087   * @return a string that uniquely 
identifies the source
-088   **/
-089  public static String getSrcChecksum() 
{
-090return Version.srcChecksum;
-091  }
-092
-093  public static void writeTo(PrintWriter 
out) {
-094for (String line : versionReport()) 
{
-095  out.println(line);
-096}
-097  }
-098
-099  public static void writeTo(PrintStream 
out) {
-100for (String line : versionReport()) 
{
-101  out.println(line);
-102}
-103  }
-104
-105  public static void logVersion() {
-106for (String line : versionReport()) 
{
-107  LOG.info(line);
-108}
-109  }
-110
-111  public static void main(String[] args) 
{
-112writeTo(System.out);
+036  // If between two dots there is not a number, we regard it as a very large number so it is
+037  // higher than any numbers in the version.
+038  private static int VERY_LARGE_NUMBER = 10;
+039
+040  /**
+041   * Get the hbase version.
+042   * @return the hbase version string, 
eg. "0.6.3-dev"
+043   */
+044  public static String getVersion() {
+045return Version.version;
+046  }
+047
+048  /**
+049   * Get the subversion revision number 
for the root directory
+050   * @return the revision number, eg. 
"451451"
+051   */
+052  public static String getRevision() {
+053return Version.revision;
+054  }
+055
+056  /**
+057   * The date that hbase was compiled.
+058   * @return the compilation date in unix 
date format
+059   */
+060  public static String getDate() {
+061return Version.date;
+062  }
+063
+064  /**
+065   * The user that compiled hbase.
+066   * @return the username of the user
+067   */
+068  public static String getUser() {
+069return Version.user;
+070  }
+071
+072  /**
+073   * Get the subversion URL for the root 
hbase directory.
+074   * @return the url
+075   */
+076  public static String getUrl() {
+077return Version.url;
+078  }
+079
+080  static String[] versionReport() {
+081return new String[] {
+082  "HBase " + getVersion(),
+083  "Source code repository " + 
getUrl() + " revision=" + getRevision(),
+084  "Compiled by " + getUser() + " on " 
+ getDate(),
+085  "From source with checksum " + 
getSrcChecksum()
+086  };
+087  }
+088
+089  /**
+090   * Get the checksum of the source files 
from which Hadoop was compiled.
+091   * @return a string that uniquely 
identifies the source
+092   **/
+093  public static String getSrcChecksum() 
{
+094return Version.srcChecksum;
+095  }
+096
+097  public static void writeTo(PrintWriter 
out) {
+098for (String line : versionReport()) 
{
+099  out.println(line);
+100}
+101  }
+102
+103  public static void writeTo(PrintStream 
out) {
+104for (String line : versionReport()) 
{
+105  out.println(line);
+106}
+107  }
+108
+109  public static void logVersion() {
+110for (String line : versionReport()) 
{
+111  LOG.info(line);
+112}
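The VERY_LARGE_NUMBER field added above encodes the rule spelled out in its comment: a version component that is not a number sorts above any numeric component. A sketch of that comparison rule, written independently of the HBase comparator (the comparator itself is not part of this hunk and may differ in detail):

    // Sketch of the version-component rule described by the comment above.
    static int componentValue(String part, int veryLargeNumber) {
      try {
        return Integer.parseInt(part);
      } catch (NumberFormatException e) {
        // Non-numeric parts (e.g. "0-SNAPSHOT" pieces) are treated as "very large",
        // so they compare above any plain number.
        return veryLargeNumber;
      }
    }

    static int compareVersion(String v1, String v2, int veryLargeNumber) {
      String[] a = v1.split("\\.");
      String[] b = v2.split("\\.");
      int n = Math.max(a.length, b.length);
      for (int i = 0; i < n; i++) {
        int x = i < a.length ? componentValue(a[i], veryLargeNumber) : 0;
        int y = i < b.length ? componentValue(b[i], veryLargeNumber) : 0;
        if (x != y) {
          return x < y ? -1 : 1;
        }
      }
      return 0;
    }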
 

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
index cd54aaf..6b95323 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames
@@ -149,6 +149,13 @@ extends Class and Description
 
 
+static class
+CompactingMemStore.IndexType
+Types of indexes (part of immutable segments) to be used after flattening,
+ compaction, or merge are applied.
+
+
+
 private class
 CompactingMemStore.InMemoryFlushRunnable
 The in-memory-flusher thread performs the flush 
asynchronously.
@@ -175,6 +182,14 @@ extends 
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+COMPACTING_MEMSTORE_INDEX_DEFAULT
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+COMPACTING_MEMSTORE_INDEX_KEY
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 COMPACTING_MEMSTORE_TYPE_DEFAULT
 
 
@@ -202,30 +217,34 @@ extends IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY
 
 
+private CompactingMemStore.IndexType
+indexType
+
+
 private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true;
 title="class or interface in 
java.util.concurrent.atomic">AtomicBoolean
 inMemoryFlushInProgress
 
-
+
 private long
 inmemoryFlushSize
 
-
+
 private boolean
 inWalReplay
 
-
+
 private static 
org.apache.commons.logging.Log
 LOG
 
-
+
 private CompactionPipeline
 pipeline
 
-
+
 private RegionServicesForStores
 regionServices
 
-
+
 private Store
 store
 
@@ -325,108 +344,116 @@ extends getImmutableSegments()
 
 
+CompactingMemStore.IndexType
+getIndexType()
+
+
 (package private) long
 getInmemoryFlushSize()
 
-
+
 private Segment
 getLastSegment()
 
-
+
 (package private) Cell
 getNextRow(Cellcell)
 
-
+
 private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ThreadPoolExecutor.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ThreadPoolExecutor
 getPool()
 
-
+
 private RegionServicesForStores
 getRegionServices()
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanner
 getScanners(long readPt)
 
-
+
 protected http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListSegment
 getSegments()
 
-
+
 long
 getSmallestReadPoint()
 
-
+
 Store
 getStore()
 
-
+
 boolean
 hasImmutableSegments()
 
-
+
 protected long
 heapSize()
 
-
+
 (package private) void
 initiateType(MemoryCompactionPolicy compactionType)
 
-
+
 private void
 initInmemoryFlushSize(org.apache.hadoop.conf.Configurationconf)
 
-
+
 boolean
 isCompositeSnapshot()
 
-
+
 (package private) boolean
 isMemStoreFlushingInMemory()
 
-
+
 boolean
 isSloppy()
 
-
+
 protected long
 keySize()
 
-
+
 long
 preFlushSeqIDEstimation()
 This method is called before the flush is executed.
 
 
-
+
 protected void
 pushActiveToPipeline(MutableSegmentactive)
 
-
+
 private void
 pushPipelineToSnapshot()
 
-
+
 private void
 pushTailToSnapshot()
 
-
+
 private void
 pushToSnapshot(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListImmutableSegmentsegments)
 
-
+
 void
 setCompositeSnapshot(boolean useCompositeSnapshot)
 
-
+
+void
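The new members listed above (COMPACTING_MEMSTORE_INDEX_KEY, COMPACTING_MEMSTORE_INDEX_DEFAULT, getIndexType()) show that the index type used for the compacting memstore's immutable segments is now configuration driven. A hedged sketch of supplying that setting, using only the constant names on this page; the set of legal values is not shown in this hunk, so the default is simply reused:

    // Sketch only: the valid index-type values are not listed in this hunk.
    Configuration conf = HBaseConfiguration.create();
    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_INDEX_KEY,
        CompactingMemStore.COMPACTING_MEMSTORE_INDEX_DEFAULT);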

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
index c895448..545d4da 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
@@ -1294,425 +1294,426 @@
 1286  }
 1287
 1288  // We normalize locality to be a 
score between 0 and 1.0 representing how good it
-1289  // is compared to how good it 
could be
-1290  locality /= bestLocality;
-1291}
-1292
-1293@Override
-1294protected void regionMoved(int 
region, int oldServer, int newServer) {
-1295  int oldEntity = type == 
LocalityType.SERVER ? oldServer : cluster.serverIndexToRackIndex[oldServer];
-1296  int newEntity = type == 
LocalityType.SERVER ? newServer : cluster.serverIndexToRackIndex[newServer];
-1297  if (this.services == null) {
-1298return;
-1299  }
-1300  double localityDelta = 
getWeightedLocality(region, newEntity) - getWeightedLocality(region, 
oldEntity);
-1301  double normalizedDelta = 
localityDelta / bestLocality;
-1302  locality += normalizedDelta;
-1303}
-1304
-1305@Override
-1306double cost() {
-1307  return 1 - locality;
-1308}
-1309
-1310private int 
getMostLocalEntityForRegion(int region) {
-1311  return 
cluster.getOrComputeRegionsToMostLocalEntities(type)[region];
-1312}
-1313
-1314private double 
getWeightedLocality(int region, int entity) {
-1315  return 
cluster.getOrComputeWeightedLocality(region, entity, type);
-1316}
-1317
-1318  }
-1319
-1320  static class 
ServerLocalityCostFunction extends LocalityBasedCostFunction {
-1321
-1322private static final String 
LOCALITY_COST_KEY = "hbase.master.balancer.stochastic.localityCost";
-1323private static final float 
DEFAULT_LOCALITY_COST = 25;
-1324
-1325
ServerLocalityCostFunction(Configuration conf, MasterServices srv) {
-1326  super(
-1327  conf,
-1328  srv,
-1329  LocalityType.SERVER,
-1330  LOCALITY_COST_KEY,
-1331  DEFAULT_LOCALITY_COST
-1332  );
-1333}
-1334
-1335@Override
-1336int regionIndexToEntityIndex(int 
region) {
-1337  return 
cluster.regionIndexToServerIndex[region];
-1338}
-1339  }
-1340
-1341  static class RackLocalityCostFunction 
extends LocalityBasedCostFunction {
-1342
-1343private static final String 
RACK_LOCALITY_COST_KEY = "hbase.master.balancer.stochastic.rackLocalityCost";
-1344private static final float 
DEFAULT_RACK_LOCALITY_COST = 15;
-1345
-1346public 
RackLocalityCostFunction(Configuration conf, MasterServices services) {
-1347  super(
-1348  conf,
-1349  services,
-1350  LocalityType.RACK,
-1351  RACK_LOCALITY_COST_KEY,
-1352  DEFAULT_RACK_LOCALITY_COST
-1353  );
-1354}
-1355
-1356@Override
-1357int regionIndexToEntityIndex(int 
region) {
-1358  return 
cluster.getRackForRegion(region);
-1359}
-1360  }
-1361
-1362  /**
-1363   * Base class the allows writing costs 
functions from rolling average of some
-1364   * number from RegionLoad.
-1365   */
-1366  abstract static class 
CostFromRegionLoadFunction extends CostFunction {
-1367
-1368private ClusterStatus clusterStatus 
= null;
-1369    private Map<String, Deque<BalancerRegionLoad>> loads = null;
-1370private double[] stats = null;
-1371
CostFromRegionLoadFunction(Configuration conf) {
-1372  super(conf);
-1373}
-1374
-1375void setClusterStatus(ClusterStatus 
status) {
-1376  this.clusterStatus = status;
-1377}
-1378
-1379    void setLoads(Map<String, Deque<BalancerRegionLoad>> l) {
-1380  this.loads = l;
-1381}
-1382
-1383@Override
-1384double cost() {
-1385  if (clusterStatus == null || loads 
== null) {
-1386return 0;
-1387  }
-1388
-1389  if (stats == null || stats.length 
!= cluster.numServers) {
-1390stats = new 
double[cluster.numServers];
-1391  }
-1392
-1393      for (int i = 0; i < stats.length; i++) {
-1394//Cost this server has from 
RegionLoad
-1395long cost = 0;
-1396
-1397// for every region on this 
server get the rl
-1398for(int 
regionIndex:cluster.regionsPerServer[i]) {
-1399          Collection<BalancerRegionLoad> regionLoadList = cluster.regionLoads[regionIndex];
-1400
-1401  // Now if we found a region 
load get the type of cost that was requested.
-1402  if 

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
index 01496d6..dc12c09 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
@@ -48,2406 +48,2267 @@
 040
 041import io.netty.util.Timeout;
 042import io.netty.util.TimerTask;
-043import java.util.stream.Stream;
-044import org.apache.commons.io.IOUtils;
-045import org.apache.commons.logging.Log;
-046import 
org.apache.commons.logging.LogFactory;
-047import 
org.apache.hadoop.hbase.HRegionInfo;
-048import 
org.apache.hadoop.hbase.HRegionLocation;
-049import 
org.apache.hadoop.hbase.MetaTableAccessor;
-050import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-051import 
org.apache.hadoop.hbase.NotServingRegionException;
-052import 
org.apache.hadoop.hbase.ProcedureInfo;
-053import 
org.apache.hadoop.hbase.RegionLocations;
-054import 
org.apache.hadoop.hbase.ServerName;
-055import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-056import 
org.apache.hadoop.hbase.HConstants;
-057import 
org.apache.hadoop.hbase.TableExistsException;
-058import 
org.apache.hadoop.hbase.TableName;
-059import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-060import 
org.apache.hadoop.hbase.TableNotDisabledException;
-061import 
org.apache.hadoop.hbase.TableNotEnabledException;
-062import 
org.apache.hadoop.hbase.TableNotFoundException;
-063import 
org.apache.hadoop.hbase.UnknownRegionException;
-064import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-065import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-066import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-067import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-068import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-069import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-070import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-071import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-072import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-073import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-074import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-075import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-076import 
org.apache.hadoop.hbase.replication.ReplicationException;
-077import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-078import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-079import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-080import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-081import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-082import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-100import 

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
index 6de986f..c895448 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
@@ -26,1592 +26,1693 @@
 018package 
org.apache.hadoop.hbase.master.balancer;
 019
 020import java.util.ArrayDeque;
-021import java.util.Arrays;
-022import java.util.Collection;
-023import java.util.Deque;
-024import java.util.HashMap;
-025import java.util.LinkedList;
-026import java.util.List;
-027import java.util.Map;
-028import java.util.Map.Entry;
-029import java.util.Random;
-030
-031import org.apache.commons.logging.Log;
-032import 
org.apache.commons.logging.LogFactory;
-033import 
org.apache.hadoop.conf.Configuration;
-034import 
org.apache.hadoop.hbase.ClusterStatus;
-035import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-036import 
org.apache.hadoop.hbase.HConstants;
-037import 
org.apache.hadoop.hbase.HRegionInfo;
-038import 
org.apache.hadoop.hbase.RegionLoad;
-039import 
org.apache.hadoop.hbase.ServerLoad;
-040import 
org.apache.hadoop.hbase.ServerName;
-041import 
org.apache.hadoop.hbase.TableName;
-042import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-043import 
org.apache.hadoop.hbase.master.MasterServices;
-044import 
org.apache.hadoop.hbase.master.RegionPlan;
-045import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
-046import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
-047import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
-048import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
-049import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-050import 
org.apache.hadoop.hbase.util.Bytes;
-051import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-052
-053import com.google.common.collect.Lists;
-054
-055/**
-056 * <p>This is a best effort load balancer. Given a Cost function F(C) =&gt; x It will
-057 * randomly try and mutate the cluster to Cprime. If F(Cprime) &lt; F(C) then the
-058 * new cluster state becomes the plan. It includes costs functions to compute the cost of:</p>
-059 * <ul>
-060 * <li>Region Load</li>
-061 * <li>Table Load</li>
-062 * <li>Data Locality</li>
-063 * <li>Memstore Sizes</li>
-064 * <li>Storefile Sizes</li>
-065 * </ul>
-066 *
-067 *
-068 * <p>Every cost function returns a number between 0 and 1 inclusive; where 0 is the lowest cost
-069 * best solution, and 1 is the highest possible cost and the worst solution.  The computed costs are
-070 * scaled by their respective multipliers:</p>
+021import java.util.ArrayList;
+022import java.util.Arrays;
+023import java.util.Collection;
+024import java.util.Collections;
+025import java.util.Deque;
+026import java.util.HashMap;
+027import java.util.LinkedList;
+028import java.util.List;
+029import java.util.Map;
+030import java.util.Map.Entry;
+031import java.util.Random;
+032
+033import org.apache.commons.logging.Log;
+034import 
org.apache.commons.logging.LogFactory;
+035import 
org.apache.hadoop.conf.Configuration;
+036import 
org.apache.hadoop.hbase.ClusterStatus;
+037import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
+038import 
org.apache.hadoop.hbase.HConstants;
+039import 
org.apache.hadoop.hbase.HRegionInfo;
+040import 
org.apache.hadoop.hbase.RegionLoad;
+041import 
org.apache.hadoop.hbase.ServerLoad;
+042import 
org.apache.hadoop.hbase.ServerName;
+043import 
org.apache.hadoop.hbase.TableName;
+044import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+045import 
org.apache.hadoop.hbase.master.MasterServices;
+046import 
org.apache.hadoop.hbase.master.RegionPlan;
+047import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
+048import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
+049import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
+050import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
+051import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
+052import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
+053import 
org.apache.hadoop.hbase.util.Bytes;
+054import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+055
+056import com.google.common.base.Optional;
+057import 
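The class comment above (randomly mutate the cluster, accept C' only when F(C') < F(C), with each cost function normalized to [0, 1] and scaled by its multiplier) amounts to a weighted-cost acceptance test. A small stand-alone sketch of that idea, not the StochasticLoadBalancer implementation itself (the real balancer also bounds the number of steps and carries many more cost functions):

    // Illustration of the weighted-cost idea from the javadoc above.
    interface CostFunction {
      double cost();        // normalized to [0, 1]
      double multiplier();  // relative weight of this function
    }

    final class CostModel {
      static double weightedCost(java.util.List<CostFunction> functions) {
        double total = 0.0;
        double weights = 0.0;
        for (CostFunction f : functions) {
          total += f.multiplier() * f.cost();
          weights += f.multiplier();
        }
        return weights == 0.0 ? 0.0 : total / weights;
      }

      // Accept the mutated cluster only if it strictly lowers the weighted cost.
      static boolean accept(double currentCost, double mutatedCost) {
        return mutatedCost < currentCost;
      }
    }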

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/DataBlockEncoding.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/DataBlockEncoding.html
 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/DataBlockEncoding.html
index 32085ba..d3a379b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/DataBlockEncoding.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/DataBlockEncoding.html
@@ -87,21 +87,27 @@
 
 
 
+org.apache.hadoop.hbase.client
+
+Provides HBase Client
+
+
+
 org.apache.hadoop.hbase.codec.prefixtree
 
 
-
+
 org.apache.hadoop.hbase.io.encoding
 
 
-
+
 org.apache.hadoop.hbase.io.hfile
 
 Provides implementations of HFile and 
HFile
  BlockCache.
 
 
-
+
 org.apache.hadoop.hbase.mapreduce
 
 Provides HBase http://wiki.apache.org/hadoop/HadoopMapReduce;>MapReduce
@@ -126,7 +132,9 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 DataBlockEncoding
-HColumnDescriptor.getDataBlockEncoding()
+HColumnDescriptor.getDataBlockEncoding()
+Deprecated.
+
 
 
 
@@ -139,7 +147,64 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 HColumnDescriptor
-HColumnDescriptor.setDataBlockEncoding(DataBlockEncoding type)
+HColumnDescriptor.setDataBlockEncoding(DataBlockEncoding value)
+Deprecated.
+Set data block encoding algorithm used in block cache.
+
+
+
+
+
+
+
+
+Uses of DataBlockEncoding in 
org.apache.hadoop.hbase.client
+
+Fields in org.apache.hadoop.hbase.client
 declared as DataBlockEncoding
+
+Modifier and Type
+Field and Description
+
+
+
+static DataBlockEncoding
+ColumnFamilyDescriptorBuilder.DEFAULT_DATA_BLOCK_ENCODING
+Default data block encoding algorithm.
+
+
+
+
+
+Methods in org.apache.hadoop.hbase.client
 that return DataBlockEncoding
+
+Modifier and Type
+Method and Description
+
+
+
+DataBlockEncoding
+ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.getDataBlockEncoding()
+
+
+DataBlockEncoding
+ColumnFamilyDescriptor.getDataBlockEncoding()
+
+
+
+
+Methods in org.apache.hadoop.hbase.client
 with parameters of type DataBlockEncoding
+
+Modifier and Type
+Method and Description
+
+
+
+ColumnFamilyDescriptorBuilder
+ColumnFamilyDescriptorBuilder.setDataBlockEncoding(DataBlockEncoding value)
+
+
+ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor
+ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.setDataBlockEncoding(DataBlockEncoding type)
 Set data block encoding algorithm used in block cache.
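The new client-package entries above show the builder-style replacement for the deprecated HColumnDescriptor.setDataBlockEncoding. A hedged usage sketch based only on the members listed here (setDataBlockEncoding and DEFAULT_DATA_BLOCK_ENCODING); the newBuilder factory and the family name are assumptions for illustration:

    // Sketch: builder API as summarized on this page; newBuilder(...) is assumed.
    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))
        .setDataBlockEncoding(ColumnFamilyDescriptorBuilder.DEFAULT_DATA_BLOCK_ENCODING)
        .build();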
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/io/encoding/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/encoding/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/package-use.html
index ece9276..30a95a2 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/encoding/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/encoding/package-use.html
@@ -87,21 +87,27 @@
 
 
 
+org.apache.hadoop.hbase.client
+
+Provides HBase Client
+
+
+
 org.apache.hadoop.hbase.codec.prefixtree
 
 
-
+
 org.apache.hadoop.hbase.io.encoding
 
 
-
+
 org.apache.hadoop.hbase.io.hfile
 
 Provides implementations of HFile and 
HFile
  BlockCache.
 
 
-
+
 org.apache.hadoop.hbase.mapreduce
 
 Provides HBase http://wiki.apache.org/hadoop/HadoopMapReduce;>MapReduce
@@ -128,6 +134,23 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
+
+
+
+
+Classes in org.apache.hadoop.hbase.io.encoding
 used by org.apache.hadoop.hbase.client
+
+Class and Description
+
+
+
+DataBlockEncoding
+Provide access to all data block encoding algorithms.
+
+
+
+
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.ExternalBlockCaches.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.ExternalBlockCaches.html
 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.ExternalBlockCaches.html
index e963e90..982ab4b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.ExternalBlockCaches.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.ExternalBlockCaches.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static enum CacheConfig.ExternalBlockCaches
+private static enum CacheConfig.ExternalBlockCaches
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumCacheConfig.ExternalBlockCaches
 Enum of all built in external block caches.
  This is used for config.
@@ -228,7 +228,7 @@ the order they are declared.
 
 
 memcached
-public static 

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html
index bc2627f..7aeab6a 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html
@@ -4,7 +4,7 @@
 
 
 
-Uses of Class org.apache.hadoop.hbase.TableNotFoundException (Apache 
HBase 2.0.0-SNAPSHOT API)
+Uses of Class org.apache.hadoop.hbase.TableNotFoundException (Apache 
HBase 3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
-Uses of Interface org.apache.hadoop.hbase.Tag (Apache HBase 
2.0.0-SNAPSHOT API)
+Uses of Interface org.apache.hadoop.hbase.Tag (Apache HBase 
3.0.0-SNAPSHOT API)