[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html b/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html
index f13140b..1f135b1 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":9,"i15":10,"i16":10,"i17":10,"i18":42,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":42,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":42,"i44":10,"i45":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":9,"i15":10,"i16":10,"i17":10,"i18":42,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":42,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":42,"i43":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -452,24 +452,18 @@ implements 


-long
-getWriteBufferSize()
-Returns the maximum size in bytes of the write buffer for this HTable.
-
-
-
 int
 getWriteRpcTimeout()
 Get timeout (millisecond) of each rpc write request in this Table instance.


-
+
 Result
 increment(Increment increment)
 Increments one or more columns within a single row.


-
+
 long
 incrementColumnValue(byte[] row,
 byte[] family,
@@ -478,7 +472,7 @@ implements See Table.incrementColumnValue(byte[], byte[], byte[], long, Durability)


-
+
 long
 incrementColumnValue(byte[] row,
 byte[] family,
@@ -488,55 +482,49 @@ implements Atomically increments a column value.


-
+
 void
 internalClose() 

-
+
 void
 mutateRow(RowMutations rm)
 Performs multiple mutations atomically on a single row.


-
+
 void
 put(List puts)
 Puts some data in the table, in batch.


-
+
 void
 put(Put put)
 Puts some data in the table.


-
+
 void
 setOperationTimeout(int operationTimeout)
 Set timeout (millisecond) of each operation in this Table instance, will override the value
  of hbase.client.operation.timeout in configuration.


-
+
 void
 setReadRpcTimeout(int readRpcTimeout)
 Set timeout (millisecond) of each rpc read request in operations of this Table instance, will
  override the value of hbase.rpc.read.timeout in configuration.


-
+
 void
 setRpcTimeout(int rpcTimeout)
 Deprecated. 


-
-void
-setWriteBufferSize(long writeBufferSize)
-Sets the size of the buffer in bytes.
-
-
-
+
 void
 setWriteRpcTimeout(int writeRpcTimeout)
 Set timeout (millisecond) of each rpc write request in operations of this Table instance, will
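For orientation, a minimal sketch of how these per-Table timeout setters are driven from client code; the table name and millisecond values are illustrative, not taken from this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class TimeoutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("demo"))) {
          // Overrides hbase.client.operation.timeout for this Table instance only.
          table.setOperationTimeout(60000);
          // Overrides the per-RPC read/write timeouts from configuration.
          table.setReadRpcTimeout(10000);
          table.setWriteRpcTimeout(10000);
        }
      }
    }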
@@ -828,17 +816,12 @@ public Boolean
 Description copied from interface: Table
 Puts some data in the table, in batch.
  
- This can be used for group commit, or for submitting user defined
- batches.  The writeBuffer will be periodically inspected while the List
- is processed, so depending on the List size the writeBuffer may flush
- not at all, or more than once.
+ This can be used for group commit, or for submitting user defined batches.

 Specified by:
 put in interface Table
 Parameters:
-puts - The list of mutations to apply. The batch put is done by
- aggregating the iteration of the Puts over the write buffer
- at the client-side for a single RPC call.
+puts - The list of mutations to apply.
 Throws:
 IOException - if a remote or network exception occurs.
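As a usage illustration of the batch put described here (table and column names are invented for the sketch; this is not code from the commit):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BatchPutExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("demo"))) {
          List<Put> puts = new ArrayList<>();
          for (int i = 0; i < 100; i++) {
            Put put = new Put(Bytes.toBytes("row-" + i));
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes(i));
            puts.add(put);
          }
          // One call submits the whole user-defined batch; with the write-buffer
          // wording removed in this commit, no client-side buffering semantics
          // are implied here.
          table.put(puts);
        }
      }
    }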
 
@@ -1496,49 +1479,6 @@ public Boolean
-
-
-
-getWriteBufferSize
-public long getWriteBufferSize()
-Description copied from interface: Table
-Returns the maximum size in bytes of the write buffer for this HTable.
- 
- The default value comes from the configuration parameter
- hbase.client.write.buffer.
-
-Specified by:
-getWriteBufferSize in interface Table
-Returns:
-The size of the write buffer in bytes.
-
-
-setWriteBufferSize
-public void setWriteBufferSize(long writeBufferSize)
-throws IOExce

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 4e865e9..837212d 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Checkstyle Results
 
@@ -286,10 +286,10 @@
  Warnings
  Errors

-2250  0  0  14897
+2024  0  0  12792
 Files
 
@@ -304,2810 +304,2810 @@
 0
 1
-maven-archiver/pom.properties  0  0  1
 org/apache/hadoop/hbase/AsyncMetaTableAccessor.java  0  0  53
 org/apache/hadoop/hbase/AuthUtil.java  0  0  2
 org/apache/hadoop/hbase/BaseConfigurable.java  0  0  1
 org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java  0  0  3
 org/apache/hadoop/hbase/ByteBufferKeyValue.java  0  0  1
 org/apache/hadoop/hbase/Cell.java  0  0  3
 org/apache/hadoop/hbase/CellComparator.java  0  0  30
 org/apache/hadoop/hbase/CellScanner.java  0  0  1
 org/apache/hadoop/hbase/CellUtil.java  0  0  98
 org/apache/hadoop/hbase/ChoreService.java  0  0  5
 org/apache/hadoop/hbase/ClusterId.java  0  0  2
 org/apache/hadoop/hbase/ClusterStatus.java  0  0  1
 org/apache/hadoop/hbase/CompatibilityFactory.java  0  0  1
 org/apache/hadoop/hbase/CompatibilitySingletonFactory.java  0  0  1
 org/apache/hadoop/hbase/CompoundConfiguration.java  0  0  3
 org/apache/hadoop/hbase/CoordinatedStateManagerFactory.java  0  0  1
 org/apache/hadoop/hbase/CoprocessorEnvironment.java  0  0  2
 org/apache/hadoop/hbase/DoNotRetryIOException.java  0  0  3
 org/apache/hadoop/hbase/DroppedSnapshotException.java  0  0  1
 org/apache/hadoop/hbase/ExtendedCell.java  0  0  1
 org/apache/hadoop/hbase/HBaseConfiguration.java  0  0  6
 org/apache/hadoop/hbase/HColumnDescriptor.java  0  0  44
 org/apache/hadoop/hbase/HConstants.java  0  0  1
 org/apache/hadoop/hbase/HRegionInfo.java  0  0  59
 org/apache/hadoop/hbase/HRegionLocation.java  0  0  1
 org/apache/hadoop/hbase/HTableDescriptor.java  0  0  34
 org/apache/hadoop/hbase/HealthChecker.java  0  0  17
 org/apache/hadoop/hbase/IndividualBytesFieldCell.java  0  0  11
 org/apache/hadoop/hbase/JMXListener.java  0  0  3
 org/apache/hadoop/hbase/JitterScheduledThreadPoolExecutorImpl.java  0  0  1
 org/apache/hadoop/hbase/KeyValue.java  0  0  135
 org/apache/hadoop/hbase/KeyValueTestUtil.java  0  0  9
 org/apache/hadoop/hbase/KeyValueUtil.java  0  0  30
 org/apache/hadoop/hbase/LocalHBaseCluster.java  0  0  24
 org/apache/hadoop/hbase/MetaMutationAnnotation.java  0  0  1
 org/apache/hadoop/hbase/MetaTableAccessor.java  0  0  120
 org/apache/hadoop/hbase/NamespaceDescriptor.java  0  0  3
 org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java  0  0  1
 org/apache/hadoop/hbase/RegionLoad.java  0  0  1
 org/apache/hadoop/hbase/RegionLocations.java  0  0  10
 org/apache/hadoop/hbase/RegionStateListener.java  0  0  2
 org/apache/hadoop/hbase/ScheduledChore.java  0  0  5
 org/apache/hadoop/hbase/ServerLoad.java  0  0  2
 org/apache/hadoop/hbase/ServerName.java  0  0  27
 org/apache/hadoop/hbase/SettableSequenceId.java  0  0  1
 org/apache/hadoop/hbase/SettableTimestamp.java  0  0  1
 org/apache/hadoop/hbase/SplitLogCounters.java  0  0  1
 org/apache/hadoop/hbase/SplitLogTask.java  0  0  2
 org/apache/hadoop/hbase/TableDescriptors.java  0  0  8
 org/apache/hadoop/hbase/TableInfoMissingException.java  0  0  6
 org/apache/hadoop/hbase/TableName.java  0  0  20
 org/apache/hadoop/hbase/TagType.java  0  0  1
 org/apache/hadoop/hbase/TagUtil.java  0  0  2
 org/apache/hadoop/hbase/ZKNamespaceManager.java  0  0  3
 org/apache/hadoop/hbase/ZNodeClearer.java  0  0  3
 org/apache/hadoop/hbase/backup/BackupClientFactory.java  0  0  3
 org/apache/hadoop/hbase/backup/BackupCopyJob.java  0  0  1
 org/apache/hadoop/hbase/backup/BackupHFileCleaner.java  0  0  3
 org/apache/hadoop/hbase/backup/BackupRestoreConstants.java  0  0  3
 org/apache/hadoop/hbase/backup/BackupTableInfo.java  0  0  1
 org/apache/hadoop/hbase/backup/FailedArchiveException.java  0  0  1
 org/apache/hadoop/hbase/backup/HBackupFileSystem.java  0  0  1
 org/apache/hadoop/hbase/backup/HFileArchiver.java  0  0  20
 org/apache/hadoop/hbase/backup/LogUtils.java  0  0  1
 org/apache/hadoop/hbase/backup/RestoreDriver.java  0  0  1
 org/apache/hadoop/hbase/backup/RestoreJob.java  0  0  1
 org/apache/hadoop/hbase/backup/RestoreRequest.java  0  0  1
 org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java  0  0  3
 org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java  0  0  5
 org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java  0  0  6
 org/apache/hadoop/hbase/backup/example/ZKTable

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
index 35d5549..7f42873 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
@@ -115,2816 +115,2814 @@
 107import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 108import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
 109import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-110import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-111import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-112import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-113import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-114import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-115import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-116import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-117import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-118import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-139import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-140import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-141import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-142import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-143import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-144import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-145import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-146import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-147import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-148import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-149import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescri

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
index 5c95397..860416b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
@@ -293,7944 +293,7962 @@
 285  final AtomicLong compactionsFailed = new AtomicLong(0L);
 286  final AtomicLong compactionNumFilesCompacted = new AtomicLong(0L);
 287  final AtomicLong compactionNumBytesCompacted = new AtomicLong(0L);
-288
-289  private final WAL wal;
-290  private final HRegionFileSystem fs;
-291  protected final Configuration conf;
-292  private final Configuration baseConf;
-293  private final int rowLockWaitDuration;
-294  static final int DEFAULT_ROWLOCK_WAIT_DURATION = 3;
-295
-296  // The internal wait duration to acquire a lock before read/update
-297  // from the region. It is not per row. The purpose of this wait time
-298  // is to avoid waiting a long time while the region is busy, so that
-299  // we can release the IPC handler soon enough to improve the
-300  // availability of the region server. It can be adjusted by
-301  // tuning configuration "hbase.busy.wait.duration".
-302  final long busyWaitDuration;
-303  static final long DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-304
-305  // If updating multiple rows in one call, wait longer,
-306  // i.e. waiting for busyWaitDuration * # of rows. However,
-307  // we can limit the max multiplier.
-308  final int maxBusyWaitMultiplier;
-309
-310  // Max busy wait duration. There is no point to wait longer than the RPC
-311  // purge timeout, when a RPC call will be terminated by the RPC engine.
-312  final long maxBusyWaitDuration;
-313
-314  // Max cell size. If nonzero, the maximum allowed size for any given cell
-315  // in bytes
-316  final long maxCellSize;
-317
-318  // negative number indicates infinite timeout
-319  static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-320  final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool();
-321
-322  private final ConcurrentHashMap scannerReadPoints;
+288  final AtomicLong compactionsQueued = new AtomicLong(0L);
+289  final AtomicLong flushesQueued = new AtomicLong(0L);
+290
+291  private final WAL wal;
+292  private final HRegionFileSystem fs;
+293  protected final Configuration conf;
+294  private final Configuration baseConf;
+295  private final int rowLockWaitDuration;
+296  static final int DEFAULT_ROWLOCK_WAIT_DURATION = 3;
+297
+298  // The internal wait duration to acquire a lock before read/update
+299  // from the region. It is not per row. The purpose of this wait time
+300  // is to avoid waiting a long time while the region is busy, so that
+301  // we can release the IPC handler soon enough to improve the
+302  // availability of the region server. It can be adjusted by
+303  // tuning configuration "hbase.busy.wait.duration".
+304  final long busyWaitDuration;
+305  static final long DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+306
+307  // If updating multiple rows in one call, wait longer,
+308  // i.e. waiting for busyWaitDuration * # of rows. However,
+309  // we can limit the max multiplier.
+310  final int maxBusyWaitMultiplier;
+311
+312  // Max busy wait duration. There is no point to wait longer than the RPC
+313  // purge timeout, when a RPC call will be terminated by the RPC engine.
+314  final long maxBusyWaitDuration;
+315
+316  // Max cell size. If nonzero, the maximum allowed size for any given cell
+317  // in bytes
+318  final long maxCellSize;
+319
+320  // negative number indicates infinite timeout
+321  static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+322  final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool();
 323
-324  /**
-325   * The sequence ID that was enLongAddered when this region was opened.
-326   */
-327  private long openSeqNum = HConstants.NO_SEQNUM;
-328
-329  /**
-330   * The default setting for whether to enable on-demand CF loading for
-331   * scan requests to this region. Requests can override it.
-332   */
-333  private boolean isLoadingCfsOnDemandDefault = false;
-334
-335  private final AtomicInteger majorInProgress = new AtomicInteger(0);
-336  private final AtomicInteger minorInProgress = new AtomicInteger(0);
-337
-338  //
-339  // Context: During replay we want to ensure that we do not lose any data. So, we
-340  // have to be conservative in how we replay wals. For each store, we calculate
-341  // the maxSeqId up to
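The busy-wait knobs referenced in the comments above are plain configuration keys; a minimal sketch of setting them on an HBase Configuration (the value is illustrative, not a recommendation from this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BusyWaitConfigExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Key named in the HRegion comments; the default is the RPC timeout.
        conf.setLong("hbase.busy.wait.duration", 30000L);
        System.out.println(conf.getLong("hbase.busy.wait.duration", -1L));
      }
    }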

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
index e458be7..c73f25f 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
@@ -197,8 +197,8 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type
 org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType
+org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
index c9e6133..da630e2 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
@@ -312,11 +312,11 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus
-org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode
-org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective
 org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl.FactoryStorage
+org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective
+org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode
 org.apache.hadoop.hbase.master.RegionState.State
+org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/org/apache/hadoop/hbase/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/package-tree.html
index 238447e..eaac1ff 100644
--- a/devapidocs/org/apache/hadoop/hbase/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/package-tree.html
@@ -422,16 +422,16 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.ProcedureState
+org.apache.hadoop.hbase.HConstants.Modify
+org.apache.hadoop.hbase.KeyValue.Type
 org.apache.hadoop.hbase.MemoryCompactionPolicy
-org.apache.hadoop.hbase.Coprocessor.State
-org.apache.hadoop.hbase.MetaTableAccessor.QueryType
 org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus
-org.apache.hadoop.hbase.HConstants.Modify
-org.apache.hadoop.hbase.HConstants.OperationStatusCode
+org.apache.hadoop.hbase.Coprocessor.State
 org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage
 org.apache.hadoop.hbase.KeepDeletedCells
-org.apache.hadoop.hbase.KeyValue.Type
+org.apache.hadoop.hbase.HConstants.OperationStatusCode
+org.apache.hadoop.hbase.ProcedureState
+org.apache.hadoop.hbase.MetaTableAccessor.QueryType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
index 455e6cd..7e459fe 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
@@ -201,11

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
new file mode 100644
index 000..440b612
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
@@ -0,0 +1,522 @@
+ProcedureExecutor.FailedProcedure (Apache HBase 3.0.0-SNAPSHOT API)
+
+
+
+
+
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+
+org.apache.hadoop.hbase.procedure2
+Class ProcedureExecutor.FailedProcedure
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.procedure2.Procedure
+
+
+org.apache.hadoop.hbase.procedure2.ProcedureExecutor.FailedProcedure
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+Comparable>
+
+
+Enclosing class:
+ProcedureExecutor
+
+
+
+private static class ProcedureExecutor.FailedProcedure
+extends Procedure
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from class org.apache.hadoop.hbase.procedure2.Procedure
+Procedure.LockState
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields 
+
+Modifier and Type
+Field and Description
+
+
+private String
+procName 
+
+
+
+
+
+
+Fields inherited from class org.apache.hadoop.hbase.procedure2.Procedure
+NO_PROC_ID,
 NO_TIMEOUT
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors 
+
+Constructor and Description
+
+
+FailedProcedure(NonceKey nonceKey,
+   String procName,
+   User owner,
+   IOException exception) 
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All Methods Instance Methods Concrete Methods 
+
+Modifier and Type
+Method and Description
+
+
+protected boolean
+abort(TEnvironment env)
+The abort() call is asynchronous and each procedure must decide how to deal
+ with it, if they want to be abortable.
+
+
+
+protected void
+deserializeStateData(InputStream stream)
+Called on store load to allow the user to decode the previously serialized
+ state.
+
+
+
+protected Procedure[]
+execute(TEnvironment env)
+The main code of the procedure.
+
+
+
+String
+getProcName() 
+
+
+protected void
+rollback(TEnvironment env)
+The code to undo what was done by the execute() code.
+
+
+
+protected void
+serializeStateData(OutputStream stream)
+The user-level code of the procedure may have some state to
+ persist (e.g.
+
+
+
+
+
+
+
+Methods inherited from class org.apache.hadoop.hbase.procedure2.Procedure
+acquireLock,
 addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doAcquireLock,
 doExecute,
 doReleaseLock,
 doRollback,
 elapsedTime,
 getChi
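The execute/rollback/abort contract summarized above can be illustrated with a plain-Java sketch; the types below are simplified stand-ins for illustration, not the real org.apache.hadoop.hbase.procedure2 signatures:

    // Stand-in for the procedure lifecycle: execute() does the work and may
    // return follow-up steps, rollback() undoes it, abort() is an async request.
    abstract class SketchProcedure<TEnv> {
      private volatile boolean abortRequested;

      // Analogue of execute(TEnvironment): returns sub-steps, or null when done.
      protected abstract SketchProcedure<TEnv>[] execute(TEnv env) throws Exception;

      // Analogue of rollback(TEnvironment): undo what execute() did.
      protected abstract void rollback(TEnv env) throws Exception;

      // Analogue of abort(TEnvironment): asynchronous; the procedure decides
      // whether it honors the request at its next step.
      protected boolean abort(TEnv env) {
        abortRequested = true;
        return true;
      }

      final void run(TEnv env) throws Exception {
        try {
          if (!abortRequested) {
            execute(env);
          }
        } catch (Exception e) {
          rollback(env); // on failure, the undo path runs
          throw e;
        }
      }
    }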

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
index c99c156..88b693b 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":9,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":9,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":9,"i44":9,"i45":9,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":9,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":9,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":9,"i44":9,"i45":9,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class RSRpcServices
+public class RSRpcServices
 extends Object
 implements HBaseRPCErrorHandler, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface, org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface, PriorityFunction, ConfigurationObserver
 Implements the regionserver RPC services.
@@ -726,27 +726,20 @@ implements isClientCellBlockSupport(RpcCallContext context) 


-org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse
-mergeRegions(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
-    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest request)
-Merge regions on the region server.
-
-
-
 org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiResponse
 multi(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController rpcc,
     org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest request)
 Execute multiple actions on a table: get, mutate, and/or execCoprocessor


-
+
 org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateResponse
 mutate(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController rpcc,
     org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest request)
 Mutate data in a table.


-
+
 private void
 mutateRows(Region region,
     List actions,
@@ -755,52 +748,52 @@ implements Mutate a list of rows atomically.


-
+
 private RSRpcServices.RegionScannerHolder
 newRegionScanner(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest request,
     org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse.Builder builder) 

-
+
 void
 onConfigurationChange(org.apache.hadoop.conf.Configuration newConf)
 This method would be called by the ConfigurationManager
  object when the Configuration object is reloaded from disk.


-
+
 org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse
 openRegion(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
     org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest request)
 Open asynchronously a region or a 
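On the client side, the multi RPC listed above is the server-side counterpart of batched Table operations; a hedged sketch of driving mixed gets and puts in one batch (table, family, and row names are invented for illustration):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Row;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MultiActionExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("demo"))) {
          List<Row> actions = new ArrayList<>();
          actions.add(new Get(Bytes.toBytes("r1")));
          actions.add(new Put(Bytes.toBytes("r2"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
          Object[] results = new Object[actions.size()];
          // Gets and mutations travel together, one round trip per region server.
          table.batch(actions, results);
        }
      }
    }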

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/apidocs/src-html/org/apache/hadoop/hbase/client/OperationWithAttributes.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/OperationWithAttributes.html b/apidocs/src-html/org/apache/hadoop/hbase/client/OperationWithAttributes.html
index 30c1dfe..7a22c76 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/OperationWithAttributes.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/OperationWithAttributes.html
@@ -31,92 +31,104 @@
 023import java.util.HashMap;
 024import java.util.Map;
 025
-026import org.apache.hadoop.hbase.classification.InterfaceAudience;
-027import org.apache.hadoop.hbase.util.Bytes;
-028import org.apache.hadoop.hbase.util.ClassSize;
-029
-030@InterfaceAudience.Public
-031public abstract class OperationWithAttributes extends Operation implements Attributes {
-032  // An opaque blob of attributes
-033  private Map attributes;
-034
-035  // used for uniquely identifying an operation
-036  public static final String ID_ATRIBUTE = "_operation.attributes.id";
-037
-038  @Override
-039  public OperationWithAttributes setAttribute(String name, byte[] value) {
-040    if (attributes == null && value == null) {
-041      return this;
-042    }
-043
-044    if (attributes == null) {
-045      attributes = new HashMap<>();
-046    }
-047
-048    if (value == null) {
-049      attributes.remove(name);
-050      if (attributes.isEmpty()) {
-051        this.attributes = null;
-052      }
-053    } else {
-054      attributes.put(name, value);
-055    }
-056    return this;
-057  }
-058
-059  @Override
-060  public byte[] getAttribute(String name) {
-061    if (attributes == null) {
-062      return null;
-063    }
-064
-065    return attributes.get(name);
-066  }
-067
-068  @Override
-069  public Map getAttributesMap() {
-070    if (attributes == null) {
-071      return Collections.emptyMap();
-072    }
-073    return Collections.unmodifiableMap(attributes);
-074  }
-075
-076  protected long getAttributeSize() {
-077    long size = 0;
-078    if (attributes != null) {
-079      size += ClassSize.align(this.attributes.size() * ClassSize.MAP_ENTRY);
-080      for(Map.Entry entry : this.attributes.entrySet()) {
-081        size += ClassSize.align(ClassSize.STRING + entry.getKey().length());
-082        size += ClassSize.align(ClassSize.ARRAY + entry.getValue().length);
-083      }
-084    }
-085    return size;
-086  }
-087
-088  /**
-089   * This method allows you to set an identifier on an operation. The original
-090   * motivation for this was to allow the identifier to be used in slow query
-091   * logging, but this could obviously be useful in other places. One use of
-092   * this could be to put a class.method identifier in here to see where the
-093   * slow query is coming from.
-094   * @param id
-095   *          id to set for the scan
-096   */
-097  public OperationWithAttributes setId(String id) {
-098    setAttribute(ID_ATRIBUTE, Bytes.toBytes(id));
-099    return this;
-100  }
-101
-102  /**
-103   * This method allows you to retrieve the identifier for the operation if one
-104   * was set.
-105   * @return the id or null if not set
-106   */
-107  public String getId() {
-108    byte[] attr = getAttribute(ID_ATRIBUTE);
-109    return attr == null? null: Bytes.toString(attr);
-110  }
-111}
+026import org.apache.hadoop.hbase.HConstants;
+027import org.apache.hadoop.hbase.classification.InterfaceAudience;
+028import org.apache.hadoop.hbase.util.Bytes;
+029import org.apache.hadoop.hbase.util.ClassSize;
+030
+031@InterfaceAudience.Public
+032public abstract class OperationWithAttributes extends Operation implements Attributes {
+033  // An opaque blob of attributes
+034  private Map attributes;
+035
+036  // used for uniquely identifying an operation
+037  public static final String ID_ATRIBUTE = "_operation.attributes.id";
+038  private int priority = HConstants.PRIORITY_UNSET;
+039
+040  @Override
+041  public OperationWithAttributes setAttribute(String name, byte[] value) {
+042    if (attributes == null && value == null) {
+043      return this;
+044    }
+045
+046    if (attributes == null) {
+047      attributes = new HashMap<>();
+048    }
+049
+050    if (value == null) {
+051      attributes.remove(name);
+052      if (attributes.isEmpty()) {
+053        this.attributes = null;
+054      }
+055    } else {
+056      attributes.put(name, value);
+057    }
+058    return this;
+059  }
+060
+061  @Override
+062  public byte[] getAttribute(String name) {
+063    if (attributes == null) {
+064      return null;
+065    }
+066
+067    return attributes.get(name);
+068  }
+069
+070  @Override
+071  public Map getAttributesMap() {
+072    if (attributes == null) {
+073      return Collections.emptyMap();
[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslUnwrapHandler.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslUnwrapHandler.html
 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslUnwrapHandler.html
index 628e2aa..3ec28b0 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslUnwrapHandler.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslUnwrapHandler.html
@@ -132,7 +132,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static final class FanOutOneBlockAsyncDFSOutputSaslHelper.SaslUnwrapHandler
+private static final class FanOutOneBlockAsyncDFSOutputSaslHelper.SaslUnwrapHandler
 extends 
io.netty.channel.SimpleChannelInboundHandler
 
 
@@ -268,7 +268,7 @@ extends 
io.netty.channel.SimpleChannelInboundHandlerSaslClient saslClient
+private final http://docs.oracle.com/javase/8/docs/api/javax/security/sasl/SaslClient.html?is-external=true";
 title="class or interface in javax.security.sasl">SaslClient saslClient
 
 
 
@@ -285,7 +285,7 @@ extends 
io.netty.channel.SimpleChannelInboundHandlerSaslClient saslClient)
+public SaslUnwrapHandler(http://docs.oracle.com/javase/8/docs/api/javax/security/sasl/SaslClient.html?is-external=true";
 title="class or interface in 
javax.security.sasl">SaslClient saslClient)
 
 
 
@@ -302,7 +302,7 @@ extends 
io.netty.channel.SimpleChannelInboundHandlerException
 
 Specified by:
@@ -320,7 +320,7 @@ extends 
io.netty.channel.SimpleChannelInboundHandlerException
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslWrapHandler.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslWrapHandler.html
 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslWrapHandler.html
index 9495ef6..0131257 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslWrapHandler.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslWrapHandler.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static final class FanOutOneBlockAsyncDFSOutputSaslHelper.SaslWrapHandler
+private static final class FanOutOneBlockAsyncDFSOutputSaslHelper.SaslWrapHandler
 extends io.netty.channel.ChannelOutboundHandlerAdapter
 
 
@@ -270,7 +270,7 @@ extends io.netty.channel.ChannelOutboundHandlerAdapter
 
 
 saslClient
-private final http://docs.oracle.com/javase/8/docs/api/javax/security/sasl/SaslClient.html?is-external=true";
 title="class or interface in javax.security.sasl">SaslClient saslClient
+private final http://docs.oracle.com/javase/8/docs/api/javax/security/sasl/SaslClient.html?is-external=true";
 title="class or interface in javax.security.sasl">SaslClient saslClient
 
 
 
@@ -279,7 +279,7 @@ extends io.netty.channel.ChannelOutboundHandlerAdapter
 
 
 cBuf
-private io.netty.buffer.CompositeByteBuf cBuf
+private io.netty.buffer.CompositeByteBuf cBuf
 
 
 
@@ -296,7 +296,7 @@ extends io.netty.channel.ChannelOutboundHandlerAdapter
 
 
 SaslWrapHandler
-public SaslWrapHandler(http://docs.oracle.com/javase/8/docs/api/javax/security/sasl/SaslClient.html?is-external=true";
 title="class or interface in 
javax.security.sasl">SaslClient saslClient)
+public SaslWrapHandler(http://docs.

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.html b/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.html
index 5eefe01..59063f5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.html
@@ -58,606 +58,607 @@
 050import org.apache.hadoop.hbase.util.CancelableProgressable;
 051import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 052import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-053import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
-054import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-055import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
-056import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-057import org.apache.hadoop.util.StringUtils;
-058import org.apache.zookeeper.AsyncCallback;
-059import org.apache.zookeeper.KeeperException;
-060import org.apache.zookeeper.data.Stat;
-061
-062/**
-063 * ZooKeeper based implementation of {@link SplitLogWorkerCoordination}
-064 * It listen for changes in ZooKeeper and
-065 *
-066 */
-067@InterfaceAudience.Private
-068public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements
-069    SplitLogWorkerCoordination {
-070
-071  private static final Log LOG = LogFactory.getLog(ZkSplitLogWorkerCoordination.class);
-072
-073  private static final int checkInterval = 5000; // 5 seconds
-074  private static final int FAILED_TO_OWN_TASK = -1;
-075
-076  private  SplitLogWorker worker;
-077
-078  private TaskExecutor splitTaskExecutor;
-079
-080  private final Object taskReadyLock = new Object();
-081  private AtomicInteger taskReadySeq = new AtomicInteger(0);
-082  private volatile String currentTask = null;
-083  private int currentVersion;
-084  private volatile boolean shouldStop = false;
-085  private final Object grabTaskLock = new Object();
-086  private boolean workerInGrabTask = false;
-087  private int reportPeriod;
-088  private RegionServerServices server = null;
-089  protected final AtomicInteger tasksInProgress = new AtomicInteger(0);
-090  private int maxConcurrentTasks = 0;
-091
-092  private final ZkCoordinatedStateManager manager;
-093
-094  public ZkSplitLogWorkerCoordination(ZkCoordinatedStateManager zkCoordinatedStateManager,
-095      ZooKeeperWatcher watcher) {
-096    super(watcher);
-097    manager = zkCoordinatedStateManager;
-098
-099  }
-100
-101  /**
-102   * Override handler from {@link ZooKeeperListener}
-103   */
-104  @Override
-105  public void nodeChildrenChanged(String path) {
-106    if (path.equals(watcher.znodePaths.splitLogZNode)) {
-107      if (LOG.isTraceEnabled()) LOG.trace("tasks arrived or departed on " + path);
-108      synchronized (taskReadyLock) {
-109        this.taskReadySeq.incrementAndGet();
-110        taskReadyLock.notify();
-111      }
-112    }
-113  }
-114
-115  /**
-116   * Override handler from {@link ZooKeeperListener}
-117   */
-118  @Override
-119  public void nodeDataChanged(String path) {
-120    // there will be a self generated dataChanged event every time attemptToOwnTask()
-121    // heartbeats the task znode by upping its version
-122    synchronized (grabTaskLock) {
-123      if (workerInGrabTask) {
-124        // currentTask can change
-125        String taskpath = currentTask;
-126        if (taskpath != null && taskpath.equals(path)) {
-127          getDataSetWatchAsync();
-128        }
-129      }
-130    }
-131  }
-132
-133  /**
-134   * Override setter from {@link SplitLogWorkerCoordination}
-135   */
-136  @Override
-137  public void init(RegionServerServices server, Configuration conf,
-138      TaskExecutor splitExecutor, SplitLogWorker worker) {
-139    this.server = server;
-140    this.worker = worker;
-141    this.splitTaskExecutor = splitExecutor;
-142    maxConcurrentTasks = conf.getInt("hbase.regionserver.wal.max.splitters", DEFAULT_MAX_SPLITTERS);
-143    reportPeriod =
-144        conf.getInt("hbase.splitlog.report.period",
-145          conf.getInt(HConstants.HBASE_SPLITLOG_MANAGER_TIMEOUT,
-146            ZKSplitLogManagerCoordination.DEFAULT_TIMEOUT) / 3);
-147  }
-148
-149  /* Support functions for ZooKeeper async callback */
-150
-151  void getDataSetWatchFailure(String path) {
-152    synchronized (grabTaskLock) {
-153      if (workerInGrabTask) {
-154        // currentTask can change but that's ok
-155        String taskpath = currentTask;
-156        if (taskpath != null && taskpath.equals(path)) {
-157          LOG.info("retrying data watch on " + path);
-158          SplitLogCounters.tot_wkr_get_data_retry.incrementA

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdmin.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdmin.html
index d3bf407..1d72d9d 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdmin.html
@@ -231,10 +231,14 @@
 ProcedureBiConsumer(AsyncAdmin admin) 
 
 
+SplitTableRegionProcedureBiConsumer(AsyncAdmin admin,
+   TableName tableName) 
+
+
 TableProcedureBiConsumer(AsyncAdmin admin,
 TableName tableName) 
 
-
+
 TruncateTableProcedureBiConsumer(AsyncAdmin admin,
 TableName tableName) 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/org/apache/hadoop/hbase/client/class-use/HBaseAdmin.ProcedureFuture.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/HBaseAdmin.ProcedureFuture.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/HBaseAdmin.ProcedureFuture.html
index 5ee28ba..60e2ccd 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/HBaseAdmin.ProcedureFuture.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/HBaseAdmin.ProcedureFuture.html
@@ -153,10 +153,14 @@
 HBaseAdmin.RestoreSnapshotFuture 
 
 
+private static class 
+HBaseAdmin.SplitTableRegionFuture 
+
+
 protected static class 
 HBaseAdmin.TableFuture 
 
-
+
 private static class 
 HBaseAdmin.TruncateTableFuture 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/org/apache/hadoop/hbase/client/class-use/HBaseAdmin.SplitTableRegionFuture.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/HBaseAdmin.SplitTableRegionFuture.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/HBaseAdmin.SplitTableRegionFuture.html
new file mode 100644
index 000..271d5ea
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/HBaseAdmin.SplitTableRegionFuture.html
@@ -0,0 +1,125 @@
+Uses of Class org.apache.hadoop.hbase.client.HBaseAdmin.SplitTableRegionFuture (Apache HBase 3.0.0-SNAPSHOT API)
+
+Uses of Class org.apache.hadoop.hbase.client.HBaseAdmin.SplitTableRegionFuture
+
+No usage of org.apache.hadoop.hbase.client.HBaseAdmin.SplitTableRegionFuture
+
+Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/org/apache/hadoop/hbase/client/class-use/HBaseAdmin.TableFuture.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/HBaseAdmin.TableFuture.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/HBaseAdmin.TableFuture.html
index c010360..5dd7d29 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/HBaseAdmin.TableFuture.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/HBaseAdmin.TableFuture.html
@@ -146,6 +146,10 @@
 
 
 private static class 
+HBaseAdmin.SplitTableRegionFuture 
+
+
+private static class 
 HBaseAdmin.TruncateTableFuture 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/org/apache/hadoop/hbase/client/class-use/HBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/HBaseAdmin.html b/devapidocs/org/apa

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
index 0a32350..cf44d69 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
@@ -75,735 +75,796 @@
 067import org.apache.hadoop.conf.Configuration;
 068import org.apache.hadoop.crypto.CryptoProtocolVersion;
 069import org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.FileSystem;
-071import org.apache.hadoop.fs.FileSystemLinkResolver;
-072import org.apache.hadoop.fs.Path;
-073import org.apache.hadoop.fs.UnresolvedLinkException;
-074import org.apache.hadoop.fs.permission.FsPermission;
-075import org.apache.hadoop.hbase.classification.InterfaceAudience;
-076import org.apache.hadoop.hbase.client.ConnectionUtils;
-077import org.apache.hadoop.hbase.util.CancelableProgressable;
-078import org.apache.hadoop.hbase.util.FSUtils;
-079import org.apache.hadoop.hdfs.DFSClient;
-080import org.apache.hadoop.hdfs.DFSOutputStream;
-081import org.apache.hadoop.hdfs.DistributedFileSystem;
-082import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-083import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-084import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-085import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-086import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-087import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-088import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
-089import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
-090import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
-091import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
-092import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
-093import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
-094import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
-095import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
-096import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
-097import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
-098import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
-099import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-100import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
-101import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
-102import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-103import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-104import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
-105import org.apache.hadoop.io.EnumSetWritable;
-106import org.apache.hadoop.ipc.RemoteException;
-107import org.apache.hadoop.net.NetUtils;
-108import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
-109import org.apache.hadoop.security.token.Token;
-110import org.apache.hadoop.util.DataChecksum;
-111
-112/**
-113 * Helper class for implementing {@link FanOutOneBlockAsyncDFSOutput}.
-114 */
-115@InterfaceAudience.Private
-116public final class FanOutOneBlockAsyncDFSOutputHelper {
-117
-118  private static final Log LOG = LogFactory.getLog(FanOutOneBlockAsyncDFSOutputHelper.class);
-119
-120  private FanOutOneBlockAsyncDFSOutputHelper() {
-121  }
-122
-123  // use pooled allocator for performance.
-124  private static final ByteBufAllocator ALLOC = PooledByteBufAllocator.DEFAULT;
-125
-126  // copied from DFSPacket since it is package private.
-127  public static final long HEART_BEAT_SEQNO = -1L;
-128
-129  // Timeouts for communicating with DataNode for streaming writes/reads
-130  public static final int READ_TIMEOUT = 60 * 1000;
-131  public static final int READ_TIMEOUT_EXTENSION = 5 * 1000;
-132  public static final int WRITE_TIMEOUT = 8 * 60 * 1000;
-133
-134  // helper class for getting Status from PipelineAckProto. In hadoop 2.6 or before, there is a
-135  // getStatus method, and for hadoop 2.7 or after, the status is retrieved from flag. The flag may
-136  // get from proto directly, or combined by the reply field of the proto and a ECN object. See
-137  // createPipelineAckStatusGetter for more details.
-138  private interface PipelineAckStatusGetter {
-139    Stat
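The version-compatibility trick described in the comments above, probing for a method and falling back, can be sketched generically with reflection; the class and method handling here is illustrative, not the actual helper implementation:

    import java.lang.reflect.Method;

    public class CompatProbe {
      // Returns a getter if the class has a no-arg "getStatus" method
      // (hadoop 2.6-style); otherwise callers would fall back to the
      // flag-based path described in the comments.
      static Method probeGetStatus(Class<?> ackProtoClass) {
        try {
          return ackProtoClass.getMethod("getStatus");
        } catch (NoSuchMethodException e) {
          return null; // hadoop 2.7+: derive status from the reply flag instead
        }
      }

      public static void main(String[] args) {
        System.out.println(probeGetStatus(String.class)); // null: no getStatus
      }
    }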

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
index e303773..2748f45 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -29,985 +29,1046 @@
 021import java.util.Collection;
 022import java.util.Map;
 023import java.util.Optional;
-024import java.util.concurrent.CompletableFuture;
-025import java.util.regex.Pattern;
-026
-027import org.apache.hadoop.hbase.ClusterStatus;
-028import org.apache.hadoop.hbase.HRegionInfo;
-029import org.apache.hadoop.hbase.ProcedureInfo;
-030import org.apache.hadoop.hbase.RegionLoad;
-031import org.apache.hadoop.hbase.ServerName;
-032import org.apache.hadoop.hbase.NamespaceDescriptor;
-033import org.apache.hadoop.hbase.TableName;
-034import org.apache.hadoop.hbase.classification.InterfaceAudience;
-035import org.apache.hadoop.hbase.quotas.QuotaFilter;
-036import org.apache.hadoop.hbase.quotas.QuotaSettings;
-037import org.apache.hadoop.hbase.client.replication.TableCFs;
-038import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-039import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-040import org.apache.hadoop.hbase.util.Pair;
-041
-042/**
-043 * The asynchronous administrative API for HBase.
-044 *
-045 * This feature is still under development, so marked as IA.Private. Will change to public when
-046 * done. Use it with caution.
-047 */
-048@InterfaceAudience.Public
-049public interface AsyncAdmin {
-050
-051  /**
-052   * @param tableName Table to check.
-053   * @return True if table exists already. The return value will be wrapped by a
-054   *         {@link CompletableFuture}.
-055   */
-056  CompletableFuture<Boolean> tableExists(TableName tableName);
-057
-058  /**
-059   * List all the userspace tables.
-060   * @return - returns a list of TableDescriptors wrapped by a {@link CompletableFuture}.
-061   * @see #listTables(Optional, boolean)
-062   */
-063  default CompletableFuture<List<TableDescriptor>> listTables() {
-064    return listTables(Optional.empty(), false);
-065  }
-066
-067  /**
-068   * List all the tables matching the given pattern.
-069   * @param pattern The compiled regular expression to match against
-070   * @param includeSysTables False to match only against userspace tables
-071   * @return - returns a list of TableDescriptors wrapped by a {@link CompletableFuture}.
-072   */
-073  CompletableFuture<List<TableDescriptor>> listTables(Optional<Pattern> pattern,
-074      boolean includeSysTables);
-075
-076  /**
-077   * List all of the names of userspace tables.
-078   * @return a list of table names wrapped by a {@link CompletableFuture}.
-079   * @see #listTableNames(Optional, boolean)
-080   */
-081  default CompletableFuture<List<TableName>> listTableNames() {
-082    return listTableNames(Optional.empty(), false);
-083  }
-084
-085  /**
-086   * List all of the names of userspace tables.
-087   * @param pattern The regular expression to match against
-088   * @param includeSysTables False to match only against userspace tables
-089   * @return a list of table names wrapped by a {@link CompletableFuture}.
-090   */
-091  CompletableFuture<List<TableName>> listTableNames(Optional<Pattern> pattern,
-092      boolean includeSysTables);
-093
-094  /**
-095   * Method for getting the tableDescriptor
-096   * @param tableName as a {@link TableName}
-097   * @return the read-only tableDescriptor wrapped by a {@link CompletableFuture}.
-098   */
-099  CompletableFuture<TableDescriptor> getTableDescriptor(TableName tableName);
-100
-101  /**
-102   * Creates a new table.
-103   * @param desc table descriptor for table
-104   */
-105  default CompletableFuture<Void> createTable(TableDescriptor desc) {
-106    return createTable(desc, Optional.empty());
-107  }
-108
-109  /**
-110   * Creates a new table with the specified number of regions. The start key specified will become
-111   * the end key of the first region of the table, and the end key specified will become the start
-112   * key of the last region of the table (the first region has a null start key and the last region
-113   * has a null end key). BigInteger math will be used to divide the key range specified into enough
-114   * segments to make the required number of total regions.
-115   * @param desc table descriptor for table
-116   * @param startKey beginning of key range
-117   * @param endKey end of key range
-118   * @param numRegions the total number of regions to create
-119   */
-120  CompletableFuture<Void> createTable(TableDescriptor desc, byte[] startKey, byte[] endKey,
-121      int n
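A short usage sketch of the interface as reconstructed above; it assumes an AsyncAdmin instance named admin obtained elsewhere, and imports (CompletableFuture, Optional, Pattern, TableName) matching the file header:

static void printUserTables(AsyncAdmin admin) {
  // Everything returns a CompletableFuture, so callers chain instead of blocking.
  admin.tableExists(TableName.valueOf("t1"))
      .thenAccept(exists -> System.out.println("t1 exists: " + exists));
  // List userspace table names matching a pattern, excluding system tables.
  admin.listTableNames(Optional.of(Pattern.compile("test_.*")), false)
      .thenAccept(names -> names.forEach(System.out::println));
}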


[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
index 71844ce..75db22d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
@@ -105,2564 +105,2642 @@
 097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
 099import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-105import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-106import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-107import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-108import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-109import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-110import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-111import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-112import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-113import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-114import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-115import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-139import org.apache.hadoop.hbase.shaded.protobuf.generated.Mast

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
index f5bc73a..feb42ea 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
@@ -4044,345 +4044,330 @@
 4036
 4037  @Override
 4038  public void drainRegionServers(List<ServerName> servers) throws IOException {
-4039    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4040    for (ServerName server : servers) {
-4041      // Parse to ServerName to do simple validation.
-4042      ServerName.parseServerName(server.toString());
-4043      pbServers.add(ProtobufUtil.toServerName(server));
-4044    }
-4045
-4046    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4047      @Override
-4048      public Void rpcCall() throws ServiceException {
-4049        DrainRegionServersRequest req =
-4050            DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build();
-4051        master.drainRegionServers(getRpcController(), req);
-4052        return null;
-4053      }
-4054    });
-4055  }
-4056
-4057  @Override
-4058  public List<ServerName> listDrainingRegionServers() throws IOException {
-4059    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
-4060      getRpcControllerFactory()) {
-4061      @Override
-4062      public List<ServerName> rpcCall() throws ServiceException {
-4063        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
-4064        List<ServerName> servers = new ArrayList<>();
-4065        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
-4066            .getServerNameList()) {
-4067          servers.add(ProtobufUtil.toServerName(server));
-4068        }
-4069        return servers;
-4070      }
-4071    });
-4072  }
-4073
-4074  @Override
-4075  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
-4076    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4077    for (ServerName server : servers) {
-4078      pbServers.add(ProtobufUtil.toServerName(server));
-4079    }
-4080
-4081    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4082      @Override
-4083      public Void rpcCall() throws ServiceException {
-4084        RemoveDrainFromRegionServersRequest req = RemoveDrainFromRegionServersRequest.newBuilder()
-4085            .addAllServerName(pbServers).build();
-4086        master.removeDrainFromRegionServers(getRpcController(), req);
-4087        return null;
+4039    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4040      @Override
+4041      public Void rpcCall() throws ServiceException {
+4042        master.drainRegionServers(getRpcController(),
+4043          RequestConverter.buildDrainRegionServersRequest(servers));
+4044        return null;
+4045      }
+4046    });
+4047  }
+4048
+4049  @Override
+4050  public List<ServerName> listDrainingRegionServers() throws IOException {
+4051    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
+4052      getRpcControllerFactory()) {
+4053      @Override
+4054      public List<ServerName> rpcCall() throws ServiceException {
+4055        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
+4056        List<ServerName> servers = new ArrayList<>();
+4057        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
+4058            .getServerNameList()) {
+4059          servers.add(ProtobufUtil.toServerName(server));
+4060        }
+4061        return servers;
+4062      }
+4063    });
+4064  }
+4065
+4066  @Override
+4067  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
+4068    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4069      @Override
+4070      public Void rpcCall() throws ServiceException {
+4071        master.removeDrainFromRegionServers(getRpcController(), RequestConverter.buildRemoveDrainFromRegionServersRequest(servers));
+4072        return null;
+4073      }
+4074    });
+4075  }
+4076
+4077  @Override
+4078  public List<TableCFs> listReplicatedTableCFs() throws IOException {
+4079    List<TableCFs> replicatedTableCFs = new ArrayList<>();
+4080    HTableDescriptor[] tables = listTables();
+4081    for (HTableDescriptor tabl
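The hunk above replaces inline proto assembly with a call to RequestConverter.buildDrainRegionServersRequest. A sketch of what such a converter plausibly contains, assembled from the removed lines; the exact signature inside RequestConverter is not shown in this diff:

public static DrainRegionServersRequest buildDrainRegionServersRequest(List<ServerName> servers) {
  // Same conversion the removed code did inline: ServerName -> proto ServerName.
  List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
  for (ServerName server : servers) {
    pbServers.add(ProtobufUtil.toServerName(server));
  }
  return DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build();
}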

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/CopyTable.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/CopyTable.html b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/CopyTable.html
index 9e00828..027a68b 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/CopyTable.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/CopyTable.html
@@ -168,7 +168,7 @@
 160      try (Connection conn = ConnectionFactory.createConnection(getConf());
 161          Admin admin = conn.getAdmin()) {
 162        HFileOutputFormat2.configureIncrementalLoadMap(job,
-163            admin.getTableDescriptor((TableName.valueOf(dstTableName))));
+163            admin.listTableDescriptor((TableName.valueOf(dstTableName))));
 164      }
 165    } else {
 166      TableMapReduceUtil.initTableMapperJob(tableName, scan,
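Joined up, the changed call site reads as below. Everything except the renamed Admin method comes straight from the hunk:

try (Connection conn = ConnectionFactory.createConnection(getConf());
    Admin admin = conn.getAdmin()) {
  // getTableDescriptor is swapped for listTableDescriptor; the argument is unchanged.
  HFileOutputFormat2.configureIncrementalLoadMap(job,
      admin.listTableDescriptor(TableName.valueOf(dstTableName)));
}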



[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
index 249d4a0..7369fdf 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
@@ -65,12 +65,12 @@
 057import com.google.common.base.Preconditions;
 058
 059/**
-060 * Reads {@link HFile} version 2 blocks to HFiles and via {@link Cacheable} Interface to caches.
-061 * Version 2 was introduced in hbase-0.92.0. No longer has support for version 1 blocks since
-062 * hbase-1.3.0.
-063 *
-064 * Version 1 was the original file block. Version 2 was introduced when we changed the hbase file
-065 * format to support multi-level block indexes and compound bloom filters (HBASE-3857).
+060 * Cacheable Blocks of an {@link HFile} version 2 file.
+061 * Version 2 was introduced in hbase-0.92.0.
+062 *
+063 * Version 1 was the original file block. Version 2 was introduced when we changed the hbase file
+064 * format to support multi-level block indexes and compound bloom filters (HBASE-3857). Support
+065 * for Version 1 was removed in hbase-1.3.0.
 066 *
 067 * HFileBlock: Version 2
 068 * In version 2, a block is structured as follows:
@@ -120,582 +120,582 @@
 112public class HFileBlock implements Cacheable {
 113  private static final Log LOG = LogFactory.getLog(HFileBlock.class);
 114
-115  /** Type of block. Header field 0. */
-116  private BlockType blockType;
-117
-118  /**
-119   * Size on disk excluding header, including checksum. Header field 1.
-120   * @see Writer#putHeader(byte[], int, int, int, int)
-121   */
-122  private int onDiskSizeWithoutHeader;
-123
-124  /**
-125   * Size of pure data. Does not include header or checksums. Header field 2.
-126   * @see Writer#putHeader(byte[], int, int, int, int)
-127   */
-128  private int uncompressedSizeWithoutHeader;
-129
-130  /**
-131   * The offset of the previous block on disk. Header field 3.
-132   * @see Writer#putHeader(byte[], int, int, int, int)
-133   */
-134  private long prevBlockOffset;
-135
-136  /**
-137   * Size on disk of header + data. Excludes checksum. Header field 6,
-138   * OR calculated from {@link #onDiskSizeWithoutHeader} when using HDFS checksum.
-139   * @see Writer#putHeader(byte[], int, int, int, int)
-140   */
-141  private int onDiskDataSizeWithHeader;
-142
-143
-144  /**
-145   * The in-memory representation of the hfile block. Can be on or offheap. Can be backed by
-146   * a single ByteBuffer or by many. Make no assumptions.
-147   *
-148   * Be careful reading from this buf. Duplicate and work on the duplicate or if
-149   * not, be sure to reset position and limit else trouble down the road.
-150   *
-151   * TODO: Make this read-only once made.
-152   *
-153   * We are using the ByteBuff type. ByteBuffer is not extensible yet we need to be able to have
-154   * a ByteBuffer-like API across multiple ByteBuffers reading from a cache such as BucketCache.
-155   * So, we have this ByteBuff type. Unfortunately, it is spread all about HFileBlock. Would be
-156   * good if could be confined to cache-use only but hard-to-do.
-157   */
-158  private ByteBuff buf;
-159
-160  /** Meta data that holds meta information on the hfileblock. */
-162  private HFileContext fileContext;
-163
-164  /**
-165   * The offset of this block in the file. Populated by the reader for
-166   * convenience of access. This offset is not part of the block header.
-167   */
-168  private long offset = UNSET;
-169
-170  private MemoryType memType = MemoryType.EXCLUSIVE;
-171
-172  /**
-173   * The on-disk size of the next block, including the header and checksums if present, obtained by
-174   * peeking into the first {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the next block's
-175   * header, or UNSET if unknown.
-176   *
-177   * Blocks try to carry the size of the next block to read in this data member. They will even have
-178   * this value when served from cache. Could save a seek in the case where we are iterating through
-179   * a file and some of the blocks come from cache. If from cache, then having this info to hand
-180   * will save us doing a seek to read the header so we can read the body of a block.
-181   * TODO: see how effective this is at saving seeks.
-182   */
-183  private int nextBlockOnDiskSize = UNSET;
-184
-185  /**
-186   * On a checksum failure, do these many succeeding read requests using hdfs checksums before
-187   * auto-reenabling hbase checksum verification.
-188   */
-189  static final int CHECKSUM_VE
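The field comments above name header fields 0 through 3 in order. A hedged sketch of pulling those fields out of a header buffer; the 8-byte magic length and the field order follow the comments, and this is not a verified on-disk layout:

import java.nio.ByteBuffer;

final class BlockHeaderSketch {
  static void describe(ByteBuffer header) {
    byte[] magic = new byte[8];
    header.get(magic);                                   // field 0: block type magic
    int onDiskSizeWithoutHeader = header.getInt();       // field 1: incl. checksum
    int uncompressedSizeWithoutHeader = header.getInt(); // field 2: pure data
    long prevBlockOffset = header.getLong();             // field 3
    System.out.printf("type=%s onDisk=%d uncompressed=%d prev=%d%n",
        new String(magic), onDiskSizeWithoutHeader,
        uncompressedSizeWithoutHeader, prevBlockOffset);
  }
}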


[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html b/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
index 9d116d5..e701ef4 100644
--- a/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
+++ b/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
@@ -1579,154 +1579,160 @@ service.
 
 
 
+(package private) class 
+AsyncAdminBuilderBase
+Base class for all asynchronous admin builders.
+
+
+
 class 
 AsyncAdminRequestRetryingCaller 
 
-
+
 (package private) class 
 AsyncBatchRpcRetryingCaller
 Retry caller for batch.
 
 
-
+
 (package private) class 
 AsyncClientScanner
 The asynchronous client scanner implementation.
 
 
-
+
 (package private) class 
 AsyncConnectionConfiguration
 Timeout configs.
 
 
-
+
 (package private) class 
 AsyncConnectionImpl
 The implementation of AsyncConnection.
 
 
-
+
 class 
 AsyncHBaseAdmin
 The implementation of AsyncAdmin.
 
 
-
+
 class 
 AsyncMasterRequestRpcRetryingCaller
 Retry caller for a request call to master.
 
 
-
+
 (package private) class 
 AsyncMetaRegionLocator
 The asynchronous locator for meta region.
 
 
-
+
 (package private) class 
 AsyncNonMetaRegionLocator
 The asynchronous locator for regions other than meta.
 
 
-
+
 (package private) class 
 AsyncProcess
This class allows a continuous flow of requests.
 
 
-
+
 class 
 AsyncProcessTask
Contains the attributes of a task which will be executed by AsyncProcess.
 
 
-
+
 (package private) class 
 AsyncRegionLocator
 The asynchronous region locator.
 
 
-
+
 (package private) interface 
 AsyncRegistry
Implementations hold cluster information such as this cluster's id, location of hbase:meta, etc.
 
 
-
+
 (package private) class 
 AsyncRegistryFactory
 Get instance of configured Registry.
 
 
-
+
 interface 
 AsyncRequestFuture
The context used to wait for results from one submit call.
 
 
-
+
 (package private) class 
 AsyncRequestFutureImpl
The context, and return value, for a single submit/submitAll call.
 
 
-
+
 class 
 AsyncRpcRetryingCaller 
 
-
+
 (package private) class 
 AsyncRpcRetryingCallerFactory
 Factory to create an AsyncRpcRetryCaller.
 
 
-
+
 (package private) class 
 AsyncScanSingleRegionRpcRetryingCaller
 Retry caller for scanning a region.
 
 
-
+
 (package private) class 
 AsyncSingleRequestRpcRetryingCaller
Retry caller for a single request, such as get, put, delete, etc.
 
 
-
+
 (package private) class 
 AsyncTableBuilderBase
 Base class for all asynchronous table builders.
 
 
-
+
 (package private) class 
 AsyncTableImpl
 The implementation of AsyncTable.
 
 
-
+
 (package private) class 
 AsyncTableRegionLocatorImpl
 The implementation of AsyncRegionLocator.
 
 
-
+
 (package private) class 
 AsyncTableResultScanner
The ResultScanner implementation for AsyncTable.
 
 
-
+
 class 
 BatchScanResultCache
A scan result cache for batched scan, i.e., scan.getBatch() > 0 && !scan.getAllowPartialResults().
 
 
-
+
 class 
 BufferedMutatorImpl
 
@@ -1734,255 +1740,261 @@ service.
  but meant for batched, potentially asynchronous puts.
 
 
-
+
 (package private) interface 
 Cancellable
This should be implemented by the Get/Scan implementations that talk to replica regions.
 
 
-
+
 (package private) class 
 CancellableRegionServerCallable
This class is used to unify HTable calls with AsyncProcess Framework.
 
 
-
+
 class 
 ClientAsyncPrefetchScanner
ClientAsyncPrefetchScanner implements async scanner behaviour.
 
 
-
+
 class 
 ClientCoprocessorRpcController
Client side rpc controller for coprocessor implementation.
 
 
-
+
 (package private) class 
 ClientIdGenerator
The class that is able to determine some unique strings for the client, such as an IP address, PID, and composite deterministic ID.
 
 
-
+
 class 
 ClientScanner
 Implements the scanner interface for the HBase client.
 
 
-
+
 class 
 ClientServiceCallable
 A RegionServerCallable set to use the Client protocol.
 
 
-
+
 class 
 ClientSideRegionScanner
A client scanner for a region opened for read-only on the client side.
 
 
-
+
 class 
 ClientSimpleScanner
ClientSimpleScanner implements a sync scanner behaviour.
 
 
-
+
 class 
 ClientUtil 
 
-
+
 interface 
 ClusterConnection
Internal methods on Connection that should not be used by user code.
 
 
-
+
 (package private) class 
 ClusterStatusListener
A class that receives the cluster status, and provides it as a set of services to the client.
 
 
-
+
 static class 
 ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor
 An ModifyableFamilyDescr

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
index dc12c09..82506d2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
@@ -54,2261 +54,2259 @@
 046import org.apache.commons.io.IOUtils;
 047import org.apache.commons.logging.Log;
 048import org.apache.commons.logging.LogFactory;
-049import org.apache.directory.api.util.OptionalComponentsMonitor;
-050import org.apache.hadoop.hbase.HRegionInfo;
-051import org.apache.hadoop.hbase.HRegionLocation;
-052import org.apache.hadoop.hbase.MetaTableAccessor;
-053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import org.apache.hadoop.hbase.NotServingRegionException;
-055import org.apache.hadoop.hbase.ProcedureInfo;
-056import org.apache.hadoop.hbase.RegionLocations;
-057import org.apache.hadoop.hbase.ServerName;
-058import org.apache.hadoop.hbase.NamespaceDescriptor;
-059import org.apache.hadoop.hbase.HConstants;
-060import org.apache.hadoop.hbase.TableExistsException;
-061import org.apache.hadoop.hbase.TableName;
-062import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-063import org.apache.hadoop.hbase.TableNotDisabledException;
-064import org.apache.hadoop.hbase.TableNotEnabledException;
-065import org.apache.hadoop.hbase.TableNotFoundException;
-066import org.apache.hadoop.hbase.UnknownRegionException;
-067import org.apache.hadoop.hbase.classification.InterfaceAudience;
-068import org.apache.hadoop.hbase.classification.InterfaceStability;
-069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import org.apache.hadoop.hbase.client.Scan.ReadType;
-072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import org.apache.hadoop.hbase.client.replication.TableCFs;
-074import org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import org.apache.hadoop.hbase.replication.ReplicationException;
-080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-104import org.apache

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 4c523d9..c37b4f3 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -18,8 +18,8 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":6,"i49":6,"i50":6,"i51":6,"i52":6,"i53":6,"i54":6,"i55":6,"i56":6,"i57":6,"i58":6,"i59":6,"i60":6,"i61":6,"i62":6,"i63":6,"i64":6,"i65":6,"i66":6,"i67":6,"i68":6,"i69":6,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":6,"i78":6,"i79":6,"i80":6,"i81":6,"i82":6,"i83":6,"i84":6,"i85":6,"i86":6,"i87":6,"i88":6,"i89":6,"i90":6,"i91":6,"i92":6,"i93":6,"i94":6,"i95":6,"i96":6,"i97":6,"i98":6,"i99":6,"i100":6};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"]};
+var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":18,"i6":6,"i7":6,"i8":6,"i9":18,"i10":6,"i11":18,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":6,"i49":6,"i50":6,"i51":6,"i52":18,"i53":6,"i54":6,"i55":6,"i56":18,"i57":6,"i58":18,"i59":6,"i60":6,"i61":18,"i62":6,"i63":18,"i64":6,"i65":6,"i66":6,"i67":6,"i68":6,"i69":6,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":6,"i78":6,"i79":6,"i80":6,"i81":6,"i82":18,"i83":6,"i84":6,"i85":6,"i86":6,"i87":6};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
@@ -105,8 +105,8 @@ var activeTableTab = "activeTableTab";
 
 
 
-@InterfaceAudience.Private
-public interface AsyncAdmin
+@InterfaceAudience.Public
+public interface AsyncAdmin
 The asynchronous administrative API for HBase.
  
 This feature is still under development, so marked as IA.Private. Will change to public when
@@ -124,7 +124,7 @@ public interface 
-All Methods Instance Methods Abstract Methods 
+All Methods Instance Methods Abstract Methods Default Methods 
 
 Modifier and Type
 Method and Description
@@ -162,14 +162,14 @@ public interface assign(byte[] regionName) 
 
 
-CompletableFuture<Boolean>
-balancer()
+default CompletableFuture<Boolean>
+balance()
 Invoke the balancer.
 
 
 
 CompletableFuture<Boolean>
-balancer(boolean force)
+balance(boolean forcible)
 Invoke the balancer.
 
 
@@ -181,85 +181,64 @@ public interface 
-CompletableFuture<Void>
-closeRegion(byte[] regionname,
-   String serverName)
+CompletableFuture<Boolean>
+closeRegion(byte[] regionName,
+   Optional
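The diff above renames balancer() to balance() and moves the no-argument form to a default method (hence the new "Default Methods" tab). A plausible shape for the pair; the delegation body is an assumption, not shown in the diff:

// Default overload; delegating to balance(false) is assumed.
default CompletableFuture<Boolean> balance() {
  return balance(false);
}

// Invoke the balancer, optionally forcing a run.
CompletableFuture<Boolean> balance(boolean forcible);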

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.LocalityType.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.LocalityType.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.LocalityType.html
new file mode 100644
index 000..d0f1508
--- /dev/null
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.LocalityType.html
@@ -0,0 +1,1794 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software Foundation (ASF) under one
+003 * or more contributor license agreements.  See the NOTICE file
+004 * distributed with this work for additional information
+005 * regarding copyright ownership.  The ASF licenses this file
+006 * to you under the Apache License, Version 2.0 (the
+007 * "License"); you may not use this file except in compliance
+008 * with the License.  You may obtain a copy of the License at
+009 *
+010 * http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or agreed to in writing, software
+013 * distributed under the License is distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+015 * See the License for the specific language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase.master.balancer;
+019
+020import java.util.ArrayList;
+021import java.util.Arrays;
+022import java.util.Collection;
+023import java.util.Collections;
+024import java.util.Comparator;
+025import java.util.Deque;
+026import java.util.HashMap;
+027import java.util.HashSet;
+028import java.util.Iterator;
+029import java.util.List;
+030import java.util.Map;
+031import java.util.Map.Entry;
+032import java.util.NavigableMap;
+033import java.util.Random;
+034import java.util.Set;
+035import java.util.TreeMap;
+036import java.util.function.Predicate;
+037
+038import org.apache.commons.lang.NotImplementedException;
+039import org.apache.commons.logging.Log;
+040import org.apache.commons.logging.LogFactory;
+041import org.apache.hadoop.conf.Configuration;
+042import org.apache.hadoop.hbase.ClusterStatus;
+043import org.apache.hadoop.hbase.HBaseIOException;
+044import org.apache.hadoop.hbase.HDFSBlocksDistribution;
+045import org.apache.hadoop.hbase.HRegionInfo;
+046import org.apache.hadoop.hbase.ServerLoad;
+047import org.apache.hadoop.hbase.ServerName;
+048import org.apache.hadoop.hbase.TableName;
+049import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+050import org.apache.hadoop.hbase.master.LoadBalancer;
+051import org.apache.hadoop.hbase.master.MasterServices;
+052import org.apache.hadoop.hbase.master.RackManager;
+053import org.apache.hadoop.hbase.master.RegionPlan;
+054import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
+055import org.apache.hadoop.hbase.security.access.AccessControlLists;
+056import org.apache.hadoop.util.StringUtils;
+057
+058import com.google.common.annotations.VisibleForTesting;
+059import com.google.common.base.Joiner;
+060import com.google.common.collect.ArrayListMultimap;
+061import com.google.common.collect.Lists;
+062import com.google.common.collect.Sets;
+063
+064/**
+065 * The base class for load balancers. It provides the functions used by
+066 * {@link org.apache.hadoop.hbase.master.assignment.AssignmentManager} to assign regions
+067 * in the edge cases. It doesn't provide an implementation of the
+068 * actual balancing algorithm.
+069 *
+070 */
+071public abstract class BaseLoadBalancer implements LoadBalancer {
+072  protected static final int MIN_SERVER_BALANCE = 2;
+073  private volatile boolean stopped = false;
+074
+075  private static final List<HRegionInfo> EMPTY_REGION_LIST = new ArrayList<>(0);
+076
+077  static final Predicate<ServerLoad> IDLE_SERVER_PREDICATOR
+078    = load -> load.getNumberOfRegions() == 0;
+079
+080  protected final RegionLocationFinder regionFinder = new RegionLocationFinder();
+081
+082  private static class DefaultRackManager extends RackManager {
+083    @Override
+084    public String getRack(ServerName server) {
+085      return UNKNOWN_RACK;
+086    }
+087  }
+088
+089  /**
+090   * The constructor that uses the basic MetricsBalancer
+091   */
+092  protected BaseLoadBalancer() {
+093    metricsBalancer = new MetricsBalancer();
+094  }
+095
+096  /**
+097   * This Constructor accepts an instance of MetricsBalancer,
+098   * which will be used instead of creating a new one
+099   */
+100  protected BaseLoadBalancer(MetricsBalancer metricsBalancer) {
+101    this.metricsBalancer = (metricsBalancer != null) ? metricsBalancer : new MetricsBalancer();

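IDLE_SERVER_PREDICATOR above is a plain java.util.function.Predicate. A self-contained sketch of the same idiom with a stand-in Load class instead of the real ServerLoad:

import java.util.Arrays;
import java.util.List;
import java.util.function.Predicate;

public class IdleServersSketch {
  // Stand-in for ServerLoad; only the region count matters here.
  static class Load {
    final String server;
    final int numberOfRegions;
    Load(String server, int numberOfRegions) {
      this.server = server;
      this.numberOfRegions = numberOfRegions;
    }
  }

  static final Predicate<Load> IDLE = load -> load.numberOfRegions == 0;

  public static void main(String[] args) {
    List<Load> loads = Arrays.asList(new Load("rs1", 12), new Load("rs2", 0));
    loads.stream().filter(IDLE).forEach(l -> System.out.println(l.server));
  }
}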
[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
index 1e72f7d..f4cde49 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
@@ -1241,69 +1241,73 @@
 StripeStoreFileManager.getSplitPoint(Collection<StoreFile> sfs) 
 
+private long
+HStore.getStorefilesSize(Predicate<StoreFile> predicate) 
+
+
 Collection<StoreFile>
 DefaultStoreFileManager.getUnneededFiles(long maxTs,
     List<StoreFile> filesCompacting) 
 
-
+
 Collection<StoreFile>
 StoreFileManager.getUnneededFiles(long maxTs,
     List<StoreFile> filesCompacting) 
 
-
+
 Collection<StoreFile>
 StripeStoreFileManager.getUnneededFiles(long maxTs,
     List<StoreFile> filesCompacting) 
 
-
+
 static boolean
 StoreUtils.hasReferences(Collection<StoreFile> files)
 Determines whether any files in the collection are references.
 
-
+
 private static void
 StripeStoreFileManager.insertFileIntoStripe(ArrayList<StoreFile> stripe,
     StoreFile sf)
 Inserts a file in the correct place (by seqnum) in a stripe copy.
 
-
+
 void
 DefaultStoreFileManager.insertNewFiles(Collection<StoreFile> sfs) 
 
-
+
 void
 StoreFileManager.insertNewFiles(Collection<StoreFile> sfs)
 Adds new files, either from MemStore flush or bulk insert, into the structure.
 
-
+
 void
 StripeStoreFileManager.insertNewFiles(Collection<StoreFile> sfs) 
 
-
+
 void
 DefaultStoreFileManager.loadFiles(List<StoreFile> storeFiles) 
 
-
+
 void
 StoreFileManager.loadFiles(List<StoreFile> storeFiles)
 Loads the initial store files into empty StoreFileManager.
 
-
+
 void
 StripeStoreFileManager.loadFiles(List<StoreFile> storeFiles) 
 
-
+
 private void
 StripeStoreFileManager.loadUnclassifiedStoreFiles(List<StoreFile> storeFiles)
 Loads initial store files that were picked up from some physical location pertaining to
 this store (presumably).
 
-
+
 private void
 HStore.logCompactionEndMessage(CompactionRequest cr,
     List<StoreFile> sfs,
@@ -1312,45 +1316,45 @@
 Log a very elaborate compaction completion message.
 
-
+
 private void
 DefaultStoreFileManager.markCompactedAway(Collection<StoreFile> compactedFiles) 
 
-
+
 private void
 StripeStore
[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
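The new HStore.getStorefilesSize(Predicate<StoreFile>) row suggests predicate-driven size accounting. A sketch of that shape; getStorefiles() and the getReader().length() accessor are assumptions for illustration:

private long getStorefilesSize(Predicate<StoreFile> predicate) {
  long size = 0;
  for (StoreFile sf : getStorefiles()) {
    if (predicate.test(sf)) {
      size += sf.getReader().length();   // on-disk size of the matching file
    }
  }
  return size;
}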

2017-06-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/bulk-loads.html
--
diff --git a/bulk-loads.html b/bulk-loads.html
--- a/bulk-loads.html
+++ b/bulk-loads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase –  
   Bulk Loads in Apache HBase (TM)
@@ -311,7 +311,7 @@ under the License. -->
https://www.apache.org/">The Apache Software Foundation. All rights reserved.
 
-  Last Published: 2017-06-08
+  Last Published: 2017-06-09
 
 
 



[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/apidocs/org/apache/hadoop/hbase/security/package-tree.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/security/package-tree.html b/apidocs/org/apache/hadoop/hbase/security/package-tree.html
index a5c597f..f1bfeed 100644
--- a/apidocs/org/apache/hadoop/hbase/security/package-tree.html
+++ b/apidocs/org/apache/hadoop/hbase/security/package-tree.html
@@ -4,7 +4,7 @@
 
 
 
-org.apache.hadoop.hbase.security Class Hierarchy (Apache HBase 2.0.0-SNAPSHOT API)
+org.apache.hadoop.hbase.security Class Hierarchy (Apache HBase 3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
-Uses of Package org.apache.hadoop.hbase.security (Apache HBase 2.0.0-SNAPSHOT API)
+Uses of Package org.apache.hadoop.hbase.security (Apache HBase 3.0.0-SNAPSHOT API)