[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/client/Scan.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Scan.html b/devapidocs/org/apache/hadoop/hbase/client/Scan.html
index 9bc6240..2a20fad 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Scan.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Scan.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":42,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":42,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":42,"i69":42,"i70":42,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":42,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":42,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":42,"i58":42,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":42,"i71":42,"i72":42,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -598,63 +598,75 @@
 numFamilies()
 
+Scan
+readAllVersions()
+Get all available versions.
+
+Scan
+readVersions(int versions)
+Get up to the specified number of versions of each column.
+
 (package private) Scan
 resetMvccReadPoint()
 Set the mvcc read point to -1 which means do not use it.
 
 Scan
 setACL(Map<String, Permission> perms)
 
 Scan
 setACL(String user, Permission perms)
 
 Scan
 setAllowPartialResults(boolean allowPartialResults)
 Setting whether the caller wants to see the partial results when server returns less-than-expected cells.
 
 Scan
 setAsyncPrefetch(boolean asyncPrefetch)
 
 Scan
 setAttribute(String name, byte[] value)
 Sets an attribute.
 
 Scan
 setAuthorizations(Authorizations authorizations)
 Sets the authorizations to be used by this Query
 
 Scan
 setBatch(int batch)
 Set the maximum number of cells to return for each call to next().
 
 Scan
 setCacheBlocks(boolean cacheBlocks)
 Set whether blocks should be cached for this Scan.
 
 Scan
 setCaching(int caching)
 Set the number of rows for caching that will be passed to scanners.
 
 Scan
 setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp)
@@ -663,145 +675,151 @@
 
 Scan
 setColumnFamilyTimeRange(byte[] cf, TimeRange tr)
 
 Scan
 setConsistency(Consistency consistency)
 Sets the consistency level for this operation
 
 Scan
 setFamilyMap(Map<byte[], NavigableSet<byte[]>> familyMap)
 Setting the familyMap
 
 Scan
 setFilter(Filter filter)
 Apply the specified server-side filter when performing the Query.
 
 Scan
 setId(String id)
 This method allows you to set an identifier on an operation.
 
 Scan
 setIsolationLevel(IsolationLevel level)
 Set the isolation level for this query.
 
 Scan
 setLimit(int limit)
 Set the limit of rows for this scan.
 
 Scan

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 152efc8..7e1ad78 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -25,8 +25,8 @@ under the License.
 en-us
 2007 - 2017 The Apache Software Foundation
 
-  File: 2250,
- Errors: 14897,
+  File: 2024,
+ Errors: 12792,
  Warnings: 0,
  Infos: 0
   
@@ -186,20 +186,6 @@ under the License.
 
-  org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormat.java (http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat.java)
-    0
-    0
-    12
-
   org/apache/hadoop/hbase/backup/BackupAdmin.java (http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.backup.BackupAdmin.java)
 
@@ -256,20 +242,6 @@ under the License.
 
-  org/apache/hadoop/hbase/ipc/protobuf/generated/TestRpcServiceProtos.java (http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos.java)
-    0
-    0
-    0
-
   org/apache/hadoop/hbase/regionserver/DelimitedKeyPrefixRegionSplitPolicy.java (http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.regionserver.DelimitedKeyPrefixRegionSplitPolicy.java)
 
@@ -377,7 +349,7 @@ under the License.
   0
 
-  26
+  27
 
@@ -452,20 +424,6 @@ under the License.
 
-  org/apache/hadoop/hbase/protobuf/ProtobufMagic.java (http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.protobuf.ProtobufMagic.java)
-    0
-    0
-    3
-
   org/apache/hadoop/hbase/client/ClientSimpleScanner.java (http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.ClientSimpleScanner.java)
 
@@ -480,7 +438,7 @@ under the License.
 
-  org/apache/hadoop/hbase/client/RegionLocateType.java (http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.RegionLocateType.java)
+  org/apache/hadoop/hbase/protobuf/ProtobufMagic.java (http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.protobuf.ProtobufMagic.java)
 
   0
 
@@ -489,12 +447,12 @@ under the License.
   0
 
-  0
+  3
 
-  org/apache/hadoop/hbase/rest/protobuf/generated/ColumnSchemaMessage.java (http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.java)
+  org/apache/hadoop/hbase/client/RegionLocateType.java (http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.RegionLocateType.java)
 
   0
 
@@ -550,20 +508,6 @@ under the License.
 
-  org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormatEscaper.java (http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormatEscaper.java)
-    0
-    0
-    0
-
   org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java (http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.snapshot.HBaseSnapshotException.java)
 
@@ -788,7 +732,7 @@ under the License.
 
[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
index 35d5549..7f42873 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
@@ -115,2816 +115,2814 @@
 107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
 109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
index 5c95397..860416b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
@@ -293,7944 +293,7962 @@
 285  final AtomicLong compactionsFailed = 
new AtomicLong(0L);
 286  final AtomicLong 
compactionNumFilesCompacted = new AtomicLong(0L);
 287  final AtomicLong 
compactionNumBytesCompacted = new AtomicLong(0L);
-288
-289  private final WAL wal;
-290  private final HRegionFileSystem fs;
-291  protected final Configuration conf;
-292  private final Configuration baseConf;
-293  private final int 
rowLockWaitDuration;
-294  static final int 
DEFAULT_ROWLOCK_WAIT_DURATION = 3;
-295
-296  // The internal wait duration to 
acquire a lock before read/update
-297  // from the region. It is not per row. 
The purpose of this wait time
-298  // is to avoid waiting a long time 
while the region is busy, so that
-299  // we can release the IPC handler soon 
enough to improve the
-300  // availability of the region server. 
It can be adjusted by
-301  // tuning configuration 
"hbase.busy.wait.duration".
-302  final long busyWaitDuration;
-303  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-304
-305  // If updating multiple rows in one 
call, wait longer,
-306  // i.e. waiting for busyWaitDuration * 
# of rows. However,
-307  // we can limit the max multiplier.
-308  final int maxBusyWaitMultiplier;
-309
-310  // Max busy wait duration. There is no 
point to wait longer than the RPC
-311  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
-312  final long maxBusyWaitDuration;
-313
-314  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
-315  // in bytes
-316  final long maxCellSize;
-317
-318  // negative number indicates infinite 
timeout
-319  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-320  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
-321
-322  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
+288  final AtomicLong compactionsQueued = 
new AtomicLong(0L);
+289  final AtomicLong flushesQueued = new 
AtomicLong(0L);
+290
+291  private final WAL wal;
+292  private final HRegionFileSystem fs;
+293  protected final Configuration conf;
+294  private final Configuration baseConf;
+295  private final int 
rowLockWaitDuration;
+296  static final int 
DEFAULT_ROWLOCK_WAIT_DURATION = 3;
+297
+298  // The internal wait duration to 
acquire a lock before read/update
+299  // from the region. It is not per row. 
The purpose of this wait time
+300  // is to avoid waiting a long time 
while the region is busy, so that
+301  // we can release the IPC handler soon 
enough to improve the
+302  // availability of the region server. 
It can be adjusted by
+303  // tuning configuration 
"hbase.busy.wait.duration".
+304  final long busyWaitDuration;
+305  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+306
+307  // If updating multiple rows in one 
call, wait longer,
+308  // i.e. waiting for busyWaitDuration * 
# of rows. However,
+309  // we can limit the max multiplier.
+310  final int maxBusyWaitMultiplier;
+311
+312  // Max busy wait duration. There is no 
point to wait longer than the RPC
+313  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
+314  final long maxBusyWaitDuration;
+315
+316  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
+317  // in bytes
+318  final long maxCellSize;
+319
+320  // negative number indicates infinite 
timeout
+321  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+322  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
 323
-324  /**
-325   * The sequence ID that was 
enLongAddered when this region was opened.
-326   */
-327  private long openSeqNum = 
HConstants.NO_SEQNUM;
-328
-329  /**
-330   * The default setting for whether to 
enable on-demand CF loading for
-331   * scan requests to this region. 
Requests can override it.
-332   */
-333  private boolean 
isLoadingCfsOnDemandDefault = false;
-334
-335  private final AtomicInteger 
majorInProgress = new AtomicInteger(0);
-336  private final AtomicInteger 
minorInProgress = new AtomicInteger(0);
-337
-338  //
-339  // Context: During replay we want to 
ensure that we do not lose any data. So, we
-340  // have to be conservative in how we 
replay wals. For each store, we calculate
-341  // the maxSeqId up to which the 

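
The comment block above names the one tunable it documents, "hbase.busy.wait.duration". A hedged sketch of setting it (the value is illustrative, not a recommendation):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BusyWaitTuning {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per the source comments: the default tracks the RPC timeout, and a
        // multi-row batch may wait busyWaitDuration * rows, capped by
        // maxBusyWaitMultiplier and maxBusyWaitDuration.
        conf.setLong("hbase.busy.wait.duration", 30000L); // illustrative value
        System.out.println(conf.getLong("hbase.busy.wait.duration", -1L));
      }
    }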
[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/org/apache/hadoop/hbase/regionserver/compactions/Compactor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/compactions/Compactor.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/compactions/Compactor.html
index c56aa24..33d9997 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/compactions/Compactor.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/compactions/Compactor.html
@@ -634,7 +634,7 @@ extends Object
 
 
 commitWriter
-protected abstract List<org.apache.hadoop.fs.Path> commitWriter(T writer,
+protected abstract List<org.apache.hadoop.fs.Path> commitWriter(T writer,
                 Compactor.FileDetails fd,
                 CompactionRequest request)
          throws IOException
@@ -652,7 +652,7 @@ extends Object
 
 
 abortWriter
-protected abstract void abortWriter(T writer)
+protected abstract void abortWriter(T writer)
          throws IOException
 
 Throws:
@@ -666,7 +666,7 @@ extends Object
 
 
 preCreateCoprocScanner
-protected InternalScanner preCreateCoprocScanner(CompactionRequest request,
+protected InternalScanner preCreateCoprocScanner(CompactionRequest request,
                 ScanType scanType,
                 long earliestPutTs,
                 List<StoreFileScanner> scanners,
@@ -695,7 +695,7 @@ extends Object
 
 
 postCreateCoprocScanner
-protected InternalScanner postCreateCoprocScanner(CompactionRequest request,
+protected InternalScanner postCreateCoprocScanner(CompactionRequest request,
                 ScanType scanType,
                 InternalScanner scanner,
                 User user)
@@ -719,7 +719,7 @@ extends Object
 
 
 performCompaction
-protected boolean performCompaction(Compactor.FileDetails fd,
+protected boolean performCompaction(Compactor.FileDetails fd,
                 InternalScanner scanner,
                 CellSink writer,
                 long smallestReadPoint,
@@ -752,7 +752,7 @@ extends Object
 
 
 createScanner
-protected InternalScanner createScanner(Store store,
+protected InternalScanner createScanner(Store store,
                 List<StoreFileScanner> scanners,
                 ScanType scanType,
                 long smallestReadPoint,
@@ -778,7 +778,7 @@ extends Object
 
 
 createScanner
-protected InternalScanner createScanner(Store store,
+protected InternalScanner createScanner(Store store,
                 List<StoreFileScanner> scanners,
                 long smallestReadPoint,
                 long earliestPutTs,

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.html
index b3aaecd..f7f639e 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.html
@@ 

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
 
b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
index 37c3366..90cb88d 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private final class ProcedureExecutor.WorkerThread
+private final class ProcedureExecutor.WorkerThread
 extends ProcedureExecutor.StoppableThread
 
 
@@ -271,7 +271,7 @@
 
 executionStartTime
-private final AtomicLong executionStartTime
+private final AtomicLong executionStartTime
 
 
 
@@ -280,7 +280,7 @@
 
 activeProcedure
-private Procedure activeProcedure
+private Procedure activeProcedure
 
 
 
@@ -297,7 +297,7 @@
 
 WorkerThread
-public WorkerThread(ThreadGroup group)
+public WorkerThread(ThreadGroup group)
 
 
 
@@ -314,7 +314,7 @@
 
 sendStopSignal
-public void sendStopSignal()
+public void sendStopSignal()
 
 Specified by:
 sendStopSignal in class ProcedureExecutor.StoppableThread
@@ -327,7 +327,7 @@
 
 run
-public void run()
+public void run()
 
 Specified by:
 run in interface Runnable
@@ -342,7 +342,7 @@
 
 toString
-public String toString()
+public String toString()
 
 Overrides:
 toString in class Thread
@@ -355,7 +355,7 @@
 
 getCurrentRunTime
-public long getCurrentRunTime()
+public long getCurrentRunTime()
 
 Returns:
 the time since the current procedure is running
@@ -368,7 +368,7 @@
 
 keepAlive
-private boolean keepAlive(long lastUpdate)
+private boolean keepAlive(long lastUpdate)
 
 
 



[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Region.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Region.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Region.html
index 744adb8..0753549 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Region.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Region.html
@@ -897,45 +897,37 @@
 boolean forceFlushAllStores)
 
-void
-CompactSplit.requestRegionsMerge(Region a,
-   Region b,
-   boolean forcible,
-   long masterSystemTime,
-   User user)
-
 boolean
 CompactSplit.requestSplit(Region r)
 
 void
 CompactSplit.requestSplit(Region r,
    byte[] midKey)
 
 void
 CompactSplit.requestSplit(Region r,
    byte[] midKey,
    User user)
 
 void
 CompactSplit.requestSystemCompaction(Region r,
    Store s,
    String why)
 
 void
 CompactSplit.requestSystemCompaction(Region r,
    String why)
 
 Map<byte[], List<org.apache.hadoop.fs.Path>>
 SecureBulkLoadManager.secureBulkLoadHFiles(Region region,
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest request)
 
 private CompactionContext
 CompactSplit.selectCompaction(Region r,
    Store s,
@@ -943,7 +935,7 @@
    CompactionRequest request,
    User user)
 
 private void
 HRegionServer.updateRecoveringRegionLastFlushedSequenceId(Region r)
 A helper function to store the last flushed sequence Id with the previous failed RS for a

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 0006a5a..22817f5 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -692,20 +692,20 @@
 
java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
+org.apache.hadoop.hbase.regionserver.ScanType
 org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
-org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
-org.apache.hadoop.hbase.regionserver.FlushType
-org.apache.hadoop.hbase.regionserver.RegionOpeningState
+org.apache.hadoop.hbase.regionserver.BloomType
+org.apache.hadoop.hbase.regionserver.Region.FlushResult.Result
+org.apache.hadoop.hbase.regionserver.Region.Operation
 org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
+org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
 org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
-org.apache.hadoop.hbase.regionserver.StoreScanner.StoreScannerCompactionRace
+org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
 org.apache.hadoop.hbase.regionserver.MemStoreCompactor.Action
 org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
-org.apache.hadoop.hbase.regionserver.Region.FlushResult.Result
-org.apache.hadoop.hbase.regionserver.ScanType
-org.apache.hadoop.hbase.regionserver.Region.Operation
-org.apache.hadoop.hbase.regionserver.BloomType
-org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
+org.apache.hadoop.hbase.regionserver.RegionOpeningState
+org.apache.hadoop.hbase.regionserver.FlushType
+org.apache.hadoop.hbase.regionserver.StoreScanner.StoreScannerCompactionRace
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
--
diff --git 

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
index f26f8aa..8f0943d 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
@@ -284,944 +284,951 @@
 276this.mvccReadPoint = 
scan.getMvccReadPoint();
 277this.limit = scan.getLimit();
 278this.needCursorResult = 
scan.isNeedCursorResult();
-279  }
-280
-281  /**
-282   * Builds a scan object with the same 
specs as get.
-283   * @param get get to model scan after
-284   */
-285  public Scan(Get get) {
-286this.startRow = get.getRow();
-287this.includeStartRow = true;
-288this.stopRow = get.getRow();
-289this.includeStopRow = true;
-290this.filter = get.getFilter();
-291this.cacheBlocks = 
get.getCacheBlocks();
-292this.maxVersions = 
get.getMaxVersions();
-293this.storeLimit = 
get.getMaxResultsPerColumnFamily();
-294this.storeOffset = 
get.getRowOffsetPerColumnFamily();
-295this.tr = get.getTimeRange();
-296this.familyMap = 
get.getFamilyMap();
-297this.asyncPrefetch = false;
-298this.consistency = 
get.getConsistency();
-299
this.setIsolationLevel(get.getIsolationLevel());
-300this.loadColumnFamiliesOnDemand = 
get.getLoadColumnFamiliesOnDemandValue();
-301for (Map.Entry<String, byte[]> attr : get.getAttributesMap().entrySet()) {
-302  setAttribute(attr.getKey(), attr.getValue());
-303}
-304for (Map.Entry<byte[], TimeRange> entry : get.getColumnFamilyTimeRange().entrySet()) {
-305  TimeRange tr = entry.getValue();
-306  
setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
-307}
-308this.mvccReadPoint = -1L;
-309  }
-310
-311  public boolean isGetScan() {
-312return includeStartRow && includeStopRow
-313 && ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow);
-314  }
-315
-316  /**
-317   * Get all columns from the specified 
family.
-318   * p
-319   * Overrides previous calls to 
addColumn for this family.
-320   * @param family family name
-321   * @return this
-322   */
-323  public Scan addFamily(byte [] family) 
{
-324familyMap.remove(family);
-325familyMap.put(family, null);
-326return this;
-327  }
-328
-329  /**
-330   * Get the column from the specified 
family with the specified qualifier.
-331   * p
-332   * Overrides previous calls to 
addFamily for this family.
-333   * @param family family name
-334   * @param qualifier column qualifier
-335   * @return this
-336   */
-337  public Scan addColumn(byte [] family, byte [] qualifier) {
-338NavigableSet<byte []> set = familyMap.get(family);
-339if(set == null) {
-340  set = new TreeSet<>(Bytes.BYTES_COMPARATOR);
-341}
-342if (qualifier == null) {
-343  qualifier = HConstants.EMPTY_BYTE_ARRAY;
-344}
-345set.add(qualifier);
-346familyMap.put(family, set);
-347return this;
-348  }
-349
-350  /**
-351   * Set versions of columns only within 
the specified timestamp range,
-352   * [minStamp, maxStamp).  Note, default 
maximum versions to return is 1.  If
-353   * your time range spans more than one 
version and you want all versions
-354   * returned, up the number of versions 
beyond the default.
-355   * @param minStamp minimum timestamp 
value, inclusive
-356   * @param maxStamp maximum timestamp 
value, exclusive
-357   * @see #setMaxVersions()
-358   * @see #setMaxVersions(int)
-359   * @return this
-360   */
-361  @Override
-362  public Scan setTimeRange(long minStamp, 
long maxStamp) throws IOException {
-363return (Scan) 
super.setTimeRange(minStamp, maxStamp);
-364  }
-365
-366  /**
-367   * Set versions of columns only within 
the specified timestamp range,
-368   * @param tr Input TimeRange
-369   * @return this for invocation 
chaining
-370   */
-371  public Scan setTimeRange(TimeRange tr) 
{
-372return (Scan) 
super.setTimeRange(tr);
-373  }
-374
-375  /**
-376   * Get versions of columns with the 
specified timestamp. Note, default maximum
-377   * versions to return is 1.  If your 
time range spans more than one version
-378   * and you want all versions returned, 
up the number of versions beyond the
-379   * default.
-380   * @param timestamp version timestamp
-381   * @see #setMaxVersions()
-382   * @see #setMaxVersions(int)
-383   * @return this
-384   */
-385  public Scan setTimeStamp(long 
timestamp)
-386  throws IOException {
-387try {
-388  super.setTimeRange(timestamp, 
timestamp + 1);
-389} catch(Exception e) {
-390  // This should never happen, unless 
integer overflow or something extremely wrong...
-391  LOG.error("TimeRange failed, likely 
caused by 

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html
index ad15c2c..8fffd0c 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class LruBlockCache
+public class LruBlockCache
 extends Object
 implements ResizableBlockCache, HeapSize
 A block cache implementation that is memory-aware using HeapSize,
@@ -758,7 +758,7 @@ implements 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -767,7 +767,7 @@ implements 
 
 LRU_MIN_FACTOR_CONFIG_NAME
-private static final String LRU_MIN_FACTOR_CONFIG_NAME
+private static final String LRU_MIN_FACTOR_CONFIG_NAME
 Percentage of total size that eviction will evict until; e.g. if set to .8, then we will keep evicting during an eviction run till the cache size is down to 80% of the total.
 
@@ -782,7 +782,7 @@ implements 
 
 LRU_ACCEPTABLE_FACTOR_CONFIG_NAME
-private static final String LRU_ACCEPTABLE_FACTOR_CONFIG_NAME
+private static final String LRU_ACCEPTABLE_FACTOR_CONFIG_NAME
 Acceptable size of cache (no evictions if size < acceptable)
 
 See Also:
@@ -796,7 +796,7 @@ implements 
 
 LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME
-static final String LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME
+static final String LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME
 Hard capacity limit of cache, will reject any put if size > this * acceptable
 
 See Also:
@@ -810,7 +810,7 @@ implements 
 
 LRU_SINGLE_PERCENTAGE_CONFIG_NAME
-private static final String LRU_SINGLE_PERCENTAGE_CONFIG_NAME
+private static final String LRU_SINGLE_PERCENTAGE_CONFIG_NAME
 
 See Also:
 Constant Field Values
@@ -823,7 +823,7 @@ implements 
 
 LRU_MULTI_PERCENTAGE_CONFIG_NAME
-private static final String LRU_MULTI_PERCENTAGE_CONFIG_NAME
+private static final String LRU_MULTI_PERCENTAGE_CONFIG_NAME
 
 See Also:
 Constant Field Values
@@ -836,7 +836,7 @@ implements 
 
 LRU_MEMORY_PERCENTAGE_CONFIG_NAME
-private static final String LRU_MEMORY_PERCENTAGE_CONFIG_NAME
+private static final String LRU_MEMORY_PERCENTAGE_CONFIG_NAME
 
 See Also:
 Constant Field Values
@@ -849,7 +849,7 @@ implements 
 
 LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME
-private static final String LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME
+private static final String LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME
 Configuration key to force data-block always (except in-memory are too much) cached in memory for in-memory hfile, unlike inMemory, which is a column-family configuration, inMemoryForceMode is a cluster-wide configuration
@@ -865,7 +865,7 @@ implements 
 
 DEFAULT_LOAD_FACTOR
-static final float DEFAULT_LOAD_FACTOR
+static final float DEFAULT_LOAD_FACTOR
 
 See Also:
 Constant Field Values
@@ -878,7 +878,7 @@ 

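
The factors documented above are set through cluster configuration. A hedged sketch; the key strings below are assumptions (this page shows only the constant identifiers, not their values), while the semantics follow the javadoc: evict down to the min factor, evict nothing while below the acceptable factor:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class LruFactorsSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key names for LRU_MIN_FACTOR_CONFIG_NAME and
        // LRU_ACCEPTABLE_FACTOR_CONFIG_NAME.
        conf.setFloat("hbase.lru.blockcache.min.factor", 0.80f);
        conf.setFloat("hbase.lru.blockcache.acceptable.factor", 0.95f);
        System.out.println(conf.getFloat("hbase.lru.blockcache.min.factor", 0f));
      }
    }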
[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/JarFinder.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/JarFinder.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/JarFinder.html
index 614f42d..a10d0e5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/JarFinder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/JarFinder.html
@@ -98,97 +98,100 @@
 090  private static void zipDir(File dir, 
String relativePath, ZipOutputStream zos,
 091 boolean 
start) throws IOException {
 092String[] dirList = dir.list();
-093for (String aDirList : dirList) {
-094  File f = new File(dir, aDirList);
-095  if (!f.isHidden()) {
-096if (f.isDirectory()) {
-097  if (!start) {
-098ZipEntry dirEntry = new 
ZipEntry(relativePath + f.getName() + "/");
-099zos.putNextEntry(dirEntry);
-100zos.closeEntry();
-101  }
-102  String filePath = 
f.getPath();
-103  File file = new 
File(filePath);
-104  zipDir(file, relativePath + 
f.getName() + "/", zos, false);
-105}
-106else {
-107  String path = relativePath + 
f.getName();
-108  if 
(!path.equals(JarFile.MANIFEST_NAME)) {
-109ZipEntry anEntry = new 
ZipEntry(path);
-110copyToZipStream(f, anEntry, 
zos);
-111  }
-112}
-113  }
-114}
-115  }
-116
-117  private static void createJar(File dir, 
File jarFile) throws IOException {
-118Preconditions.checkNotNull(dir, 
"dir");
-119Preconditions.checkNotNull(jarFile, 
"jarFile");
-120File jarDir = 
jarFile.getParentFile();
-121if (!jarDir.exists()) {
-122  if (!jarDir.mkdirs()) {
-123throw new 
IOException(MessageFormat.format("could not create dir [{0}]",
-124  
 jarDir));
-125  }
-126}
-127try (FileOutputStream fos = new 
FileOutputStream(jarFile);
-128 JarOutputStream jos = new 
JarOutputStream(fos)) {
-129  jarDir(dir, "", jos);
-130}
-131  }
-132
-133  /**
-134   * Returns the full path to the Jar 
containing the class. It always return a
-135   * JAR.
-136   *
-137   * @param klass class.
-138   *
-139   * @return path to the Jar containing 
the class.
-140   */
-141  public static String getJar(Class 
klass) {
-142Preconditions.checkNotNull(klass, 
"klass");
-143ClassLoader loader = 
klass.getClassLoader();
-144if (loader != null) {
-145  String class_file = 
klass.getName().replaceAll("\\.", "/") + ".class";
-146  try {
-147for (Enumeration itr = 
loader.getResources(class_file);
-148 itr.hasMoreElements(); ) {
-149  URL url = (URL) 
itr.nextElement();
-150  String path = url.getPath();
-151  if (path.startsWith("file:")) 
{
-152path = 
path.substring("file:".length());
-153  }
-154  path = URLDecoder.decode(path, 
"UTF-8");
-155  if 
("jar".equals(url.getProtocol())) {
-156path = 
URLDecoder.decode(path, "UTF-8");
-157return 
path.replaceAll("!.*$", "");
-158  }
-159  else if 
("file".equals(url.getProtocol())) {
-160String klassName = 
klass.getName();
-161klassName = 
klassName.replace(".", "/") + ".class";
-162path = path.substring(0, 
path.length() - klassName.length());
-163File baseDir = new 
File(path);
-164File testDir = new 
File(System.getProperty("test.build.dir", "target/test-dir"));
-165testDir = 
testDir.getAbsoluteFile();
-166if (!testDir.exists()) {
-167  testDir.mkdirs();
-168}
-169File tempJar = 
File.createTempFile("hadoop-", "", testDir);
-170tempJar = new 
File(tempJar.getAbsolutePath() + ".jar");
-171tempJar.deleteOnExit();
-172createJar(baseDir, 
tempJar);
-173return 
tempJar.getAbsolutePath();
-174  }
-175}
-176  }
-177  catch (IOException e) {
-178throw new RuntimeException(e);
+093if (dirList == null) {
+094  return;
+095}
+096for (String aDirList : dirList) {
+097  File f = new File(dir, aDirList);
+098  if (!f.isHidden()) {
+099if (f.isDirectory()) {
+100  if (!start) {
+101ZipEntry dirEntry = new 
ZipEntry(relativePath + f.getName() + "/");
+102zos.putNextEntry(dirEntry);
+103zos.closeEntry();
+104  }
+105  String filePath = 
f.getPath();
+106  File file = new 
File(filePath);
+107  zipDir(file, relativePath + 
f.getName() + "/", zos, false);
+108}
+109else {
+110  String path = relativePath + 
f.getName();
+111  if 

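
The substantive change in this hunk is the null guard added at lines 093-095: File.list() returns null, not an empty array, when the path is not a directory or cannot be read. A standalone sketch of the same guard:

    import java.io.File;

    public class ListGuardSketch {
      public static void main(String[] args) {
        File dir = new File(args.length > 0 ? args[0] : ".");
        String[] entries = dir.list(); // null on unreadable or non-directory paths
        if (entries == null) {
          System.err.println("not listable: " + dir);
          return;
        }
        for (String name : entries) {
          System.out.println(name);
        }
      }
    }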
[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index f96dca6..0179a3c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class AssignmentManager
+public class AssignmentManager
extends Object
 implements ServerListener
 The AssignmentManager is the coordinator for region 
assign/unassign operations.
@@ -280,34 +280,38 @@
 pendingAssignQueue
 
+private RegionNormalizer
+regionNormalizer
+
 private RegionStateListener
 regionStateListener
 
 private RegionStates
 regionStates
 
 private RegionStateStore
 regionStateStore
 
 static String
 RIT_CHORE_INTERVAL_MSEC_CONF_KEY
 
 private AssignmentManager.RegionInTransitionChore
 ritChore
 
 private AtomicBoolean
 running
 
 private boolean
 shouldAssignRegionsWithFavoredNodes
 
 private static UnassignProcedure[]
 UNASSIGNED_PROCEDURE_FOR_TYPE_INFO
 
@@ -562,116 +566,120 @@
 getRegionInfo(byte[] regionName)
 
+RegionNormalizer
+getRegionNormalizer()
+
 List<RegionStates.RegionStateNode>
 getRegionsInTransition()
 
 RegionStates
 getRegionStates()
 
 RegionStateStore
 getRegionStateStore()
 
 Pair<Integer, Integer>
 getReopenStatus(TableName tableName)
 Used by the client (via master) to identify if all regions have the schema updates
 
 int
 

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
index 0a32350..cf44d69 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
@@ -75,735 +75,796 @@
 067import 
org.apache.hadoop.conf.Configuration;
 068import 
org.apache.hadoop.crypto.CryptoProtocolVersion;
 069import 
org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.FileSystem;
-071import 
org.apache.hadoop.fs.FileSystemLinkResolver;
-072import org.apache.hadoop.fs.Path;
-073import 
org.apache.hadoop.fs.UnresolvedLinkException;
-074import 
org.apache.hadoop.fs.permission.FsPermission;
-075import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-076import 
org.apache.hadoop.hbase.client.ConnectionUtils;
-077import 
org.apache.hadoop.hbase.util.CancelableProgressable;
-078import 
org.apache.hadoop.hbase.util.FSUtils;
-079import 
org.apache.hadoop.hdfs.DFSClient;
-080import 
org.apache.hadoop.hdfs.DFSOutputStream;
-081import 
org.apache.hadoop.hdfs.DistributedFileSystem;
-082import 
org.apache.hadoop.hdfs.protocol.ClientProtocol;
-083import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-084import 
org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-085import 
org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-086import 
org.apache.hadoop.hdfs.protocol.LocatedBlock;
-087import 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-088import 
org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
-089import 
org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
-090import 
org.apache.hadoop.hdfs.protocol.datatransfer.Op;
-091import 
org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
-092import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
-093import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
-094import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
-095import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
-096import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
-097import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
-098import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
-099import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-100import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
-101import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
-102import 
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-103import 
org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-104import 
org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
-105import 
org.apache.hadoop.io.EnumSetWritable;
-106import 
org.apache.hadoop.ipc.RemoteException;
-107import org.apache.hadoop.net.NetUtils;
-108import 
org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
-109import 
org.apache.hadoop.security.token.Token;
-110import 
org.apache.hadoop.util.DataChecksum;
-111
-112/**
-113 * Helper class for implementing {@link 
FanOutOneBlockAsyncDFSOutput}.
-114 */
-115@InterfaceAudience.Private
-116public final class 
FanOutOneBlockAsyncDFSOutputHelper {
-117
-118  private static final Log LOG = 
LogFactory.getLog(FanOutOneBlockAsyncDFSOutputHelper.class);
-119
-120  private 
FanOutOneBlockAsyncDFSOutputHelper() {
-121  }
-122
-123  // use pooled allocator for 
performance.
-124  private static final ByteBufAllocator 
ALLOC = PooledByteBufAllocator.DEFAULT;
-125
-126  // copied from DFSPacket since it is 
package private.
-127  public static final long 
HEART_BEAT_SEQNO = -1L;
-128
-129  // Timeouts for communicating with 
DataNode for streaming writes/reads
-130  public static final int READ_TIMEOUT = 
60 * 1000;
-131  public static final int 
READ_TIMEOUT_EXTENSION = 5 * 1000;
-132  public static final int WRITE_TIMEOUT = 
8 * 60 * 1000;
-133
-134  // helper class for getting Status from 
PipelineAckProto. In hadoop 2.6 or before, there is a
-135  // getStatus method, and for hadoop 2.7 
or after, the status is retrieved from flag. The flag may
-136  // get from proto directly, or combined 
by the reply field of the proto and a ECN object. See
-137  // createPipelineAckStatusGetter for 
more details.
-138  private interface 
PipelineAckStatusGetter {
-139Status get(PipelineAckProto ack);
-140  }
-141

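
A runnable toy version of the version-bridging pattern the comment at lines 134-138 describes; the Ack classes and method names below are stand-ins, not the real HDFS proto API:

    import java.lang.reflect.Method;

    public class PipelineAckCompatSketch {
      // Toy stand-ins for the two proto generations the comment describes.
      static class OldAck { public int getStatus() { return 0; } }
      static class NewAck { public int getFlag(int i) { return 2; } }

      interface StatusGetter<A> { int get(A ack) throws Exception; }

      // Probe reflectively for the pre-2.7 accessor, else fall back to the
      // flag-based one, as createPipelineAckStatusGetter is said to do.
      static <A> StatusGetter<A> create(Class<A> ackClass) {
        try {
          Method m = ackClass.getMethod("getStatus");
          return ack -> (Integer) m.invoke(ack);
        } catch (NoSuchMethodException e) {
          return ack -> (Integer) ackClass.getMethod("getFlag", int.class).invoke(ack, 0);
        }
      }

      public static void main(String[] args) throws Exception {
        System.out.println(create(OldAck.class).get(new OldAck())); // old path
        System.out.println(create(NewAck.class).get(new NewAck())); // new path
      }
    }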
[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
index ec0d665..013a7c2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
@@ -29,560 +29,603 @@
 021import java.util.List;
 022import java.util.Map;
 023import java.util.Optional;
-024import java.util.concurrent.CompletableFuture;
-025import java.util.concurrent.ExecutorService;
-026import java.util.regex.Pattern;
-027
-028import org.apache.commons.logging.Log;
-029import org.apache.commons.logging.LogFactory;
-030import org.apache.hadoop.hbase.ClusterStatus;
-031import org.apache.hadoop.hbase.HRegionInfo;
-032import org.apache.hadoop.hbase.ProcedureInfo;
-033import org.apache.hadoop.hbase.RegionLoad;
-034import org.apache.hadoop.hbase.ServerName;
-035import org.apache.hadoop.hbase.NamespaceDescriptor;
-036import org.apache.hadoop.hbase.TableName;
-037import org.apache.hadoop.hbase.classification.InterfaceAudience;
-038import org.apache.hadoop.hbase.client.replication.TableCFs;
-039import org.apache.hadoop.hbase.quotas.QuotaFilter;
-040import org.apache.hadoop.hbase.quotas.QuotaSettings;
-041import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-042import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-043import org.apache.hadoop.hbase.util.Pair;
-044
-045/**
-046 * The implementation of AsyncAdmin.
-047 */
-048@InterfaceAudience.Private
-049public class AsyncHBaseAdmin implements AsyncAdmin {
-050
-051  private static final Log LOG = LogFactory.getLog(AsyncHBaseAdmin.class);
-052
-053  private final RawAsyncHBaseAdmin rawAdmin;
-054
-055  private final ExecutorService pool;
-056
-057  AsyncHBaseAdmin(RawAsyncHBaseAdmin rawAdmin, ExecutorService pool) {
-058    this.rawAdmin = rawAdmin;
-059    this.pool = pool;
-060  }
-061
-062  private <T> CompletableFuture<T> wrap(CompletableFuture<T> future) {
-063    CompletableFuture<T> asyncFuture = new CompletableFuture<>();
-064    future.whenCompleteAsync((r, e) -> {
-065      if (e != null) {
-066        asyncFuture.completeExceptionally(e);
-067      } else {
-068        asyncFuture.complete(r);
-069      }
-070    }, pool);
-071    return asyncFuture;
-072  }
-073
-074  @Override
-075  public CompletableFuture<Boolean> tableExists(TableName tableName) {
-076    return wrap(rawAdmin.tableExists(tableName));
-077  }
-078
-079  @Override
-080  public CompletableFuture<List<TableDescriptor>> listTables(Optional<Pattern> pattern,
-081      boolean includeSysTables) {
-082    return wrap(rawAdmin.listTables(pattern, includeSysTables));
-083  }
-084
-085  @Override
-086  public CompletableFuture<List<TableName>> listTableNames(Optional<Pattern> pattern,
-087      boolean includeSysTables) {
-088    return wrap(rawAdmin.listTableNames(pattern, includeSysTables));
-089  }
-090
-091  @Override
-092  public CompletableFuture<TableDescriptor> getTableDescriptor(TableName tableName) {
-093    return wrap(rawAdmin.getTableDescriptor(tableName));
-094  }
-095
-096  @Override
-097  public CompletableFuture<Void> createTable(TableDescriptor desc, byte[] startKey, byte[] endKey,
-098      int numRegions) {
-099    return wrap(rawAdmin.createTable(desc, startKey, endKey, numRegions));
-100  }
-101
-102  @Override
-103  public CompletableFuture<Void> createTable(TableDescriptor desc, Optional<byte[][]> splitKeys) {
-104    return wrap(rawAdmin.createTable(desc, splitKeys));
-105  }
-106
-107  @Override
-108  public CompletableFuture<Void> deleteTable(TableName tableName) {
-109    return wrap(rawAdmin.deleteTable(tableName));
-110  }
-111
-112  @Override
-113  public CompletableFuture<List<TableDescriptor>> deleteTables(Pattern pattern) {
-114    return wrap(rawAdmin.deleteTables(pattern));
-115  }
-116
-117  @Override
-118  public CompletableFuture<Void> truncateTable(TableName tableName, boolean preserveSplits) {
-119    return wrap(rawAdmin.truncateTable(tableName, preserveSplits));
-120  }
-121
-122  @Override
-123  public CompletableFuture<Void> enableTable(TableName tableName) {
-124    return wrap(rawAdmin.enableTable(tableName));
-125  }
-126
-127  @Override
-128  public CompletableFuture<List<TableDescriptor>> enableTables(Pattern pattern) {
-129    return wrap(rawAdmin.enableTables(pattern));
-130  }
-131
-132  @Override
-133  public CompletableFuture<Void> disableTable(TableName tableName) {
-134    return wrap(rawAdmin.disableTable(tableName));
-135  }
-136
-137  @Override
-138  public CompletableFuture<List<TableDescriptor>> disableTables(Pattern pattern) {
-139    return wrap(rawAdmin.disableTables(pattern));
-140  }
-141
-142  @Override
-143  public
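Every admin method above funnels through wrap(), which re-completes the underlying future on the admin's own ExecutorService so user callbacks do not run on the client's internal RPC threads. A standalone, runnable restatement of that hand-off (class and variable names here are illustrative, not the HBase API):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class WrapDemo {

  // Same shape as the wrap() in the diff above: bridge a future completed on
  // some internal thread onto a caller-visible future completed via 'pool'.
  static <T> CompletableFuture<T> wrap(CompletableFuture<T> future, ExecutorService pool) {
    CompletableFuture<T> asyncFuture = new CompletableFuture<>();
    future.whenCompleteAsync((r, e) -> {
      if (e != null) {
        asyncFuture.completeExceptionally(e);
      } else {
        asyncFuture.complete(r);
      }
    }, pool);
    return asyncFuture;
  }

  public static void main(String[] args) {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    CompletableFuture<String> internal = CompletableFuture.supplyAsync(() -> "ok");
    // asyncFuture is completed from a 'pool' thread, keeping internal threads free.
    wrap(internal, pool)
        .thenAccept(v -> System.out.println(v + " on " + Thread.currentThread().getName()))
        .join();
    pool.shutdown();
  }
}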

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
index 71844ce..75db22d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
@@ -105,2564 +105,2642 @@
 097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
 099import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-105import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-106import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-107import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-108import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-109import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-110import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-111import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-112import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-113import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-114import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-115import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-139import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
-140import

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
index f5bc73a..feb42ea 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
@@ -4044,345 +4044,330 @@
 4036
 4037  @Override
 4038  public void drainRegionServers(List<ServerName> servers) throws IOException {
-4039    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4040    for (ServerName server : servers) {
-4041      // Parse to ServerName to do simple validation.
-4042      ServerName.parseServerName(server.toString());
-4043      pbServers.add(ProtobufUtil.toServerName(server));
-4044    }
-4045
-4046    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4047      @Override
-4048      public Void rpcCall() throws ServiceException {
-4049        DrainRegionServersRequest req =
-4050            DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build();
-4051        master.drainRegionServers(getRpcController(), req);
-4052        return null;
-4053      }
-4054    });
-4055  }
-4056
-4057  @Override
-4058  public List<ServerName> listDrainingRegionServers() throws IOException {
-4059    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
-4060              getRpcControllerFactory()) {
-4061      @Override
-4062      public List<ServerName> rpcCall() throws ServiceException {
-4063        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
-4064        List<ServerName> servers = new ArrayList<>();
-4065        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
-4066            .getServerNameList()) {
-4067          servers.add(ProtobufUtil.toServerName(server));
-4068        }
-4069        return servers;
-4070      }
-4071    });
-4072  }
-4073
-4074  @Override
-4075  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
-4076    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4077    for (ServerName server : servers) {
-4078      pbServers.add(ProtobufUtil.toServerName(server));
-4079    }
-4080
-4081    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4082      @Override
-4083      public Void rpcCall() throws ServiceException {
-4084        RemoveDrainFromRegionServersRequest req = RemoveDrainFromRegionServersRequest.newBuilder()
-4085            .addAllServerName(pbServers).build();
-4086        master.removeDrainFromRegionServers(getRpcController(), req);
-4087        return null;
+4039    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4040      @Override
+4041      public Void rpcCall() throws ServiceException {
+4042        master.drainRegionServers(getRpcController(),
+4043          RequestConverter.buildDrainRegionServersRequest(servers));
+4044        return null;
+4045      }
+4046    });
+4047  }
+4048
+4049  @Override
+4050  public List<ServerName> listDrainingRegionServers() throws IOException {
+4051    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
+4052              getRpcControllerFactory()) {
+4053      @Override
+4054      public List<ServerName> rpcCall() throws ServiceException {
+4055        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
+4056        List<ServerName> servers = new ArrayList<>();
+4057        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
+4058            .getServerNameList()) {
+4059          servers.add(ProtobufUtil.toServerName(server));
+4060        }
+4061        return servers;
+4062      }
+4063    });
+4064  }
+4065
+4066  @Override
+4067  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
+4068    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4069      @Override
+4070      public Void rpcCall() throws ServiceException {
+4071        master.removeDrainFromRegionServers(getRpcController(), RequestConverter.buildRemoveDrainFromRegionServersRequest(servers));
+4072        return null;
+4073      }
+4074    });
+4075  }
+4076
+4077  @Override
+4078  public List<TableCFs> listReplicatedTableCFs() throws IOException {
+4079    List<TableCFs> replicatedTableCFs = new ArrayList<>();
+4080    HTableDescriptor[] tables = listTables();
+4081    for (HTableDescriptor table : tables) {
+4082      HColumnDescriptor[]
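The change above replaces inline protobuf request assembly with shared RequestConverter helpers, so validation and request building live in one place instead of being repeated in every rpcCall() body. A minimal, dependency-free sketch of that converter pattern; all names here (DrainRequest, buildDrainRequest) are stand-ins, not the HBase API:

import java.util.ArrayList;
import java.util.List;

public class RequestConverterSketch {

  // Stand-in for a generated protobuf request type.
  static class DrainRequest {
    final List<String> serverNames;
    DrainRequest(List<String> serverNames) { this.serverNames = serverNames; }
  }

  // The converter: validate once, convert once, reuse from every caller.
  static DrainRequest buildDrainRequest(List<String> servers) {
    List<String> pbServers = new ArrayList<>(servers.size());
    for (String server : servers) {
      if (server == null || server.isEmpty()) {   // cheap validation up front
        throw new IllegalArgumentException("bad server name: " + server);
      }
      pbServers.add(server);                      // stand-in for proto conversion
    }
    return new DrainRequest(pbServers);
  }

  public static void main(String[] args) {
    DrainRequest req = buildDrainRequest(List.of("rs1,16020,1", "rs2,16020,1"));
    System.out.println(req.serverNames);
  }
}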

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
index 885e6f8..cd44020 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
@@ -33,871 +33,881 @@
 025import java.nio.charset.StandardCharsets;
 026import java.util.ArrayList;
 027import java.util.Arrays;
-028import java.util.Collection;
-029import java.util.List;
-030import java.util.Map;
-031import java.util.Set;
-032import java.util.TreeMap;
-033import java.util.TreeSet;
-034import java.util.UUID;
-035import java.util.function.Function;
-036import java.util.stream.Collectors;
-037
-038import org.apache.commons.lang.StringUtils;
-039import org.apache.commons.logging.Log;
-040import org.apache.commons.logging.LogFactory;
-041import org.apache.hadoop.conf.Configuration;
-042import org.apache.hadoop.fs.FileSystem;
-043import org.apache.hadoop.fs.Path;
-044import org.apache.hadoop.hbase.Cell;
-045import org.apache.hadoop.hbase.CellComparator;
-046import org.apache.hadoop.hbase.CellUtil;
-047import org.apache.hadoop.hbase.HColumnDescriptor;
-048import org.apache.hadoop.hbase.HConstants;
-049import org.apache.hadoop.hbase.HRegionLocation;
-050import org.apache.hadoop.hbase.HTableDescriptor;
-051import org.apache.hadoop.hbase.KeyValue;
-052import org.apache.hadoop.hbase.KeyValueUtil;
-053import org.apache.hadoop.hbase.TableName;
-054import org.apache.hadoop.hbase.classification.InterfaceAudience;
-055import org.apache.hadoop.hbase.client.Connection;
-056import org.apache.hadoop.hbase.client.ConnectionFactory;
-057import org.apache.hadoop.hbase.client.Put;
-058import org.apache.hadoop.hbase.client.RegionLocator;
-059import org.apache.hadoop.hbase.client.Table;
-060import org.apache.hadoop.hbase.fs.HFileSystem;
-061import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-062import org.apache.hadoop.hbase.io.compress.Compression;
-063import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-064import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-065import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-066import org.apache.hadoop.hbase.io.hfile.HFile;
-067import org.apache.hadoop.hbase.io.hfile.HFileContext;
-068import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-069import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl;
+028import java.util.List;
+029import java.util.Map;
+030import java.util.Set;
+031import java.util.TreeMap;
+032import java.util.TreeSet;
+033import java.util.UUID;
+034import java.util.function.Function;
+035import java.util.stream.Collectors;
+036
+037import org.apache.commons.lang.StringUtils;
+038import org.apache.commons.logging.Log;
+039import org.apache.commons.logging.LogFactory;
+040import org.apache.hadoop.conf.Configuration;
+041import org.apache.hadoop.fs.FileSystem;
+042import org.apache.hadoop.fs.Path;
+043import org.apache.hadoop.hbase.Cell;
+044import org.apache.hadoop.hbase.CellComparator;
+045import org.apache.hadoop.hbase.CellUtil;
+046import org.apache.hadoop.hbase.classification.InterfaceAudience;
+047import org.apache.hadoop.hbase.client.TableDescriptor;
+048import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+049import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+050import org.apache.hadoop.hbase.client.Connection;
+051import org.apache.hadoop.hbase.client.ConnectionFactory;
+052import org.apache.hadoop.hbase.client.Put;
+053import org.apache.hadoop.hbase.client.RegionLocator;
+054import org.apache.hadoop.hbase.client.Table;
+055import org.apache.hadoop.hbase.fs.HFileSystem;
+056import org.apache.hadoop.hbase.HConstants;
+057import org.apache.hadoop.hbase.HRegionLocation;
+058import org.apache.hadoop.hbase.HTableDescriptor;
+059import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+060import org.apache.hadoop.hbase.io.compress.Compression;
+061import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
+062import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+063import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+064import org.apache.hadoop.hbase.io.hfile.HFile;
+065import org.apache.hadoop.hbase.io.hfile.HFileContext;
+066import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+067import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl;
+068import org.apache.hadoop.hbase.KeyValue;
+069import org.apache.hadoop.hbase.KeyValueUtil;
 070import org.apache.hadoop.hbase.regionserver.BloomType;
 071import org.apache.hadoop.hbase.regionserver.HStore;
 072import org.apache.hadoop.hbase.regionserver.StoreFile;
 073import

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
index 249d4a0..7369fdf 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
@@ -65,12 +65,12 @@
 057import com.google.common.base.Preconditions;
 058
 059/**
-060 * Reads {@link HFile} version 2 blocks to HFiles and via {@link Cacheable} Interface to caches.
-061 * Version 2 was introduced in hbase-0.92.0. No longer has support for version 1 blocks since
-062 * hbase-1.3.0.
-063 *
-064 * <p>Version 1 was the original file block. Version 2 was introduced when we changed the hbase file
-065 * format to support multi-level block indexes and compound bloom filters (HBASE-3857).
+060 * Cacheable Blocks of an {@link HFile} version 2 file.
+061 * Version 2 was introduced in hbase-0.92.0.
+062 *
+063 * <p>Version 1 was the original file block. Version 2 was introduced when we changed the hbase file
+064 * format to support multi-level block indexes and compound bloom filters (HBASE-3857). Support
+065 * for Version 1 was removed in hbase-1.3.0.
 066 *
 067 * <h3>HFileBlock: Version 2</h3>
 068 * In version 2, a block is structured as follows:
@@ -120,582 +120,582 @@
 112public class HFileBlock implements Cacheable {
 113  private static final Log LOG = LogFactory.getLog(HFileBlock.class);
 114
-115  /** Type of block. Header field 0. */
-116  private BlockType blockType;
-117
-118  /**
-119   * Size on disk excluding header, including checksum. Header field 1.
-120   * @see Writer#putHeader(byte[], int, int, int, int)
-121   */
-122  private int onDiskSizeWithoutHeader;
-123
-124  /**
-125   * Size of pure data. Does not include header or checksums. Header field 2.
-126   * @see Writer#putHeader(byte[], int, int, int, int)
-127   */
-128  private int uncompressedSizeWithoutHeader;
-129
-130  /**
-131   * The offset of the previous block on disk. Header field 3.
-132   * @see Writer#putHeader(byte[], int, int, int, int)
-133   */
-134  private long prevBlockOffset;
-135
-136  /**
-137   * Size on disk of header + data. Excludes checksum. Header field 6,
-138   * OR calculated from {@link #onDiskSizeWithoutHeader} when using HDFS checksum.
-139   * @see Writer#putHeader(byte[], int, int, int, int)
-140   */
-141  private int onDiskDataSizeWithHeader;
-142
-143
-144  /**
-145   * The in-memory representation of the hfile block. Can be on or offheap. Can be backed by
-146   * a single ByteBuffer or by many. Make no assumptions.
-147   *
-148   * <p>Be careful reading from this <code>buf</code>. Duplicate and work on the duplicate or if
-149   * not, be sure to reset position and limit else trouble down the road.
-150   *
-151   * <p>TODO: Make this read-only once made.
-152   *
-153   * <p>We are using the ByteBuff type. ByteBuffer is not extensible yet we need to be able to have
-154   * a ByteBuffer-like API across multiple ByteBuffers reading from a cache such as BucketCache.
-155   * So, we have this ByteBuff type. Unfortunately, it is spread all about HFileBlock. Would be
-156   * good if could be confined to cache-use only but hard-to-do.
-157   */
-158  private ByteBuff buf;
-159
-160  /** Meta data that holds meta information on the hfileblock. */
-162  private HFileContext fileContext;
-163
-164  /**
-165   * The offset of this block in the file. Populated by the reader for
-166   * convenience of access. This offset is not part of the block header.
-167   */
-168  private long offset = UNSET;
-169
-170  private MemoryType memType = MemoryType.EXCLUSIVE;
-171
-172  /**
-173   * The on-disk size of the next block, including the header and checksums if present, obtained by
-174   * peeking into the first {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the next block's
-175   * header, or UNSET if unknown.
-176   *
-177   * Blocks try to carry the size of the next block to read in this data member. They will even have
-178   * this value when served from cache. Could save a seek in the case where we are iterating through
-179   * a file and some of the blocks come from cache. If from cache, then having this info to hand
-180   * will save us doing a seek to read the header so we can read the body of a block.
-181   * TODO: see how effective this is at saving seeks.
-182   */
-183  private int nextBlockOnDiskSize = UNSET;
-184
-185  /**
-186   * On a checksum failure, do these many succeeding read requests using hdfs checksums before
-187   * auto-reenabling hbase checksum verification.
-188   */
-189  static final int
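The "header field N" comments above map directly onto the serialized v2 block header. Below is a small Java sketch of walking those fields off a ByteBuffer in field order. The exact sizes and ordering used here (8-byte magic, two 4-byte sizes, 8-byte previous offset, 1-byte checksum type, 4-byte bytes-per-checksum, 4-byte on-disk-data-size-with-header) are an assumption of this sketch for the with-checksum layout; treat the HFile format documentation, not this snippet, as the reference.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class HFileV2HeaderSketch {

  // Assumed v2-with-checksum header layout, mirroring "header field N" above.
  static void dumpHeader(ByteBuffer buf) {
    byte[] magic = new byte[8];
    buf.get(magic);                                    // field 0: block type magic
    int onDiskSizeWithoutHeader = buf.getInt();        // field 1
    int uncompressedSizeWithoutHeader = buf.getInt();  // field 2
    long prevBlockOffset = buf.getLong();              // field 3
    byte checksumType = buf.get();                     // field 4
    int bytesPerChecksum = buf.getInt();               // field 5
    int onDiskDataSizeWithHeader = buf.getInt();       // field 6
    System.out.printf("type=%s onDisk=%d uncompressed=%d prev=%d checksum=%d/%d dataWithHeader=%d%n",
        new String(magic, StandardCharsets.US_ASCII), onDiskSizeWithoutHeader,
        uncompressedSizeWithoutHeader, prevBlockOffset, checksumType, bytesPerChecksum,
        onDiskDataSizeWithHeader);
  }
}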

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Public.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Public.html b/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Public.html
index 1adab1b..6fc097a 100644
--- a/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Public.html
+++ b/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Public.html
@@ -654,75 +654,81 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
+interface  AsyncAdminBuilder<T extends AsyncAdmin> - For creating AsyncAdmin.
 interface  AsyncConnection - The asynchronous version of Connection.
 interface  AsyncTable - The asynchronous table for normal users.
 interface  AsyncTableBase - The base interface for asynchronous version of Table.
 interface  AsyncTableBuilder<T extends AsyncTableBase> - For creating AsyncTable or RawAsyncTable.
 interface  AsyncTableRegionLocator - The asynchronous version of RegionLocator.
 interface  Attributes
 interface  BufferedMutator - Used to communicate with a single HBase table similar to Table but meant for batched, asynchronous puts.
 static interface  BufferedMutator.ExceptionListener - Listens for asynchronous exceptions on a BufferedMutator.
 class  BufferedMutatorParams - Parameters for instantiating a BufferedMutator.
 interface  ColumnFamilyDescriptor - An ColumnFamilyDescriptor contains information about a column family such as the number of versions, compression settings, etc.
 class  ColumnFamilyDescriptorBuilder
 class  CompactionState - POJO representing the compaction state
@@ -730,26 +736,26 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 class  CompactType - Currently, there are only two compact types: NORMAL means do store files compaction; MOB means do mob files compaction.
 interface  Connection - A cluster connection encapsulating lower level individual connections to actual servers and a connection to zookeeper.
 class  ConnectionFactory - A non-instantiable class that manages creation of Connections.
 class  Consistency - Consistency defines the expected consistency level for an operation.
 class  Cursor - Scan cursor to tell client where server is scanning (see Result.getCursor()).
@@ -758,199 +764,199 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 class  Delete - Used to perform Delete operations on a single row.
 class  DoNotRetryRegionException - Similar to RegionException, but disables retries.
 class  Durability - Enum describing the durability guarantees for tables and Mutations. Note that the items must be sorted in order of increasing durability.
 class  Get - Used to perform Get operations on a single row.
 class  HTableMultiplexer - HTableMultiplexer provides a thread-safe non blocking PUT API across all the tables.
 static class  HTableMultiplexer.HTableMultiplexerStatus - HTableMultiplexerStatus keeps track of the current status of the HTableMultiplexer.
 class  ImmutableHTableDescriptor - Deprecated.
 class  Increment - Used to perform Increment operations on a single row.
 class  IsolationLevel - Specify Isolation levels in Scan operations.
 class  MasterSwitchType - Represents the master switch type
 class  MobCompactPartitionPolicy - Enum describing the mob compact partition policy types.
 class  Mutation
 class  NoServerForRegionException - Thrown when no region server can be found for a region
 class  Operation - Superclass for any type that maps to a potentially application-level query.
 class  OperationWithAttributes
 class  Put - Used to perform Put operations for a single row.
 class  Query
 interface  RawAsyncTable - A low level asynchronous table.
 static interface  RawAsyncTable.CoprocessorCallable<S,R> - Delegate to a protobuf rpc call.
 static interface  RawAsyncTable.CoprocessorCallback<R> - The callback when we want to execute a coprocessor call on a range of regions.
 interface  RawScanResultConsumer - Receives Result for an asynchronous scan.
 static interface  RawScanResultConsumer.ScanController - Used to suspend or stop a scan.
 static interface  RawScanResultConsumer.ScanResumer - Used to resume a scan.
 class  RegionLoadStats - POJO representing region server load
 interface  RegionLocator - Used to view region location information for a single HBase table.
 class  RegionOfflineException - Thrown when a table can not be located
 interface  RequestController - An interface for client request
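Of the public client classes listed above, BufferedMutator is the one whose usage shape is least obvious from its one-line description. A short sketch of the batched-put pattern it is meant for, written against the stable client API as described in the listing (table name, family, and qualifier below are example values; exact signatures may vary by HBase version):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedMutatorExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("demo"))) {
      for (int i = 0; i < 1000; i++) {
        Put put = new Put(Bytes.toBytes("row-" + i));
        put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v-" + i));
        mutator.mutate(put);   // buffered; sent to the server in batches, not per call
      }
      mutator.flush();         // force any remaining buffered mutations out
    }
  }
}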

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
index dc12c09..82506d2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
@@ -54,2261 +54,2259 @@
 046import org.apache.commons.io.IOUtils;
 047import org.apache.commons.logging.Log;
 048import org.apache.commons.logging.LogFactory;
-049import org.apache.directory.api.util.OptionalComponentsMonitor;
-050import org.apache.hadoop.hbase.HRegionInfo;
-051import org.apache.hadoop.hbase.HRegionLocation;
-052import org.apache.hadoop.hbase.MetaTableAccessor;
-053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import org.apache.hadoop.hbase.NotServingRegionException;
-055import org.apache.hadoop.hbase.ProcedureInfo;
-056import org.apache.hadoop.hbase.RegionLocations;
-057import org.apache.hadoop.hbase.ServerName;
-058import org.apache.hadoop.hbase.NamespaceDescriptor;
-059import org.apache.hadoop.hbase.HConstants;
-060import org.apache.hadoop.hbase.TableExistsException;
-061import org.apache.hadoop.hbase.TableName;
-062import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-063import org.apache.hadoop.hbase.TableNotDisabledException;
-064import org.apache.hadoop.hbase.TableNotEnabledException;
-065import org.apache.hadoop.hbase.TableNotFoundException;
-066import org.apache.hadoop.hbase.UnknownRegionException;
-067import org.apache.hadoop.hbase.classification.InterfaceAudience;
-068import org.apache.hadoop.hbase.classification.InterfaceStability;
-069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import org.apache.hadoop.hbase.client.Scan.ReadType;
-072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import org.apache.hadoop.hbase.client.replication.TableCFs;
-074import org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import org.apache.hadoop.hbase.replication.ReplicationException;
-080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-103import

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
index 92ac5ef..17485a6 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
-private class AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
+private class AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
 extends AsyncHBaseAdmin.TableProcedureBiConsumer
@@ -240,7 +240,7 @@ extends
 AddColumnFamilyProcedureBiConsumer
-AddColumnFamilyProcedureBiConsumer(AsyncAdmin admin, TableName tableName)
+AddColumnFamilyProcedureBiConsumer(AsyncAdmin admin, TableName tableName)
@@ -258,7 +258,7 @@ extends
 getOperationType
-String getOperationType()
+String getOperationType()
 Specified by: getOperationType in class AsyncHBaseAdmin.TableProcedureBiConsumer

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AdminRpcCall.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AdminRpcCall.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AdminRpcCall.html
index 25f52aa..07e9e74 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AdminRpcCall.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AdminRpcCall.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 @FunctionalInterface
-private static interface AsyncHBaseAdmin.AdminRpcCall<RESP,REQ>
+private static interface AsyncHBaseAdmin.AdminRpcCall<RESP,REQ>
@@ -159,7 +159,7 @@
 call
-void call(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.Interface stub,
+void call(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.Interface stub,
           HBaseRpcController controller,
           REQ req,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<RESP> done)

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html
index 86da901..3cef5d5 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 @FunctionalInterface
-private static interface AsyncHBaseAdmin.Converter<D,S>
+private static interface AsyncHBaseAdmin.Converter<D,S>
@@ -156,7 +156,7 @@
 convert
-D convert(S src) throws java.io.IOException
+D convert(S src) throws java.io.IOException

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
index df5fa6f..cd9161c 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
-private class AsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer
+private class AsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer
 extends
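The two @FunctionalInterface shapes documented above, AdminRpcCall<RESP,REQ> and Converter<D,S>, are the seams that let one generic driver issue every admin RPC: the call lambda fires the stub method and the converter maps the raw protobuf response to a client-facing type. A toy, dependency-free Java sketch of how the two compose (all names and the simplified callback signature are stand-ins, not the HBase internals):

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

public class RpcCallShapes {

  @FunctionalInterface
  interface RpcCall<RESP, REQ> {
    // Stand-in for call(stub, controller, req, done).
    void call(REQ req, BiConsumer<RESP, Throwable> done);
  }

  @FunctionalInterface
  interface Converter<D, S> {
    D convert(S src) throws IOException;
  }

  // One generic driver: issue the call, then convert the raw response.
  static <PREQ, PRESP, RESP> CompletableFuture<RESP> call(
      RpcCall<PRESP, PREQ> rpc, PREQ req, Converter<RESP, PRESP> conv) {
    CompletableFuture<RESP> future = new CompletableFuture<>();
    rpc.call(req, (resp, err) -> {
      if (err != null) {
        future.completeExceptionally(err);
        return;
      }
      try {
        future.complete(conv.convert(resp));
      } catch (IOException e) {
        future.completeExceptionally(e);
      }
    });
    return future;
  }

  public static void main(String[] args) {
    // "RPC" that echoes the request length; the converter renders it as text.
    RpcCall<Integer, String> rpc = (req, done) -> done.accept(req.length(), null);
    call(rpc, "hello", n -> "length=" + n).thenAccept(System.out::println);
  }
}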

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html
index 49714a2..d0f1508 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html
@@ -172,1438 +172,1562 @@
 164    Map<ServerName, List<HRegionInfo>> clusterState;
 165
 166    protected final RackManager rackManager;
-167
-168    protected Cluster(
-169        Map<ServerName, List<HRegionInfo>> clusterState,
-170        Map<String, Deque<BalancerRegionLoad>> loads,
-171        RegionLocationFinder regionFinder,
-172        RackManager rackManager) {
-173      this(null, clusterState, loads, regionFinder, rackManager);
-174    }
-175
-176    @SuppressWarnings("unchecked")
-177    protected Cluster(
-178        Collection<HRegionInfo> unassignedRegions,
-179        Map<ServerName, List<HRegionInfo>> clusterState,
-180        Map<String, Deque<BalancerRegionLoad>> loads,
-181        RegionLocationFinder regionFinder,
-182        RackManager rackManager) {
-183
-184      if (unassignedRegions == null) {
-185        unassignedRegions = EMPTY_REGION_LIST;
-186      }
+167    // Maps region -> rackIndex -> locality of region on rack
+168    private float[][] rackLocalities;
+169    // Maps localityType -> region -> [server|rack]Index with highest locality
+170    private int[][] regionsToMostLocalEntities;
+171
+172    protected Cluster(
+173        Map<ServerName, List<HRegionInfo>> clusterState,
+174        Map<String, Deque<BalancerRegionLoad>> loads,
+175        RegionLocationFinder regionFinder,
+176        RackManager rackManager) {
+177      this(null, clusterState, loads, regionFinder, rackManager);
+178    }
+179
+180    @SuppressWarnings("unchecked")
+181    protected Cluster(
+182        Collection<HRegionInfo> unassignedRegions,
+183        Map<ServerName, List<HRegionInfo>> clusterState,
+184        Map<String, Deque<BalancerRegionLoad>> loads,
+185        RegionLocationFinder regionFinder,
+186        RackManager rackManager) {
 187
-188      serversToIndex = new HashMap<>();
-189      hostsToIndex = new HashMap<>();
-190      racksToIndex = new HashMap<>();
-191      tablesToIndex = new HashMap<>();
-192
-193      //TODO: We should get the list of tables from master
-194      tables = new ArrayList<>();
-195      this.rackManager = rackManager != null ? rackManager : new DefaultRackManager();
+188      if (unassignedRegions == null) {
+189        unassignedRegions = EMPTY_REGION_LIST;
+190      }
+191
+192      serversToIndex = new HashMap<>();
+193      hostsToIndex = new HashMap<>();
+194      racksToIndex = new HashMap<>();
+195      tablesToIndex = new HashMap<>();
 196
-197      numRegions = 0;
-198
-199      List<List<Integer>> serversPerHostList = new ArrayList<>();
-200      List<List<Integer>> serversPerRackList = new ArrayList<>();
-201      this.clusterState = clusterState;
-202      this.regionFinder = regionFinder;
-203
-204      // Use servername and port as there can be dead servers in this list. We want everything with
-205      // a matching hostname and port to have the same index.
-206      for (ServerName sn : clusterState.keySet()) {
-207        if (sn == null) {
-208          LOG.warn("TODO: Enable TRACE on BaseLoadBalancer. Empty servername); " +
-209              "skipping; unassigned regions?");
-210          if (LOG.isTraceEnabled()) {
-211            LOG.trace("EMPTY SERVERNAME " + clusterState.toString());
-212          }
-213          continue;
-214        }
-215        if (serversToIndex.get(sn.getAddress().toString()) == null) {
-216          serversToIndex.put(sn.getHostAndPort(), numServers++);
-217        }
-218        if (!hostsToIndex.containsKey(sn.getHostname())) {
-219          hostsToIndex.put(sn.getHostname(), numHosts++);
-220          serversPerHostList.add(new ArrayList<>(1));
+197      //TODO: We should get the list of tables from master
+198      tables = new ArrayList<>();
+199      this.rackManager = rackManager != null ? rackManager : new DefaultRackManager();
+200
+201      numRegions = 0;
+202
+203      List<List<Integer>> serversPerHostList = new ArrayList<>();
+204      List<List<Integer>> serversPerRackList = new ArrayList<>();
+205      this.clusterState = clusterState;
+206      this.regionFinder = regionFinder;
+207
+208      // Use servername and port as there can be dead servers in this list. We want everything with
+209      // a matching hostname and port to have the same index.
+210      for (ServerName sn : clusterState.keySet()) {
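The Cluster constructor above interns servers, hosts, racks, and tables into dense int indexes (serversToIndex, hostsToIndex, and so on) so the balancer's cost functions can later run over primitive arrays instead of map lookups. A small stand-alone sketch of that interning pattern; the Interner name and usage below are illustrative only:

import java.util.HashMap;
import java.util.Map;

public class DenseIndexSketch {

  // Assign each distinct key the next dense index, like serversToIndex above.
  static final class Interner<K> {
    private final Map<K, Integer> toIndex = new HashMap<>();

    int index(K key) {
      // The new index is simply the number of keys seen so far.
      return toIndex.computeIfAbsent(key, k -> toIndex.size());
    }

    int size() {
      return toIndex.size();
    }
  }

  public static void main(String[] args) {
    Interner<String> hosts = new Interner<>();
    int a = hosts.index("host-a");   // 0
    int b = hosts.index("host-b");   // 1
    int a2 = hosts.index("host-a");  // still 0: same host, same slot
    // Downstream code can size primitive arrays by hosts.size() and index directly.
    int[] regionsPerHost = new int[hosts.size()];
    regionsPerHost[a]++;
    regionsPerHost[a2]++;
    regionsPerHost[b]++;
    System.out.println(regionsPerHost[0] + " " + regionsPerHost[1]); // prints: 2 1
  }
}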

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
index 681e137..d167295 100644
--- a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
+++ b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
-class ReplicationSourceManager.AdoptAbandonedQueuesWorker
+class ReplicationSourceManager.AdoptAbandonedQueuesWorker
 extends java.lang.Thread
@@ -228,7 +228,7 @@ extends java.lang.Thread
 AdoptAbandonedQueuesWorker
-public AdoptAbandonedQueuesWorker()
+public AdoptAbandonedQueuesWorker()
@@ -245,7 +245,7 @@ extends java.lang.Thread
 run
-public void run()
+public void run()
 Specified by: run in interface java.lang.Runnable

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
index 227c6cb..91e8ca3 100644
--- a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
+++ b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
-class ReplicationSourceManager.NodeFailoverWorker
+class ReplicationSourceManager.NodeFailoverWorker
 extends java.lang.Thread
 Class responsible to setup new ReplicationSources to take care of the queues from dead region servers.
@@ -259,7 +259,7 @@ extends java.lang.Thread
 rsZnode
-private java.lang.String rsZnode
+private java.lang.String rsZnode
@@ -268,7 +268,7 @@ extends java.lang.Thread
 rq
-private final ReplicationQueues rq
+private final ReplicationQueues rq
@@ -277,7 +277,7 @@ extends java.lang.Thread
 rp
-private final ReplicationPeers rp
+private final ReplicationPeers rp
@@ -286,7 +286,7 @@ extends java.lang.Thread
 clusterId
-private final java.util.UUID clusterId
+private final java.util.UUID clusterId
@@ -303,7 +303,7 @@ extends java.lang.Thread
 NodeFailoverWorker
-public NodeFailoverWorker(java.lang.String rsZnode)
+public NodeFailoverWorker(java.lang.String rsZnode)
 Parameters: rsZnode -
@@ -316,7 +316,7 @@ extends java.lang.Thread
 NodeFailoverWorker
-public NodeFailoverWorker(java.lang.String rsZnode,
+public NodeFailoverWorker(java.lang.String rsZnode,
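Per its class comment, NodeFailoverWorker claims the replication queues left behind by a dead region server and spins up new sources for them. A schematic, dependency-free sketch of that hand-off; the QueueStore interface and the small random back-off (to avoid every surviving server grabbing at once) are illustrative assumptions, not the HBase implementation:

import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

public class NodeFailoverSketch {

  // Stand-in for the replication queue storage (ZooKeeper in the real code).
  interface QueueStore {
    List<String> claimQueues(String deadServer);   // atomically move queues to us
  }

  static final class FailoverWorker extends Thread {
    private final String deadServer;
    private final QueueStore store;

    FailoverWorker(String deadServer, QueueStore store) {
      this.deadServer = deadServer;
      this.store = store;
      setDaemon(true);
    }

    @Override
    public void run() {
      try {
        // Small random sleep so several live servers don't all grab at once.
        Thread.sleep(ThreadLocalRandom.current().nextInt(1000));
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
      }
      for (String queue : store.claimQueues(deadServer)) {
        System.out.println("starting recovered source for " + queue);
      }
    }
  }
}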
  

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 66fac9a..ff8a519 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
-
+
 Apache HBase - Checkstyle Results
@@ -286,10 +286,10 @@
 Warnings  Errors
-2228  0  0  14494
+2231  0  0  14590

 Files
@@ -412,7 +412,7 @@
-org/apache/hadoop/hbase/HColumnDescriptor.java  0  0  27
+org/apache/hadoop/hbase/HColumnDescriptor.java  0  0  39
 org/apache/hadoop/hbase/HConstants.java  0
@@ -432,7 +432,7 @@
-org/apache/hadoop/hbase/HTableDescriptor.java  0  0  31
+org/apache/hadoop/hbase/HTableDescriptor.java  0  0  34
 org/apache/hadoop/hbase/HealthChecker.java  0
@@ -894,6 +894,16 @@
 0  1
+org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java  0  0  12
+org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java  0  0  52
 org/apache/hadoop/hbase/client/CompactType.java  0  0
@@ -937,7 +947,7 @@
-org/apache/hadoop/hbase/client/HBaseAdmin.java  0  0  81
+org/apache/hadoop/hbase/client/HBaseAdmin.java  0  0  80
 org/apache/hadoop/hbase/client/HRegionLocator.java  0
@@ -959,6231 +969,6236 @@
 0  8
+org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java  0  0  1
 org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java  0  0  2
 org/apache/hadoop/hbase/client/Increment.java  0  0  1
 org/apache/hadoop/hbase/client/MasterCallable.java  0  0  2
 org/apache/hadoop/hbase/client/MetaCache.java  0  0  6
 org/apache/hadoop/hbase/client/MetricsConnection.java  0  0  39
 org/apache/hadoop/hbase/client/MultiAction.java  0  0  3
 org/apache/hadoop/hbase/client/MultiResponse.java  0  0  1
 org/apache/hadoop/hbase/client/MultiServerCallable.java  0  0  7
 org/apache/hadoop/hbase/client/Mutation.java  0  0  21
 org/apache/hadoop/hbase/client/Operation.java  0  0  1
 org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java  0  0  1
 org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java  0  0  11
 org/apache/hadoop/hbase/client/Put.java  0  0  19
 org/apache/hadoop/hbase/client/Query.java  0  0  9
 org/apache/hadoop/hbase/client/QuotaStatusCalls.java  0  0  4
 org/apache/hadoop/hbase/client/RawAsyncTable.java  0  0  1
 org/apache/hadoop/hbase/client/RawScanResultConsumer.java  0  0  1
 org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java  0  0  3
 org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java  0  0  1
 org/apache/hadoop/hbase/client/RegionCoprocessorServiceExec.java  0  0  1
 org/apache/hadoop/hbase/client/RegionReplicaUtil.java  0  0  6
 org/apache/hadoop/hbase/client/RegionServerCallable.java  0  0  1
 org/apache/hadoop/hbase/client/Registry.java  0  0  3
 org/apache/hadoop/hbase/client/RegistryFactory.java  0  0  1
 org/apache/hadoop/hbase/client/RequestController.java  0  0  4
 org/apache/hadoop/hbase/client/RequestControllerFactory.java  0  0  2
 org/apache/hadoop/hbase/client/Result.java  0  0  11
 org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java  0  0  6
 org/apache/hadoop/hbase/client/ResultScanner.java  0  0  2
 org/apache/hadoop/hbase/client/RetriesExhaustedException.java  0  0  1
 org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java  0  0  2
 org/apache/hadoop/hbase/client/RetryingCallerInterceptor.java  0  0  4
 org/apache/hadoop/hbase/client/ReversedClientScanner.java  0  0  7
 org/apache/hadoop/hbase/client/ReversedScannerCallable.java  0  0  10
 org/apache/hadoop/hbase/client/RowAccess.java  0  0  2
 org/apache/hadoop/hbase/client/RowMutations.java  0  0  3
 org/apache/hadoop/hbase/client/RpcRetryingCaller.java  0  0  1
 org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java  0  0  1
 org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java  0  0  2
 org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java  0  0  4
 org/apache/hadoop/hbase/client/Scan.java  0  0  6
 org/apache/hadoop/hbase/client/ScannerCallable.java  0  0  5
 org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java  0  0  4
 org/apache/hadoop/hbase/client/SecureBulkLoadClient.java  0  0  17
 org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java  0  0  2
 org/apache/hadoop/hbase/client/SimpleRequestController.java  0  0  8
 org/apache/hadoop/hbase/client/Table.java  0  0  17
 org/apache/hadoop/hbase/client/TableDescriptor.java  0  0  8
-org/apache/hadoop/hbase/client/TableDescriptorBuilder.java  0  0  37
+org/apache/hadoop/hbase/client/TableDescriptorBuilder.java  0  0  43
 org/apache/hadoop/hbase/client/TableSnapshotScanner.java  0  0  4
 org/apache/hadoop/hbase/client/TableState.java  0  0  35
 org/apache/hadoop/hbase/client/VersionInfoUtil.java  0  0  1
 org/apache/hadoop/hbase/client/ZooKeeperRegistry.java  0  0  6
 org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicyFactory.java  0  0  1
 

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/apidocs/org/apache/hadoop/hbase/types/RawBytesTerminated.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/types/RawBytesTerminated.html b/apidocs/org/apache/hadoop/hbase/types/RawBytesTerminated.html
index 3965824..1fc0196 100644
--- a/apidocs/org/apache/hadoop/hbase/types/RawBytesTerminated.html
+++ b/apidocs/org/apache/hadoop/hbase/types/RawBytesTerminated.html
@@ -4,7 +4,7 @@
-RawBytesTerminated (Apache HBase 2.0.0-SNAPSHOT API)
+RawBytesTerminated (Apache HBase 3.0.0-SNAPSHOT API)
@@ -12,7 +12,7 @@
-RawDouble (Apache HBase 2.0.0-SNAPSHOT API)
+RawDouble (Apache HBase 3.0.0-SNAPSHOT API)