[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/client/class-use/TableBuilder.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableBuilder.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableBuilder.html
index 5f469e7..02eb25f 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableBuilder.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableBuilder.html
@@ -151,17 +151,6 @@
 
 
 TableBuilder
-TableBuilder.setWriteBufferSize(long writeBufferSize)
-Set the write buffer size which by default is specified by the
- hbase.client.write.buffer setting.
-
-
-
-TableBuilder
-TableBuilderBase.setWriteBufferSize(long writeBufferSize) 
-
-
-TableBuilder
 TableBuilder.setWriteRpcTimeout(int timeout)
 Set timeout for each write(put, delete) rpc request.
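
For orientation, a minimal client-side sketch of the builder these rows document, assuming a standard Connection; the table name and timeout value are illustrative. The removed setWriteBufferSize rows above apparently reflect the write buffer moving out of TableBuilder (buffered writes go through BufferedMutator).

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class TableBuilderSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection()) {
      // Per-table override of the write RPC timeout documented above;
      // a null pool lets the connection supply its default executor.
      Table table = conn.getTableBuilder(TableName.valueOf("t1"), null)
          .setWriteRpcTimeout(30000) // ms, applied to each put/delete RPC
          .build();
      table.close();
    }
  }
}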
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
index aad311e..36bf254 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
@@ -538,25 +538,25 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.client.Consistency
-org.apache.hadoop.hbase.client.AbstractResponse.ResponseType
-org.apache.hadoop.hbase.client.CompactType
 org.apache.hadoop.hbase.client.Durability
 org.apache.hadoop.hbase.client.SnapshotType
-org.apache.hadoop.hbase.client.RegionLocateType
-org.apache.hadoop.hbase.client.IsolationLevel
-org.apache.hadoop.hbase.client.Scan.ReadType
-org.apache.hadoop.hbase.client.AsyncRequestFutureImpl.Retry
-org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState
-org.apache.hadoop.hbase.client.CompactionState
-org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows
-org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState
 org.apache.hadoop.hbase.client.TableState.State
-org.apache.hadoop.hbase.client.MasterSwitchType
-org.apache.hadoop.hbase.client.RequestController.ReturnCode
+org.apache.hadoop.hbase.client.Consistency
+org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows
 org.apache.hadoop.hbase.client.MobCompactPartitionPolicy
+org.apache.hadoop.hbase.client.IsolationLevel
+org.apache.hadoop.hbase.client.RegionLocateType
+org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState
+org.apache.hadoop.hbase.client.CompactType
 org.apache.hadoop.hbase.client.HBaseAdmin.ReplicationState
 org.apache.hadoop.hbase.client.ScannerCallable.MoreResults
+org.apache.hadoop.hbase.client.MasterSwitchType
+org.apache.hadoop.hbase.client.RequestController.ReturnCode
+org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState
+org.apache.hadoop.hbase.client.AsyncRequestFutureImpl.Retry
+org.apache.hadoop.hbase.client.CompactionState
+org.apache.hadoop.hbase.client.AbstractResponse.ResponseType
+org.apache.hadoop.hbase.client.Scan.ReadType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/client/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-use.html b/devapidocs/org/apache/hadoop/hbase/client/package-use.html
index e6f355b..ff8714f 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-use.html
@@ -857,135 +857,128 @@ service.
 
 
 
-BufferedMutatorImpl
-
- Used to communicate with a single HBase table similar to Table
- but meant for batched, potentially asynchronous puts.
-
-
-
 BufferedMutatorImpl.QueueRowAccess 
 
-
+
 BufferedMutatorParams
 Parameters for instantiating a BufferedMutator.
 
 
-
+
 Cancellable
 This should be implemented by the Get/Scan implementations that
  talk to replica regions.
 
 
-
+
 CancellableRegionServerCallable
 This class is used to unify HTable calls with AsyncProcess Framework.
 
 
-
+
 ClientScanner
 Implements the scanner interface for the HBase client.
 
 
-
+
 ClientServiceCallable
 A RegionServerCallable set to use the Client protocol.
 
 
-
+
 ClientSideRegionScanner
 A client scanner for a region opened for 

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index 1feb52f..5f60543 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -934,6 +934,10 @@
  
 add(Map.Entry>) - Method in class org.apache.hadoop.hbase.client.DelayingRunner
  
+add(Cell) - Method in class org.apache.hadoop.hbase.client.Delete
+
+Add an existing delete marker to this Delete object.
+
 add(long) - Method in class org.apache.hadoop.hbase.client.HTableMultiplexer.AtomicAverageCounter
  
 add(Cell) - Method in class org.apache.hadoop.hbase.client.Increment
@@ -1227,23 +1231,8 @@
  
 addAll(int,
 Collection) - Method in class 
org.apache.hadoop.hbase.util.SortedList
  
-addAllFile(Iterable) - Method in 
class org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.Builder
-
-repeated 
.google.protobuf.compiler.CodeGeneratorResponse.File file = 15;
-
-addAllFileToGenerate(Iterable)
 - Method in class 
org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest.Builder
-
-
- The .proto files that were explicitly listed on the command-line.
-
 addAllocation(long)
 - Method in class org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.Bucket
  
-addAllProtoFile(Iterable) - Method in class 
org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest.Builder
-
-
- FileDescriptorProtos for all files in files_to_generate and everything
- they import.
-
 addAllServers(Collection) - Method in class org.apache.hadoop.hbase.rsgroup.RSGroupInfo
 
 Adds the given servers to the group.
 
@@ -1583,7 +1572,10 @@
 
 addDeleteMarker(Cell) - Method in class org.apache.hadoop.hbase.client.Delete
 
-Advanced use only.
+Deprecated.
+As of release 2.0.0, this will be removed in HBase 3.0.0. Use Delete.add(Cell)
+ instead
+
 addDelFile(FileStatus) - Method in class org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionDelPartition
 
@@ -1692,40 +1684,6 @@
 addFile(FileStatus) - Method in class org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartition
 
-addFile(PluginProtos.CodeGeneratorResponse.File) - Method in class org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.Builder
-
-repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;
-
-addFile(int, PluginProtos.CodeGeneratorResponse.File) - Method in class org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.Builder
-
-repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;
-
-addFile(PluginProtos.CodeGeneratorResponse.File.Builder) - Method in class org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.Builder
-
-repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;
-
-addFile(int, PluginProtos.CodeGeneratorResponse.File.Builder) - Method in class org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.Builder
-
-repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;
-
-addFileBuilder() - Method in class org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.Builder
-
-repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;
-
-addFileBuilder(int) - Method in class org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.Builder
-
-repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;
-
-addFileToGenerate(String) - Method in class org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest.Builder
-
-
- The .proto files that were explicitly listed on the command-line.
-
-addFileToGenerateBytes(ByteString) - Method in class org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest.Builder
-
-
- The .proto files that were explicitly listed on the command-line.
-
 addFilter(List) - Method in class org.apache.hadoop.hbase.filter.FilterList
 
 addFilter(Filter) - Method in class org.apache.hadoop.hbase.filter.FilterList
 
@@ -2086,42 +2044,6 @@
 
 Add property to the namespace.
 
-addProtoFile(DescriptorProtos.FileDescriptorProto) - Method in class org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest.Builder
-
-
- FileDescriptorProtos for all files in files_to_generate and everything
- they import.
-
-addProtoFile(int, DescriptorProtos.FileDescriptorProto) - Method in class org.apache.hadoop.
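
The entries above record the client-API move from the deprecated Delete.addDeleteMarker(Cell) to Delete.add(Cell). A hedged migration sketch (row, marker cell, and table are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;

public class DeleteAddSketch {
  // Re-attach an existing delete marker (e.g. one read back in a raw scan)
  // to a client-side Delete; the cell's row must match the Delete's row,
  // which is why add(Cell) can throw IOException.
  static void reapply(Table table, byte[] row, Cell deleteMarker) throws IOException {
    Delete d = new Delete(row);
    d.add(deleteMarker); // replaces the deprecated addDeleteMarker(Cell)
    table.delete(d);
  }
}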

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
index 35d5549..7f42873 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
@@ -115,2816 +115,2814 @@
 107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
 109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-150import 
org.a

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
index 5c95397..860416b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
@@ -293,7944 +293,7962 @@
 285  final AtomicLong compactionsFailed = 
new AtomicLong(0L);
 286  final AtomicLong 
compactionNumFilesCompacted = new AtomicLong(0L);
 287  final AtomicLong 
compactionNumBytesCompacted = new AtomicLong(0L);
-288
-289  private final WAL wal;
-290  private final HRegionFileSystem fs;
-291  protected final Configuration conf;
-292  private final Configuration baseConf;
-293  private final int 
rowLockWaitDuration;
-294  static final int 
DEFAULT_ROWLOCK_WAIT_DURATION = 3;
-295
-296  // The internal wait duration to 
acquire a lock before read/update
-297  // from the region. It is not per row. 
The purpose of this wait time
-298  // is to avoid waiting a long time 
while the region is busy, so that
-299  // we can release the IPC handler soon 
enough to improve the
-300  // availability of the region server. 
It can be adjusted by
-301  // tuning configuration 
"hbase.busy.wait.duration".
-302  final long busyWaitDuration;
-303  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-304
-305  // If updating multiple rows in one 
call, wait longer,
-306  // i.e. waiting for busyWaitDuration * 
# of rows. However,
-307  // we can limit the max multiplier.
-308  final int maxBusyWaitMultiplier;
-309
-310  // Max busy wait duration. There is no 
point to wait longer than the RPC
-311  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
-312  final long maxBusyWaitDuration;
-313
-314  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
-315  // in bytes
-316  final long maxCellSize;
-317
-318  // negative number indicates infinite 
timeout
-319  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-320  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
-321
-322  private final 
ConcurrentHashMap scannerReadPoints;
+288  final AtomicLong compactionsQueued = 
new AtomicLong(0L);
+289  final AtomicLong flushesQueued = new 
AtomicLong(0L);
+290
+291  private final WAL wal;
+292  private final HRegionFileSystem fs;
+293  protected final Configuration conf;
+294  private final Configuration baseConf;
+295  private final int 
rowLockWaitDuration;
+296  static final int 
DEFAULT_ROWLOCK_WAIT_DURATION = 3;
+297
+298  // The internal wait duration to 
acquire a lock before read/update
+299  // from the region. It is not per row. 
The purpose of this wait time
+300  // is to avoid waiting a long time 
while the region is busy, so that
+301  // we can release the IPC handler soon 
enough to improve the
+302  // availability of the region server. 
It can be adjusted by
+303  // tuning configuration 
"hbase.busy.wait.duration".
+304  final long busyWaitDuration;
+305  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+306
+307  // If updating multiple rows in one 
call, wait longer,
+308  // i.e. waiting for busyWaitDuration * 
# of rows. However,
+309  // we can limit the max multiplier.
+310  final int maxBusyWaitMultiplier;
+311
+312  // Max busy wait duration. There is no 
point to wait longer than the RPC
+313  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
+314  final long maxBusyWaitDuration;
+315
+316  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
+317  // in bytes
+318  final long maxCellSize;
+319
+320  // negative number indicates infinite 
timeout
+321  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+322  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
 323
-324  /**
-325   * The sequence ID that was 
enLongAddered when this region was opened.
-326   */
-327  private long openSeqNum = 
HConstants.NO_SEQNUM;
-328
-329  /**
-330   * The default setting for whether to 
enable on-demand CF loading for
-331   * scan requests to this region. 
Requests can override it.
-332   */
-333  private boolean 
isLoadingCfsOnDemandDefault = false;
-334
-335  private final AtomicInteger 
majorInProgress = new AtomicInteger(0);
-336  private final AtomicInteger 
minorInProgress = new AtomicInteger(0);
-337
-338  //
-339  // Context: During replay we want to 
ensure that we do not lose any data. So, we
-340  // have to be conservative in how we 
replay wals. For each store, we calculate
-341  // the

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.DeleteVersionsNode.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.DeleteVersionsNode.html b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.DeleteVersionsNode.html
new file mode 100644
index 000..b24a18e
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/NewVersionBehaviorTracker.DeleteVersionsNode.html
@@ -0,0 +1,387 @@
+NewVersionBehaviorTracker.DeleteVersionsNode (Apache HBase 3.0.0-SNAPSHOT API)
+
+org.apache.hadoop.hbase.regionserver.querymatcher
+Class NewVersionBehaviorTracker.DeleteVersionsNode
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.regionserver.querymatcher.NewVersionBehaviorTracker.DeleteVersionsNode
+
+
+
+
+
+
+
+Direct Known Subclasses:
+VisibilityNewVersionBehaivorTracker.VisibilityDeleteVersionsNode
+
+
+Enclosing class:
+NewVersionBehaviorTracker
+
+
+
+protected class NewVersionBehaviorTracker.DeleteVersionsNode
+extends java.lang.Object
+A data structure which contains the information we need about what happened
+ between the previous node's mvcc and this node's mvcc. A node means there is
+ a version deletion at that mvcc and ts.
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields 
+
+Modifier and Type
+Field and Description
+
+
+private Map<Long, SortedSet<Long>>
+deletesMap 
+
+
+long
+mvcc 
+
+
+private NavigableMap<Long, SortedSet<Long>>
+mvccCountingMap 
+
+
+long
+ts 
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors 
+
+Modifier
+Constructor and Description
+
+
+protected 
+DeleteVersionsNode() 
+
+
+protected 
+DeleteVersionsNode(long ts,
+  long mvcc) 
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All Methods Instance Methods Concrete Methods 
+
+Modifier and Type
+Method and Description
+
+
+void
+addVersionDelete(Cell cell) 
+
+
+protected NewVersionBehaviorTracker.DeleteVersionsNode
+getDeepCopy() 
+
+
+
+
+
+
+Methods inherited from class java.lang.Object
+clone, equals, 

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.html b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.html
index 2bbddf9..7326fea 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.html
@@ -243,7 +243,7 @@ extends Procedure
-acquireLock,
 addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doAcquireLock,
 doExecute,
 doReleaseLock,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProcIdHashCode,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime, getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout,
 haveSameParent,
 holdLock,
 incChildrenLatch,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 isYieldAfterExecutionStep,
 releaseLock,
 removeStackIndex,
 setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure,
 setLastUpdate, setNonceKey,
 setOwner,
 setOwner,
 setParentProcId,
 setProcId,
 setResult,
 setRootProcId,
 setStackIndexes, setState,
 setSubmittedTime,
 setTimeout,
 setTimeoutFailure,
 shouldWaitClientAck,
 toString,
 toStringClass,
 toStringClassDetails, toStringDetails,
 toStringSimpleSB,
 toStringState,
 tryRunnable,
 updateMetricsOnFinish,
 updateMetricsOnSubmit,
 updateTimestamp,
 wasExecuted
+acquireLock,
 addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doAcquireLock,
 doExecute,
 doReleaseLock,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProcIdHashCode,
 getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState, getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout, haveSameParent,
 holdLock,
 incChildrenLatch,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 isYieldAfterExecutionStep,
 releaseLock,
 removeStackIndex,
 setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure, setLastUpdate,
 setNonceKey,
 setOwner,
 setOwner,
 setParentProcId,
 setProcId,
 setResult,
 setRootProcId, setStackIndexes,
 setState,
 setSubmittedTime,
 setTimeout,
 setTimeoutFailure,
 shouldWaitClientAck,
 toString,
 toS
 tringClass, toStringClassDetails,
 toStringDetails,
 toStringSimpleSB,
 toStringState,
 tryRunnable,
 updateMetricsOnFinish,
 updateMetricsOnSubmit,
 updateTimestamp, wasExecuted
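
ProcedureInMemoryChore is the hook for periodic, never-persisted work inside the procedure executor (the method list above is inherited from Procedure; note the newly listed getProcName). A hedged sketch — treat the generic parameter and the timeout-style constructor as assumptions:

import org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore;

class HeartbeatChore<TEnv> extends ProcedureInMemoryChore<TEnv> {
  HeartbeatChore(int periodMs) {
    super(periodMs); // assumption: the period is passed as a timeout in ms
  }

  @Override
  protected void periodicExecute(TEnv env) {
    // Runs on the executor's timeout thread; keep it short and in-memory.
    System.out.println("chore tick");
  }
}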
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/org/apache/hadoop/hbase/procedure2/SequentialProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/SequentialProcedure.html b/devapidocs/org/apache/hadoop/hbase/procedure2/SequentialProcedure.html
index 1dc865f..51f34d2 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/SequentialProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/SequentialProcedure.html
@@ -238,7 +238,7 @@ extends Procedure
-abort,
 acquireLock,
 addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doAcquireLock,
 doReleaseLock,
 elapsedTime,
 execute,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProcIdHashCode,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime, getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout,
 haveSameParent,
 holdLock,
 incChildrenLatch,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 isYieldAfterExecutionStep,
 releaseLock,
 removeStackIndex,
 rollback,
 setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure, setLastUpdate,
 setNonceKey,
 setOwner,
 setOwner,
 setParentProcId,
 setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState,
 setSubmittedTime,
 setTimeout,
 setTimeoutFailure,
 shouldWaitClientAck,
 toString,
 toStringCla
 ss, toStringClassDetails,
 toStringDetails,
 toStringSimpleSB,
 toStringState,
 tryRunnable,
 updateMetricsOnFinish,
 updateMetricsOnSubmit,
 updateTimestamp, wasExecuted
+abort,
 acquireLock,
 addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doAcquireLock,
 doReleaseLock,
 elapsedTime,
 execute,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 ge

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
index 6c200a1..e6f8c2e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
@@ -1350,726 +1350,719 @@
 1342  }
 1343
 1344  @Override
-1345  public 
MasterProtos.DispatchMergingRegionsResponse dispatchMergingRegions(
-1346  RpcController controller, 
MasterProtos.DispatchMergingRegionsRequest request)
-1347  throws ServiceException {
-1348return 
stub.dispatchMergingRegions(controller, request);
-1349  }
-1350
-1351  @Override
-1352  public 
MasterProtos.AssignRegionResponse assignRegion(RpcController controller,
-1353  
MasterProtos.AssignRegionRequest request) throws ServiceException {
-1354return 
stub.assignRegion(controller, request);
-1355  }
-1356
-1357  @Override
-1358  public 
MasterProtos.UnassignRegionResponse unassignRegion(RpcController controller,
-1359  
MasterProtos.UnassignRegionRequest request) throws ServiceException {
-1360return 
stub.unassignRegion(controller, request);
-1361  }
-1362
-1363  @Override
-1364  public 
MasterProtos.OfflineRegionResponse offlineRegion(RpcController controller,
-1365  
MasterProtos.OfflineRegionRequest request) throws ServiceException {
-1366return 
stub.offlineRegion(controller, request);
-1367  }
-1368
-1369  @Override
-1370  public 
MasterProtos.SplitTableRegionResponse splitRegion(RpcController controller,
-1371  
MasterProtos.SplitTableRegionRequest request) throws ServiceException {
-1372return 
stub.splitRegion(controller, request);
-1373  }
-1374
-1375  @Override
-1376  public 
MasterProtos.DeleteTableResponse deleteTable(RpcController controller,
-1377  
MasterProtos.DeleteTableRequest request) throws ServiceException {
-1378return 
stub.deleteTable(controller, request);
-1379  }
-1380
-1381  @Override
-1382  public 
MasterProtos.TruncateTableResponse truncateTable(RpcController controller,
-1383  
MasterProtos.TruncateTableRequest request) throws ServiceException {
-1384return 
stub.truncateTable(controller, request);
-1385  }
-1386
-1387  @Override
-1388  public 
MasterProtos.EnableTableResponse enableTable(RpcController controller,
-1389  
MasterProtos.EnableTableRequest request) throws ServiceException {
-1390return 
stub.enableTable(controller, request);
-1391  }
-1392
-1393  @Override
-1394  public 
MasterProtos.DisableTableResponse disableTable(RpcController controller,
-1395  
MasterProtos.DisableTableRequest request) throws ServiceException {
-1396return 
stub.disableTable(controller, request);
-1397  }
-1398
-1399  @Override
-1400  public 
MasterProtos.ModifyTableResponse modifyTable(RpcController controller,
-1401  
MasterProtos.ModifyTableRequest request) throws ServiceException {
-1402return 
stub.modifyTable(controller, request);
-1403  }
-1404
-1405  @Override
-1406  public 
MasterProtos.CreateTableResponse createTable(RpcController controller,
-1407  
MasterProtos.CreateTableRequest request) throws ServiceException {
-1408return 
stub.createTable(controller, request);
-1409  }
-1410
-1411  @Override
-1412  public 
MasterProtos.ShutdownResponse shutdown(RpcController controller,
-1413  MasterProtos.ShutdownRequest 
request) throws ServiceException {
-1414return stub.shutdown(controller, 
request);
-1415  }
-1416
-1417  @Override
-1418  public 
MasterProtos.StopMasterResponse stopMaster(RpcController controller,
-1419  MasterProtos.StopMasterRequest 
request) throws ServiceException {
-1420return 
stub.stopMaster(controller, request);
+1345  public 
MasterProtos.AssignRegionResponse assignRegion(RpcController controller,
+1346  
MasterProtos.AssignRegionRequest request) throws ServiceException {
+1347return 
stub.assignRegion(controller, request);
+1348  }
+1349
+1350  @Override
+1351  public 
MasterProtos.UnassignRegionResponse unassignRegion(RpcController controller,
+1352  
MasterProtos.UnassignRegionRequest request) throws ServiceException {
+1353return 
stub.unassignRegion(controller, request);
+1354  }
+1355
+1356  @Override
+1357  public 
MasterProtos.OfflineRegionResponse o
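
The stub methods above are the master-side plumbing behind the public Admin region calls; a hedged client-side counterpart (the encoded region name is illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class AssignSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      byte[] regionName = Bytes.toBytes("t1,,1500000000000.aaaa0000aaaa0000aaaa0000aaaa0000.");
      admin.assign(regionName);          // drives MasterProtos.AssignRegionRequest
      admin.unassign(regionName, false); // drives MasterProtos.UnassignRegionRequest
    }
  }
}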

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
index 8a444f4..f516036 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
@@ -538,7 +538,7 @@
 530}
 531
 532return new 
ClientServiceCallable(conn,
-533tableName, first, 
rpcControllerFactory.newController()) {
+533tableName, first, 
rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) {
 534  @Override
 535  protected byte[] rpcCall() throws 
Exception {
 536SecureBulkLoadClient secureClient 
= null;

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/bulk-loads.html
--
diff --git a/bulk-loads.html b/bulk-loads.html
index 288299f..b7995cd 100644
--- a/bulk-loads.html
+++ b/bulk-loads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase –  
   Bulk Loads in Apache HBase (TM)
@@ -311,7 +311,7 @@ under the License. -->
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-21
+  Last Published: 
2017-07-22
 
 
 



[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/mapreduce/SyncTable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/SyncTable.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/SyncTable.html
index 030a25f..5c7f656 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/SyncTable.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/SyncTable.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class SyncTable
+public class SyncTable
 extends org.apache.hadoop.conf.Configured
 implements org.apache.hadoop.util.Tool
 
@@ -314,7 +314,7 @@ implements org.apache.hadoop.util.Tool
 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -323,7 +323,7 @@ implements org.apache.hadoop.util.Tool
 
 
 SOURCE_HASH_DIR_CONF_KEY
-static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String SOURCE_HASH_DIR_CONF_KEY
+static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String SOURCE_HASH_DIR_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -336,7 +336,7 @@ implements org.apache.hadoop.util.Tool
 
 
 SOURCE_TABLE_CONF_KEY
-static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String SOURCE_TABLE_CONF_KEY
+static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String SOURCE_TABLE_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -349,7 +349,7 @@ implements org.apache.hadoop.util.Tool
 
 
 TARGET_TABLE_CONF_KEY
-static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String TARGET_TABLE_CONF_KEY
+static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String TARGET_TABLE_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -362,7 +362,7 @@ implements org.apache.hadoop.util.Tool
 
 
 SOURCE_ZK_CLUSTER_CONF_KEY
-static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String SOURCE_ZK_CLUSTER_CONF_KEY
+static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String SOURCE_ZK_CLUSTER_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -375,7 +375,7 @@ implements org.apache.hadoop.util.Tool
 
 
 TARGET_ZK_CLUSTER_CONF_KEY
-static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String TARGET_ZK_CLUSTER_CONF_KEY
+static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String TARGET_ZK_CLUSTER_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -388,7 +388,7 @@ implements org.apache.hadoop.util.Tool
 
 
 DRY_RUN_CONF_KEY
-static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String DRY_RUN_CONF_KEY
+static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String DRY_RUN_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -401,7 +401,7 @@ implements org.apache.hadoop.util.Tool
 
 
 sourceHashDir
-org.apache.hadoop.fs.Path sourceHashDir
+org.apache.hadoop.fs.Path sourceHashDir
 
 
 
@@ -410,7 +410,7 @@ implements org.apache.hadoop.util.Tool
 
 
 sourceTableName
-http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String sourceTableName
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String sourceTableName
 
 
 
@@ -419,7 +419,7 @@ implements org.apache.hadoop.util.Tool
 
 
 targetTableName
-http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String targetTableName
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String targetTableName
 
 
 
@@ -428,7 +428,7 @@ implements org.apache.hadoop.util.Tool
 
 
 sourceZkCluster
-http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String sourceZkCluster
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String sourceZkCluster
 
 
 
@@ -437,7 +437,7 @@ implements org.apache.hadoop.util.Tool
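
SyncTable, whose configuration keys are renumbered above, replays differences computed by a prior HashTable job against a target table. A hedged invocation sketch — treat the constructor form and argument order as assumptions; all paths and names are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.SyncTable;
import org.apache.hadoop.util.ToolRunner;

public class SyncTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // --dryrun mirrors DRY_RUN_CONF_KEY: report differences without writing.
    int exit = ToolRunner.run(conf, new SyncTable(conf),
        new String[] { "--dryrun=true", "/hashes/t1", "t1", "t1copy" });
    System.exit(exit);
  }
}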
 

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
index 53cae9a..64d0880 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
@@ -1034,289 +1034,283 @@
 1026  protected List 
tryAtomicRegionLoad(ClientServiceCallable serviceCallable,
 1027  final TableName tableName, final 
byte[] first, final Collection lqis)
 1028  throws IOException {
-1029final List> famPaths = new ArrayList<>(lqis.size());
-1030for (LoadQueueItem lqi : lqis) {
-1031  if 
(!unmatchedFamilies.contains(Bytes.toString(lqi.family))) {
-1032
famPaths.add(Pair.newPair(lqi.family, lqi.hfilePath.toString()));
-1033  }
-1034}
-1035try {
-1036  List toRetry 
= new ArrayList<>();
-1037  Configuration conf = getConf();
-1038  byte[] region = 
RpcRetryingCallerFactory.instantiate(conf,
-1039  null). 
newCaller()
-1040  
.callWithRetries(serviceCallable, Integer.MAX_VALUE);
-1041  if (region == null) {
-1042LOG.warn("Attempt to bulk load 
region containing "
-1043+ 
Bytes.toStringBinary(first) + " into table "
-1044+ tableName  + " with files 
" + lqis
-1045+ " failed.  This is 
recoverable and they will be retried.");
-1046toRetry.addAll(lqis); // return 
lqi's to retry
-1047  }
-1048  // success
-1049  return toRetry;
-1050} catch (IOException e) {
-1051  LOG.error("Encountered 
unrecoverable error from region server, additional details: "
-1052  + 
serviceCallable.getExceptionMessageAdditionalDetail(), e);
-1053  throw e;
-1054}
-1055  }
-1056
-1057  private final String 
toString(List> list) {
-1058StringBuffer sb = new 
StringBuffer();
-1059sb.append("[");
-1060if(list != null){
-1061  for(Pair 
pair: list) {
-1062sb.append("{");
-1063
sb.append(Bytes.toStringBinary(pair.getFirst()));
-1064sb.append(",");
-1065sb.append(pair.getSecond());
-1066sb.append("}");
-1067  }
-1068}
-1069sb.append("]");
-1070return sb.toString();
-1071  }
-1072  private boolean 
isSecureBulkLoadEndpointAvailable() {
-1073String classes = 
getConf().get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
-1074return 
classes.contains("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
-1075  }
-1076
-1077  /**
-1078   * Split a storefile into a top and 
bottom half, maintaining
-1079   * the metadata, recreating bloom 
filters, etc.
-1080   */
-1081  static void splitStoreFile(
-1082  Configuration conf, Path inFile,
-1083  HColumnDescriptor familyDesc, 
byte[] splitKey,
-1084  Path bottomOut, Path topOut) 
throws IOException {
-1085// Open reader with no block cache, 
and not in-memory
-1086Reference topReference = 
Reference.createTopReference(splitKey);
-1087Reference bottomReference = 
Reference.createBottomReference(splitKey);
-1088
-1089copyHFileHalf(conf, inFile, topOut, 
topReference, familyDesc);
-1090copyHFileHalf(conf, inFile, 
bottomOut, bottomReference, familyDesc);
-1091  }
-1092
-1093  /**
-1094   * Copy half of an HFile into a new 
HFile.
-1095   */
-1096  private static void copyHFileHalf(
-1097  Configuration conf, Path inFile, 
Path outFile, Reference reference,
-1098  HColumnDescriptor 
familyDescriptor)
-1099  throws IOException {
-1100FileSystem fs = 
inFile.getFileSystem(conf);
-1101CacheConfig cacheConf = new 
CacheConfig(conf);
-1102HalfStoreFileReader halfReader = 
null;
-1103StoreFileWriter halfWriter = null;
-1104try {
-1105  halfReader = new 
HalfStoreFileReader(fs, inFile, cacheConf, reference, true,
-1106  new AtomicInteger(0), true, 
conf);
-1107  Map fileInfo 
= halfReader.loadFileInfo();
-1108
-1109  int blocksize = 
familyDescriptor.getBlocksize();
-1110  Algorithm compression = 
familyDescriptor.getCompressionType();
-  BloomType bloomFilterType = 
familyDescriptor.getBloomFilterType();
-1112  HFileContext hFileContext = new 
HFileContextBuilder()
-1113  
.withCompression(compression)
-1114  
.withChecksumType(HStore.getChecksumType(conf))
-1115  
.withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
-1116  
.withBlockSize(blocksize)
-1117  

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
index 3439e19..6d650b6 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":9,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":9,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -134,7 +134,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class SplitTableRegionProcedure
+public class SplitTableRegionProcedure
 extends AbstractStateMachineRegionProcedure
 The procedure to split a region in a table.
  Takes lock on the parent region.
@@ -201,22 +201,26 @@ extends Field and Description
 
 
+private byte[]
+bestSplitRow 
+
+
 private HRegionInfo
 daughter_1_HRI 
 
-
+
 private HRegionInfo
 daughter_2_HRI 
 
-
+
 private static RegionState.State[]
 EXPECTED_SPLIT_STATES 
 
-
+
 private static 
org.apache.commons.logging.Log
 LOG 
 
-
+
 private http://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true";
 title="class or interface in java.lang">Boolean
 traceEnabled 
 
@@ -271,9 +275,12 @@ extends 
-private static void
-checkSplitRow(HRegionInfo regionToSplit,
- byte[] splitRow) 
+private void
+checkSplittable(MasterProcedureEnv env,
+   HRegionInfo regionToSplit,
+   byte[] splitRow)
+Check whether the region is splittable
+
 
 
 private AssignProcedure[]
@@ -509,7 +516,7 @@ extends 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -518,7 +525,7 @@ extends 
 
 traceEnabled
-private http://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true";
 title="class or interface in java.lang">Boolean traceEnabled
+private http://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true";
 title="class or interface in java.lang">Boolean traceEnabled
 
 
 
@@ -527,7 +534,7 @@ extends 
 
 daughter_1_HRI
-private HRegionInfo daughter_1_HRI
+private HRegionInfo daughter_1_HRI
 
 
 
@@ -536,7 +543,16 @@ extends 
 
 daughter_2_HRI
-private HRegionInfo daughter_2_HRI
+private HRegionInfo daughter_2_HRI
+
+
+
+
+
+
+
+bestSplitRow
+private byte[] bestSplitRow
 
 
 
@@ -545,7 +561,7 @@ extends 
 
 EXPECTED_SPLIT_STATES
-private static RegionState.State[] EXPECTED_SPLIT_STATES
+private static RegionState.State[] EXPECTED_SPLIT_STATES
 
 
 
@@ -562,7 +578,7 @@ extends 
 
 SplitTableRegionProcedure
-public SplitTableRegionProcedure()
+public SplitTableRegionProcedure()
 
 
 
@@ -571,7 +587,7 @@ extends 
 
 SplitTableRegionProcedure
-public SplitTableRegionProcedure(MasterProcedureEnv env,
+public SplitTableRegionProcedure(MasterProcedureEnv env,
  HRegionInfo regionToSplit,
  byte[] splitRow)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
@@ -589,16 +605,22 @@ extends 
+
 
 
 
 
-checkSplitRow
-private static void checkSplitRow(HRegionInfo regionToSplit,
-  byte[] splitRow)
-   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
+checkSplittable
+private void checkSplittable(MasterProcedureEnv env,
+ HRegionInfo regionToSplit,
+ byte[] splitRow)
+  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
+Check whether the region is splittable
 
+Parameters:
+env - MasterProcedureEnv
+regionToSplit - parent

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
index 0a32350..cf44d69 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
@@ -75,735 +75,796 @@
 067import 
org.apache.hadoop.conf.Configuration;
 068import 
org.apache.hadoop.crypto.CryptoProtocolVersion;
 069import 
org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.FileSystem;
-071import 
org.apache.hadoop.fs.FileSystemLinkResolver;
-072import org.apache.hadoop.fs.Path;
-073import 
org.apache.hadoop.fs.UnresolvedLinkException;
-074import 
org.apache.hadoop.fs.permission.FsPermission;
-075import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-076import 
org.apache.hadoop.hbase.client.ConnectionUtils;
-077import 
org.apache.hadoop.hbase.util.CancelableProgressable;
-078import 
org.apache.hadoop.hbase.util.FSUtils;
-079import 
org.apache.hadoop.hdfs.DFSClient;
-080import 
org.apache.hadoop.hdfs.DFSOutputStream;
-081import 
org.apache.hadoop.hdfs.DistributedFileSystem;
-082import 
org.apache.hadoop.hdfs.protocol.ClientProtocol;
-083import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-084import 
org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-085import 
org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-086import 
org.apache.hadoop.hdfs.protocol.LocatedBlock;
-087import 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-088import 
org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
-089import 
org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
-090import 
org.apache.hadoop.hdfs.protocol.datatransfer.Op;
-091import 
org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
-092import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
-093import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
-094import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
-095import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
-096import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
-097import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
-098import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
-099import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-100import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
-101import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
-102import 
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-103import 
org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-104import 
org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
-105import 
org.apache.hadoop.io.EnumSetWritable;
-106import 
org.apache.hadoop.ipc.RemoteException;
-107import org.apache.hadoop.net.NetUtils;
-108import 
org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
-109import 
org.apache.hadoop.security.token.Token;
-110import 
org.apache.hadoop.util.DataChecksum;
-111
-112/**
-113 * Helper class for implementing {@link 
FanOutOneBlockAsyncDFSOutput}.
-114 */
-115@InterfaceAudience.Private
-116public final class 
FanOutOneBlockAsyncDFSOutputHelper {
-117
-118  private static final Log LOG = 
LogFactory.getLog(FanOutOneBlockAsyncDFSOutputHelper.class);
-119
-120  private 
FanOutOneBlockAsyncDFSOutputHelper() {
-121  }
-122
-123  // use pooled allocator for 
performance.
-124  private static final ByteBufAllocator 
ALLOC = PooledByteBufAllocator.DEFAULT;
-125
-126  // copied from DFSPacket since it is 
package private.
-127  public static final long 
HEART_BEAT_SEQNO = -1L;
-128
-129  // Timeouts for communicating with 
DataNode for streaming writes/reads
-130  public static final int READ_TIMEOUT = 
60 * 1000;
-131  public static final int 
READ_TIMEOUT_EXTENSION = 5 * 1000;
-132  public static final int WRITE_TIMEOUT = 
8 * 60 * 1000;
-133
-134  // helper class for getting Status from 
PipelineAckProto. In hadoop 2.6 or before, there is a
-135  // getStatus method, and for hadoop 2.7 
or after, the status is retrieved from flag. The flag may
-136  // get from proto directly, or combined 
by the reply field of the proto and a ECN object. See
-137  // createPipelineAckStatusGetter for 
more details.
-138  private interface 
PipelineAckStatusGetter {
-139Stat
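
The comment block above describes a version shim: decide once how the running Hadoop exposes the ack status (a getStatus method on 2.6 and earlier, a flag on 2.7 and later), hide the choice behind a tiny interface, and call only that interface on the per-ack hot path. An illustrative reduction of the pattern — the names and probed methods are ours, not the real helper's:

import java.lang.reflect.Method;

interface AckStatusGetter {
  Object get(Object ack) throws Exception;
}

final class AckShimSketch {
  // Probe once at startup; per-ack calls then pay only a Method.invoke.
  static AckStatusGetter create(Class<?> ackClass) throws Exception {
    Method m;
    try {
      m = ackClass.getMethod("getStatus");   // assumed: hadoop 2.6-era accessor
    } catch (NoSuchMethodException e) {
      m = ackClass.getMethod("getFlagList"); // assumed: flag-based accessor (2.7+)
    }
    final Method chosen = m;
    return ack -> chosen.invoke(ack);
  }
}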

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
index 75db22d..99a09f9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
@@ -37,2710 +37,2816 @@
 029import java.util.List;
 030import java.util.Map;
 031import java.util.Optional;
-032import 
java.util.concurrent.CompletableFuture;
-033import java.util.concurrent.TimeUnit;
-034import 
java.util.concurrent.atomic.AtomicReference;
-035import java.util.function.BiConsumer;
-036import java.util.regex.Pattern;
-037import java.util.stream.Collectors;
-038
-039import 
com.google.common.annotations.VisibleForTesting;
-040
-041import io.netty.util.Timeout;
-042import io.netty.util.TimerTask;
-043
-044import java.util.stream.Stream;
-045
-046import org.apache.commons.io.IOUtils;
-047import org.apache.commons.logging.Log;
-048import 
org.apache.commons.logging.LogFactory;
-049import 
org.apache.hadoop.hbase.ClusterStatus;
-050import 
org.apache.hadoop.hbase.HRegionInfo;
-051import 
org.apache.hadoop.hbase.HRegionLocation;
-052import 
org.apache.hadoop.hbase.MetaTableAccessor;
-053import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import 
org.apache.hadoop.hbase.NotServingRegionException;
-055import 
org.apache.hadoop.hbase.ProcedureInfo;
-056import 
org.apache.hadoop.hbase.RegionLoad;
-057import 
org.apache.hadoop.hbase.RegionLocations;
-058import 
org.apache.hadoop.hbase.ServerName;
-059import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-060import 
org.apache.hadoop.hbase.HConstants;
-061import 
org.apache.hadoop.hbase.TableExistsException;
-062import 
org.apache.hadoop.hbase.TableName;
-063import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-064import 
org.apache.hadoop.hbase.TableNotDisabledException;
-065import 
org.apache.hadoop.hbase.TableNotEnabledException;
-066import 
org.apache.hadoop.hbase.TableNotFoundException;
-067import 
org.apache.hadoop.hbase.UnknownRegionException;
-068import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-069import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-072import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-074import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import 
org.apache.hadoop.hbase.replication.ReplicationException;
-080import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-098import 
org.apache.hadoop.hbase.shaded.protobuf

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
index 71844ce..75db22d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
@@ -105,2564 +105,2642 @@
 097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
 099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.Mast

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
index f5bc73a..feb42ea 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
@@ -4044,345 +4044,330 @@
 4036
 4037  @Override
 4038  public void drainRegionServers(List<ServerName> servers) throws IOException {
-4039    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4040    for (ServerName server : servers) {
-4041      // Parse to ServerName to do simple validation.
-4042      ServerName.parseServerName(server.toString());
-4043      pbServers.add(ProtobufUtil.toServerName(server));
-4044    }
-4045
-4046    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4047      @Override
-4048      public Void rpcCall() throws ServiceException {
-4049        DrainRegionServersRequest req =
-4050            DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build();
-4051        master.drainRegionServers(getRpcController(), req);
-4052        return null;
-4053      }
-4054    });
-4055  }
-4056
-4057  @Override
-4058  public List<ServerName> listDrainingRegionServers() throws IOException {
-4059    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
-4060        getRpcControllerFactory()) {
-4061      @Override
-4062      public List<ServerName> rpcCall() throws ServiceException {
-4063        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
-4064        List<ServerName> servers = new ArrayList<>();
-4065        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
-4066            .getServerNameList()) {
-4067          servers.add(ProtobufUtil.toServerName(server));
-4068        }
-4069        return servers;
-4070      }
-4071    });
-4072  }
-4073
-4074  @Override
-4075  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
-4076    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4077    for (ServerName server : servers) {
-4078      pbServers.add(ProtobufUtil.toServerName(server));
-4079    }
-4080
-4081    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4082      @Override
-4083      public Void rpcCall() throws ServiceException {
-4084        RemoveDrainFromRegionServersRequest req = RemoveDrainFromRegionServersRequest.newBuilder()
-4085            .addAllServerName(pbServers).build();
-4086        master.removeDrainFromRegionServers(getRpcController(), req);
-4087        return null;
+4039    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4040      @Override
+4041      public Void rpcCall() throws ServiceException {
+4042        master.drainRegionServers(getRpcController(),
+4043          RequestConverter.buildDrainRegionServersRequest(servers));
+4044        return null;
+4045      }
+4046    });
+4047  }
+4048
+4049  @Override
+4050  public List<ServerName> listDrainingRegionServers() throws IOException {
+4051    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
+4052        getRpcControllerFactory()) {
+4053      @Override
+4054      public List<ServerName> rpcCall() throws ServiceException {
+4055        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
+4056        List<ServerName> servers = new ArrayList<>();
+4057        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
+4058            .getServerNameList()) {
+4059          servers.add(ProtobufUtil.toServerName(server));
+4060        }
+4061        return servers;
+4062      }
+4063    });
+4064  }
+4065
+4066  @Override
+4067  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
+4068    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4069      @Override
+4070      public Void rpcCall() throws ServiceException {
+4071        master.removeDrainFromRegionServers(getRpcController(), RequestConverter.buildRemoveDrainFromRegionServersRequest(servers));
+4072        return null;
+4073      }
+4074    });
+4075  }
+4076
+4077  @Override
+4078  public List<TableCFs> listReplicatedTableCFs() throws IOException {
+4079    List<TableCFs> replicatedTableCFs = new ArrayList<>();
+4080    HTableDescriptor[] tables = listTables();
+4081
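The rewrite above only moves request building into RequestConverter; the three admin entry points keep their shape, and together they form the decommission workflow. A hedged usage sketch (conn is a hypothetical open Connection):

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class DrainExample {
  // Mark a server as draining so no new regions land on it, inspect the
  // draining set, then clear the mark again.
  static void drainAndRestore(Connection conn, ServerName sn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      admin.drainRegionServers(Arrays.asList(sn));
      List<ServerName> draining = admin.listDrainingRegionServers();
      System.out.println("Currently draining: " + draining);
      admin.removeDrainFromRegionServers(Arrays.asList(sn));
    }
  }
}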

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/ImportTsv.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/ImportTsv.html 
b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/ImportTsv.html
index 1d5c1db..84104c6 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/ImportTsv.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/ImportTsv.html
@@ -71,732 +71,734 @@
 063import 
com.google.common.base.Preconditions;
 064import com.google.common.base.Splitter;
 065import com.google.common.collect.Lists;
-066
-067/**
-068 * Tool to import data from a TSV file.
-069 *
-070 * This tool is rather simplistic - it 
doesn't do any quoting or
-071 * escaping, but is useful for many data 
loads.
-072 *
-073 * @see ImportTsv#usage(String)
-074 */
-075@InterfaceAudience.Public
-076public class ImportTsv extends Configured 
implements Tool {
-077
-078  protected static final Log LOG = 
LogFactory.getLog(ImportTsv.class);
+066import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+067import 
org.apache.hadoop.hbase.client.TableDescriptor;
+068
+069/**
+070 * Tool to import data from a TSV file.
+071 *
+072 * This tool is rather simplistic - it 
doesn't do any quoting or
+073 * escaping, but is useful for many data 
loads.
+074 *
+075 * @see ImportTsv#usage(String)
+076 */
+077@InterfaceAudience.Public
+078public class ImportTsv extends Configured 
implements Tool {
 079
-080  final static String NAME = 
"importtsv";
+080  protected static final Log LOG = 
LogFactory.getLog(ImportTsv.class);
 081
-082  public final static String 
MAPPER_CONF_KEY = "importtsv.mapper.class";
-083  public final static String 
BULK_OUTPUT_CONF_KEY = "importtsv.bulk.output";
-084  public final static String 
TIMESTAMP_CONF_KEY = "importtsv.timestamp";
-085  public final static String 
JOB_NAME_CONF_KEY = "mapreduce.job.name";
-086  // TODO: the rest of these configs are 
used exclusively by TsvImporterMapper.
-087  // Move them out of the tool and let 
the mapper handle its own validation.
-088  public final static String 
DRY_RUN_CONF_KEY = "importtsv.dry.run";
-089  // If true, bad lines are logged to 
stderr. Default: false.
-090  public final static String 
LOG_BAD_LINES_CONF_KEY = "importtsv.log.bad.lines";
-091  public final static String 
SKIP_LINES_CONF_KEY = "importtsv.skip.bad.lines";
-092  public final static String 
SKIP_EMPTY_COLUMNS = "importtsv.skip.empty.columns";
-093  public final static String 
COLUMNS_CONF_KEY = "importtsv.columns";
-094  public final static String 
SEPARATOR_CONF_KEY = "importtsv.separator";
-095  public final static String 
ATTRIBUTE_SEPERATOR_CONF_KEY = "attributes.seperator";
-096  //This config is used to propagate 
credentials from parent MR jobs which launch
-097  //ImportTSV jobs. SEE 
IntegrationTestImportTsv.
-098  public final static String 
CREDENTIALS_LOCATION = "credentials_location";
-099  final static String DEFAULT_SEPARATOR = 
"\t";
-100  final static String 
DEFAULT_ATTRIBUTES_SEPERATOR = "=>";
-101  final static String 
DEFAULT_MULTIPLE_ATTRIBUTES_SEPERATOR = ",";
-102  final static Class DEFAULT_MAPPER = 
TsvImporterMapper.class;
-103  public final static String 
CREATE_TABLE_CONF_KEY = "create.table";
-104  public final static String 
NO_STRICT_COL_FAMILY = "no.strict";
-105  /**
-106   * If table didn't exist and was 
created in dry-run mode, this flag is
-107   * flipped to delete it when MR ends.
-108   */
-109  private static boolean 
DRY_RUN_TABLE_CREATED;
-110
-111  public static class TsvParser {
-112/**
-113 * Column families and qualifiers 
mapped to the TSV columns
-114 */
-115private final byte[][] families;
-116private final byte[][] qualifiers;
-117
-118private final byte separatorByte;
+082  final static String NAME = 
"importtsv";
+083
+084  public final static String 
MAPPER_CONF_KEY = "importtsv.mapper.class";
+085  public final static String 
BULK_OUTPUT_CONF_KEY = "importtsv.bulk.output";
+086  public final static String 
TIMESTAMP_CONF_KEY = "importtsv.timestamp";
+087  public final static String 
JOB_NAME_CONF_KEY = "mapreduce.job.name";
+088  // TODO: the rest of these configs are 
used exclusively by TsvImporterMapper.
+089  // Move them out of the tool and let 
the mapper handle its own validation.
+090  public final static String 
DRY_RUN_CONF_KEY = "importtsv.dry.run";
+091  // If true, bad lines are logged to 
stderr. Default: false.
+092  public final static String 
LOG_BAD_LINES_CONF_KEY = "importtsv.log.bad.lines";
+093  public final static String 
SKIP_LINES_CONF_KEY = "importtsv.skip.bad.lines";
+094  public final static String 
SKIP_EMPTY_COLUMNS = "importtsv.skip.empty.columns";
+095  public final static String 
COLUMNS_CONF_KEY = "importtsv.columns";
+096  public final static String 
SEPARATOR_CONF_KEY = "importtsv.separator";
+097  public final s
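A hedged sketch of driving the tool with the configuration keys defined above, via the standard Tool/ToolRunner path (table name and paths are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.ImportTsv;
import org.apache.hadoop.util.ToolRunner;

public class ImportTsvDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // First TSV column becomes the row key; second lands in family d, qualifier c1.
    conf.set(ImportTsv.COLUMNS_CONF_KEY, "HBASE_ROW_KEY,d:c1");
    conf.set(ImportTsv.SEPARATOR_CONF_KEY, ",");                // override the "\t" default
    conf.setBoolean(ImportTsv.SKIP_LINES_CONF_KEY, true);       // tolerate malformed rows
    conf.set(ImportTsv.BULK_OUTPUT_CONF_KEY, "/tmp/importtsv"); // emit HFiles for bulk load
    // Tool arguments: table name, then the input directory.
    System.exit(ToolRunner.run(conf, new ImportTsv(), new String[] { "myTable", "/input" }));
  }
}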

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
index 249d4a0..7369fdf 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
@@ -65,12 +65,12 @@
 057import 
com.google.common.base.Preconditions;
 058
 059/**
-060 * Reads {@link HFile} version 2 blocks to HFiles and via {@link Cacheable} Interface to caches.
-061 * Version 2 was introduced in hbase-0.92.0. No longer has support for version 1 blocks since
-062 * hbase-1.3.0.
-063 *
-064 * <p>Version 1 was the original file block. Version 2 was introduced when we changed the hbase file
-065 * format to support multi-level block indexes and compound bloom filters (HBASE-3857).
+060 * Cacheable Blocks of an {@link HFile} version 2 file.
+061 * Version 2 was introduced in hbase-0.92.0.
+062 *
+063 * <p>Version 1 was the original file block. Version 2 was introduced when we changed the hbase file
+064 * format to support multi-level block indexes and compound bloom filters (HBASE-3857). Support
+065 * for Version 1 was removed in hbase-1.3.0.
 066 *
 067 * <h3>HFileBlock: Version 2</h3>
 068 * In version 2, a block is structured as follows:
@@ -120,582 +120,582 @@
 112public class HFileBlock implements Cacheable {
 113  private static final Log LOG = LogFactory.getLog(HFileBlock.class);
 114
-115  /** Type of block. Header field 0. */
-116  private BlockType blockType;
-117
-118  /**
-119   * Size on disk excluding header, including checksum. Header field 1.
-120   * @see Writer#putHeader(byte[], int, int, int, int)
-121   */
-122  private int onDiskSizeWithoutHeader;
-123
-124  /**
-125   * Size of pure data. Does not include header or checksums. Header field 2.
-126   * @see Writer#putHeader(byte[], int, int, int, int)
-127   */
-128  private int uncompressedSizeWithoutHeader;
-129
-130  /**
-131   * The offset of the previous block on disk. Header field 3.
-132   * @see Writer#putHeader(byte[], int, int, int, int)
-133   */
-134  private long prevBlockOffset;
-135
-136  /**
-137   * Size on disk of header + data. Excludes checksum. Header field 6,
-138   * OR calculated from {@link #onDiskSizeWithoutHeader} when using HDFS checksum.
-139   * @see Writer#putHeader(byte[], int, int, int, int)
-140   */
-141  private int onDiskDataSizeWithHeader;
-142
-143
-144  /**
-145   * The in-memory representation of the hfile block. Can be on or offheap. Can be backed by
-146   * a single ByteBuffer or by many. Make no assumptions.
-147   *
-148   * <p>Be careful reading from this buf. Duplicate and work on the duplicate or if
-149   * not, be sure to reset position and limit else trouble down the road.
-150   *
-151   * <p>TODO: Make this read-only once made.
-152   *
-153   * <p>We are using the ByteBuff type. ByteBuffer is not extensible yet we need to be able to have
-154   * a ByteBuffer-like API across multiple ByteBuffers reading from a cache such as BucketCache.
-155   * So, we have this ByteBuff type. Unfortunately, it is spread all about HFileBlock. Would be
-156   * good if could be confined to cache-use only but hard-to-do.
-157   */
-158  private ByteBuff buf;
-159
-160  /** Meta data that holds meta information on the hfileblock.
-161   */
-162  private HFileContext fileContext;
-163
-164  /**
-165   * The offset of this block in the file. Populated by the reader for
-166   * convenience of access. This offset is not part of the block header.
-167   */
-168  private long offset = UNSET;
-169
-170  private MemoryType memType = MemoryType.EXCLUSIVE;
-171
-172  /**
-173   * The on-disk size of the next block, including the header and checksums if present, obtained by
-174   * peeking into the first {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the next block's
-175   * header, or UNSET if unknown.
-176   *
-177   * Blocks try to carry the size of the next block to read in this data member. They will even have
-178   * this value when served from cache. Could save a seek in the case where we are iterating through
-179   * a file and some of the blocks come from cache. If from cache, then having this info to hand
-180   * will save us doing a seek to read the header so we can read the body of a block.
-181   * TODO: see how effective this is at saving seeks.
-182   */
-183  private int nextBlockOnDiskSize = UNSET;
-184
-185  /**
-186   * On a checksum failure, do these many succeeding read requests using hdfs checksums before
-187   * auto-reenabling hbase checksum verification.
-188   */
-189  static final int CHECKSUM_VERIFIC
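The removed field comments above double as a description of the on-disk v2 header. A hedged sketch of decoding those header fields in order (sizes inferred from the field types; the checksum-related fields are skipped):

import java.nio.ByteBuffer;

// Decode the v2 block header fields named in the javadoc above, in on-disk order.
final class HeaderFieldsSketch {
  static void read(ByteBuffer header) {
    byte[] magic = new byte[8];                          // field 0: block type magic
    header.get(magic);
    int onDiskSizeWithoutHeader = header.getInt();       // field 1
    int uncompressedSizeWithoutHeader = header.getInt(); // field 2
    long prevBlockOffset = header.getLong();             // field 3
    // With HBase checksums enabled, a checksum type byte and a bytes-per-checksum
    // int (fields 4 and 5) and onDiskDataSizeWithHeader (field 6) follow.
  }
}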


[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/client/AsyncAdminBuilder.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdminBuilder.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdminBuilder.html
new file mode 100644
index 000..6e29bdd
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdminBuilder.html
@@ -0,0 +1,385 @@
+
+org.apache.hadoop.hbase.client
+Interface 
AsyncAdminBuilder
+
+
+
+
+
+
+All Known Implementing Classes:
+AsyncAdminBuilderBase
+
+
+
+@InterfaceAudience.Public
+public interface AsyncAdminBuilder<T extends AsyncAdmin>
+For creating AsyncAdmin. The implementation 
should have default configurations set before
+ returning the builder to user. So users are free to only set the configs they 
care about to
+ create a new AsyncAdmin instance.
+
+
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All Methods Instance Methods Abstract Methods Default Methods 
+
+Modifier and Type
+Method and Description
+
+
+T
+build()
+Create a AsyncAdmin 
instance.
+
+
+
+AsyncAdminBuilder
+setMaxAttempts(int maxAttempts)
+Set the max attempt times for an admin operation.
+
+
+
+default AsyncAdminBuilder
+setMaxRetries(int maxRetries)
+Set the max retry times for an admin operation.
+
+
+
+AsyncAdminBuilder
+setOperationTimeout(long timeout,
+   http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+Set timeout for a whole admin operation.
+
+
+
+AsyncAdminBuilder
+setRetryPause(long timeout,
+ http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+Set the base pause time for retrying.
+
+
+
+AsyncAdminBuilder
+setRpcTimeout(long timeout,
+ http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+Set timeout for each rpc request.
+
+
+
+AsyncAdminBuilder
+setStartLogErrorsCnt(int startLogErrorsCnt)
+Set the number of retries that are allowed before we start 
to log.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Method Detail
+
+
+
+
+
+setOperationTimeout
+AsyncAdminBuilder setOperationTimeout(long timeout,
+ http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+Set timeout for a whole admin operation. Operation timeout 
and max attempt times(or max retry
+ times) are both limitations for retrying, we will stop retrying when we reach 
any of the
+ limitations.
+
+Parameters:
+timeout - 
+unit - 
+Returns:
+this for invocation chaining
+
+
+
+
+
+
+
+
+setRpcTimeout
+AsyncAdminBuilder setRpcTimeout(long timeout,
+   http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+Set timeout for each rpc request.
+
+Parameters:
+timeout - 
+unit - 
+Returns:
+this for invocation chaining
+
+
+
+
+
+
+
+
+setRetryPause
+AsyncAdminBuilder setRetryPause(long timeout,
+   http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+Set the base pause time for retrying. We use an exponential 
policy to generate sleep time when
+ retrying.
+
+Parameters:
+timeout - 
+unit -
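Since every setter returns the builder, configuration chains into build(). A hedged sketch of the intended call pattern, assuming an AsyncConnection entry point named getAdminBuilder() (the setters are the ones documented above):

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;

public class AsyncAdminBuilderExample {
  static AsyncAdmin tunedAdmin(AsyncConnection conn) {
    return conn.getAdminBuilder()                   // assumed entry point
        .setOperationTimeout(60, TimeUnit.SECONDS)  // cap for the whole op, retries included
        .setRpcTimeout(10, TimeUnit.SECONDS)        // cap for each rpc
        .setRetryPause(100, TimeUnit.MILLISECONDS)  // base of the exponential backoff
        .setMaxRetries(15)                          // default method; doc defines it via max attempts
        .setStartLogErrorsCnt(3)                    // stay quiet for the first few failures
        .build();
  }
}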

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
index dc12c09..82506d2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
@@ -54,2261 +54,2259 @@
 046import org.apache.commons.io.IOUtils;
 047import org.apache.commons.logging.Log;
 048import 
org.apache.commons.logging.LogFactory;
-049import 
org.apache.directory.api.util.OptionalComponentsMonitor;
-050import 
org.apache.hadoop.hbase.HRegionInfo;
-051import 
org.apache.hadoop.hbase.HRegionLocation;
-052import 
org.apache.hadoop.hbase.MetaTableAccessor;
-053import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import 
org.apache.hadoop.hbase.NotServingRegionException;
-055import 
org.apache.hadoop.hbase.ProcedureInfo;
-056import 
org.apache.hadoop.hbase.RegionLocations;
-057import 
org.apache.hadoop.hbase.ServerName;
-058import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-059import 
org.apache.hadoop.hbase.HConstants;
-060import 
org.apache.hadoop.hbase.TableExistsException;
-061import 
org.apache.hadoop.hbase.TableName;
-062import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-063import 
org.apache.hadoop.hbase.TableNotDisabledException;
-064import 
org.apache.hadoop.hbase.TableNotEnabledException;
-065import 
org.apache.hadoop.hbase.TableNotFoundException;
-066import 
org.apache.hadoop.hbase.UnknownRegionException;
-067import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-068import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-069import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-072import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-074import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import 
org.apache.hadoop.hbase.replication.ReplicationException;
-080import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-104import 
org.apache

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncHBaseAdmin.TableOperator.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncHBaseAdmin.TableOperator.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncHBaseAdmin.TableOperator.html
index 7173f6a..0e1420e 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncHBaseAdmin.TableOperator.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncHBaseAdmin.TableOperator.html
@@ -105,7 +105,7 @@
 
 
 
-private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
+private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
 AsyncHBaseAdmin.batchTableOperations(http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true";
 title="class or interface in java.util.regex">Pattern pattern,
 AsyncHBaseAdmin.TableOperator operator,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in 
java.lang">String operationType) 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactType.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactType.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactType.html
index 21a80de..a5ff0bb 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactType.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactType.html
@@ -127,15 +127,6 @@ the order they are declared.
 
 
 
-private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true"; title="class or interface in java.util.concurrent">CompletableFuture<Void>
-AsyncHBaseAdmin.compact(TableName tableName,
-   byte[] columnFamily,
-   boolean major,
-   CompactType compactType)
-Compact column family of a table, Asynchronous operation 
even if CompletableFuture.get()
-
-
-
 private void
 HBaseAdmin.compact(TableName tableName,
byte[] columnFamily,
@@ -144,7 +135,7 @@ the order they are declared.
 Compact a table.
 
 
-
+
 void
 Admin.compact(TableName tableName,
byte[] columnFamily,
@@ -152,7 +143,7 @@ the order they are declared.
 Compact a column family within a table.
 
 
-
+
 void
 HBaseAdmin.compact(TableName tableName,
byte[] columnFamily,
@@ -160,20 +151,29 @@ the order they are declared.
 Compact a column family within a table.
 
 
-
+
 void
 Admin.compact(TableName tableName,
CompactType compactType)
 Compact a table.
 
 
-
+
 void
 HBaseAdmin.compact(TableName tableName,
CompactType compactType)
 Compact a table.
 
 
+
+private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true"; title="class or interface in java.util.concurrent">CompletableFuture<Void>
+AsyncHBaseAdmin.compact(TableName tableName,
+   http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true"; title="class or interface in java.util">Optional<byte[]> columnFamily,
+   boolean major,
+   CompactType compactType)
+Compact column family of a table, Asynchronous operation 
even if CompletableFuture.get()
+
+
 
 CompactionState
 Admin.getCompactionState(TableName tableName,
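A hedged sketch of the synchronous Admin entry points from the table above (connection setup elided to a hypothetical conn):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactType;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactExample {
  static void compactFamily(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("myTable");
      admin.compact(tn, CompactType.NORMAL);                       // whole table
      admin.compact(tn, Bytes.toBytes("cf"), CompactType.NORMAL);  // one column family
    }
  }
}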

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/org/apache/hadoop/hbase/client/class-use/RawAsyncTable.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RawAsyncTable.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RawAsyncTable.html
index be3a1a3..52ea343 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RawAsyncTable.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RawAsyncTable.html
@@ -125,31 +125,40 @@
int rowUpperLimit) 
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
index 49714a2..d0f1508 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
@@ -172,1438 +172,1562 @@
 164    Map<ServerName, List<HRegionInfo>> clusterState;
 165
 166    protected final RackManager rackManager;
-167
-168    protected Cluster(
-169        Map<ServerName, List<HRegionInfo>> clusterState,
-170        Map<String, Deque<BalancerRegionLoad>> loads,
-171        RegionLocationFinder regionFinder,
-172        RackManager rackManager) {
-173      this(null, clusterState, loads, regionFinder, rackManager);
-174    }
-175
-176    @SuppressWarnings("unchecked")
-177    protected Cluster(
-178        Collection<HRegionInfo> unassignedRegions,
-179        Map<ServerName, List<HRegionInfo>> clusterState,
-180        Map<String, Deque<BalancerRegionLoad>> loads,
-181        RegionLocationFinder regionFinder,
-182        RackManager rackManager) {
-183
-184      if (unassignedRegions == null) {
-185        unassignedRegions = EMPTY_REGION_LIST;
-186      }
+167    // Maps region -> rackIndex -> locality of region on rack
+168    private float[][] rackLocalities;
+169    // Maps localityType -> region -> [server|rack]Index with highest locality
+170    private int[][] regionsToMostLocalEntities;
+171
+172    protected Cluster(
+173        Map<ServerName, List<HRegionInfo>> clusterState,
+174        Map<String, Deque<BalancerRegionLoad>> loads,
+175        RegionLocationFinder regionFinder,
+176        RackManager rackManager) {
+177      this(null, clusterState, loads, regionFinder, rackManager);
+178    }
+179
+180    @SuppressWarnings("unchecked")
+181    protected Cluster(
+182        Collection<HRegionInfo> unassignedRegions,
+183        Map<ServerName, List<HRegionInfo>> clusterState,
+184        Map<String, Deque<BalancerRegionLoad>> loads,
+185        RegionLocationFinder regionFinder,
+186        RackManager rackManager) {
 187
-188      serversToIndex = new HashMap<>();
-189      hostsToIndex = new HashMap<>();
-190      racksToIndex = new HashMap<>();
-191      tablesToIndex = new HashMap<>();
-192
-193      //TODO: We should get the list of tables from master
-194      tables = new ArrayList<>();
-195      this.rackManager = rackManager != null ? rackManager : new DefaultRackManager();
+188      if (unassignedRegions == null) {
+189        unassignedRegions = EMPTY_REGION_LIST;
+190      }
+191
+192      serversToIndex = new HashMap<>();
+193      hostsToIndex = new HashMap<>();
+194      racksToIndex = new HashMap<>();
+195      tablesToIndex = new HashMap<>();
 196
-197      numRegions = 0;
-198
-199      List<List<Integer>> serversPerHostList = new ArrayList<>();
-200      List<List<Integer>> serversPerRackList = new ArrayList<>();
-201      this.clusterState = clusterState;
-202      this.regionFinder = regionFinder;
-203
-204      // Use servername and port as there can be dead servers in this list. We want everything with
-205      // a matching hostname and port to have the same index.
-206      for (ServerName sn : clusterState.keySet()) {
-207        if (sn == null) {
-208          LOG.warn("TODO: Enable TRACE on BaseLoadBalancer. Empty servername); " +
-209              "skipping; unassigned regions?");
-210          if (LOG.isTraceEnabled()) {
-211            LOG.trace("EMPTY SERVERNAME " + clusterState.toString());
-212          }
-213          continue;
-214        }
-215        if (serversToIndex.get(sn.getAddress().toString()) == null) {
-216          serversToIndex.put(sn.getHostAndPort(), numServers++);
-217        }
-218        if (!hostsToIndex.containsKey(sn.getHostname())) {
-219          hostsToIndex.put(sn.getHostname(), numHosts++);
-220          serversPerHostList.add(new ArrayList<>(1));
+197      //TODO: We should get the list of tables from master
+198      tables = new ArrayList<>();
+199      this.rackManager = rackManager != null ? rackManager : new DefaultRackManager();
+200
+201      numRegions = 0;
+202
+203      List<List<Integer>> serversPerHostList = new ArrayList<>();
+204      List<List<Integer>> serversPerRackList = new ArrayList<>();
+205      this.clusterState = clusterState;
+206      this.regionFinder = regionFinder;
+207
+208      // Use servername and port as there can be dead servers in this list. We want everything with
+209      // a matching hostname and port to have the same index.
+210      for (ServerName sn : clusterState.keySet(
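The constructor above interns servers, hosts, racks, and tables into dense integer indexes so that per-entity state (loads, the rackLocalities matrix, the serversPerHostList lists) can live in flat arrays. The interning step in isolation, as a minimal sketch:

import java.util.HashMap;
import java.util.Map;

final class Interner {
  private final Map<String, Integer> toIndex = new HashMap<>();
  private int next = 0;

  // Return a stable dense index for the key, assigning the next slot on first sight.
  int intern(String key) {
    Integer idx = toIndex.get(key);
    if (idx == null) {
      idx = next++;
      toIndex.put(key, idx);
    }
    return idx;
  }
}

One interner per dimension (server, host, rack, table) is enough; the balancer's cost functions then iterate over int-indexed arrays instead of hash maps.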

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html 
b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
index e9fdf8f..8cc30f5 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
@@ -393,58 +393,58 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 private HMaster m_master
 
 
-
+
 
 
 
 
-m_serverManager
-private ServerManager m_serverManager
+m_deadServers
+private http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">Set m_deadServers
 
 
-
+
 
 
 
 
-m_serverManager__IsNotDefault
-private boolean m_serverManager__IsNotDefault
+m_deadServers__IsNotDefault
+private boolean m_deadServers__IsNotDefault
 
 
-
+
 
 
 
 
-m_catalogJanitorEnabled
-private boolean m_catalogJanitorEnabled
+m_serverManager
+private ServerManager m_serverManager
 
 
-
+
 
 
 
 
-m_catalogJanitorEnabled__IsNotDefault
-private boolean m_catalogJanitorEnabled__IsNotDefault
+m_serverManager__IsNotDefault
+private boolean m_serverManager__IsNotDefault
 
 
-
+
 
 
 
 
-m_assignmentManager
-private AssignmentManager m_assignmentManager
+m_servers
+private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List m_servers
 
 
-
+
 
 
 
 
-m_assignmentManager__IsNotDefault
-private boolean m_assignmentManager__IsNotDefault
+m_servers__IsNotDefault
+private boolean m_servers__IsNotDefault
 
 
 
@@ -465,58 +465,58 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 private boolean m_metaLocation__IsNotDefault
 
 
-
+
 
 
 
 
-m_deadServers
-private http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">Set m_deadServers
+m_format
+private http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String m_format
 
 
-
+
 
 
 
 
-m_deadServers__IsNotDefault
-private boolean m_deadServers__IsNotDefault
+m_format__IsNotDefault
+private boolean m_format__IsNotDefault
 
 
-
+
 
 
 
 
-m_frags
-private http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true";
 title="class or interface in java.lang">Integer> m_frags
+m_assignmentManager
+private AssignmentManager m_assignmentManager
 
 
-
+
 
 
 
 
-m_frags__IsNotDefault
-private boolean m_frags__IsNotDefault
+m_assignmentManager__IsNotDefault
+private boolean m_assignmentManager__IsNotDefault
 
 
-
+
 
 
 
 
-m_format
-private http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String m_format
+m_catalogJanitorEnabled
+private boolean m_catalogJanitorEnabled
 
 
-
+
 
 
 
 
-m_format__IsNotDefault
-private boolean m_format__IsNotDefault
+m_catalogJanitorEnabled__IsNotDefault
+private boolean m_catalogJanitorEnabled__IsNotDefault
 
 
 
@@ -537,22 +537,22 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 private boolean m_filter__IsNotDefault
 
 
-
+
 
 
 
 
-m_servers
-private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List m_servers
+m_frags
+private http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true";
 title="class or interface in java.lang">Integer> m_frags
 
 
-
+
 
 
 
 
-m_servers__IsNotDefault
-private boolean m_servers__IsNotDefault
+m_frags__IsNotDefault
+private boolean m_frags__IsNotDefault
 
 
 
@@ -598,85 +598,85 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 public HMaster getMaster()
 
 
-
+
 
 
 
 
-setServerManager
-public void setServerManager(ServerManager serverManager)
+setDeadServers
+public void setDeadServers(http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">Set deadServers)
 
 
-
+
 
 
 
 
-getServerManager
-public ServerManager getServerManager()
+getDeadServers
+public http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/deprecated-list.html
--
diff --git a/devapidocs/deprecated-list.html b/devapidocs/deprecated-list.html
index c4701da..9411283 100644
--- a/devapidocs/deprecated-list.html
+++ b/devapidocs/deprecated-list.html
@@ -128,56 +128,59 @@
 
 
 
+org.apache.hadoop.hbase.HColumnDescriptor
+
+
 org.apache.hadoop.hbase.regionserver.wal.HLogPrettyPrinter
 use the "hbase wal" 
command
 
 
-
+
 org.apache.hadoop.hbase.HTableDescriptor
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
  use TableDescriptorBuilder to 
build HTableDescriptor.
 
 
+
+org.apache.hadoop.hbase.client.ImmutableHColumnDescriptor
+
 
 org.apache.hadoop.hbase.client.ImmutableHTableDescriptor
 
 
-org.apache.hadoop.hbase.client.ImmutableHTableDescriptor.UnmodifyableTableDescriptor
-
-
 org.apache.hadoop.hbase.KeyValue.KVComparator
 : Use CellComparator.
 
 
-
+
 org.apache.hadoop.hbase.KeyValue.MetaComparator
 : CellComparator.META_COMPARATOR
 to be used
 
 
-
+
 org.apache.hadoop.hbase.KeyValue.RawBytesComparator
 Not to be used for any 
comparsions
 
 
-
+
 org.apache.hadoop.hbase.regionserver.querymatcher.LegacyScanQueryMatcher
 
-
+
 org.apache.hadoop.hbase.client.replication.ReplicationAdmin
 use Admin instead.
 
 
-
+
 org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
 
 
-
+
 org.apache.hadoop.hbase.zookeeper.ZKLeaderManager
 Not used
 
 
-
+
 org.apache.hadoop.hbase.zookeeper.ZKUtil.NodeAndData
 Unused
 
@@ -552,37 +555,55 @@
 
 
 
+org.apache.hadoop.hbase.HTableDescriptor.getColumnFamilies()
+
+
 org.apache.hadoop.hbase.HColumnDescriptor.getCompactionCompression()
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
  (https://issues.apache.org/jira/browse/HBASE-13655";>HBASE-13655).
  Use HColumnDescriptor.getCompactionCompressionType().
 
 
-
+
 org.apache.hadoop.hbase.HRegionInfo.getComparator()
 Use 
Region#getCellComparator().  deprecated for hbase 2.0, remove for hbase 
3.0
 
 
-
+
 org.apache.hadoop.hbase.HColumnDescriptor.getCompression()
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
  (https://issues.apache.org/jira/browse/HBASE-13655";>HBASE-13655).
  Use HColumnDescriptor.getCompressionType().
 
 
-
+
 org.apache.hadoop.hbase.ClusterStatus.getDeadServers()
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
  (https://issues.apache.org/jira/browse/HBASE-13656";>HBASE-13656).
  Use ClusterStatus.getDeadServersSize().
 
 
-
+
 org.apache.hadoop.hbase.wal.WAL.getEarliestMemstoreSeqNum(byte[])
 Since version 1.2.0. 
Removing because not used and exposes subtle internal
  workings. Use WAL.getEarliestMemstoreSeqNum(byte[],
 byte[])
 
 
+
+org.apache.hadoop.hbase.HTableDescriptor.getFamilies()
+Use HTableDescriptor.getColumnFamilies().
+
+
+
+org.apache.hadoop.hbase.HTableDescriptor.getFamiliesKeys()
+Use HTableDescriptor.getColumnFamilyNames().
+
+
+
+org.apache.hadoop.hbase.HTableDescriptor.getFamily(byte[])
+Use HTableDescriptor.getColumnFamily(byte[]).
+
+
 
 org.apache.hadoop.hbase.regionserver.Store.getFlushableSize()
 Since 2.0 and will be 
removed in 3.0. Use Store.getSizeToFlush()
 instead.
@@ -841,68 +862,73 @@
 org.apache.hadoop.hbase.KeyValue.isDelete()
 
 
-org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate.isLogDeletable(FileStatus)
+org.apache.hadoop.hbase.HColumnDescriptor.isLegalFamilyName(byte[])
+Use ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(byte[]).
+
 
 
+org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate.isLogDeletable(FileStatus)
+
+
 org.apache.hadoop.hbase.client.ConnectionImplementation.isMasterRunning()
 this has been deprecated 
without a replacement
 
 
-
+
 org.apache.hadoop.hbase.client.ClusterConnection.isMasterRunning()
 this has been deprecated 
without a replacement
 
 
-
+
 org.apache.hadoop.hbase.client.Result.isPartial()
 the word 'partial' 
ambiguous, use Result.mayHaveMoreCellsInRow()
 instead.
  Deprecated since 1.4.0.
 
 
-
+
 org.apache.hadoop.hbase.client.Scan.isSmall()
 since 2.0.0. See the 
comment of Scan.setSmall(boolean)
 
 
-
+
 org.apache.hadoop.hbase.security.visibility.VisibilityClient.listLabels(Configuration,
 String)
 Use VisibilityClient.listLabels(Connection,String)
 instead.
 
 
-
+
 org.apache.hadoop.hbase.client.replication.ReplicationAdmin.listPeerConfigs()
 use Admin.listReplicationPeers()
 instead
 
 
-
+
 org.apache.hadoop.hbase.client.replication.ReplicationAdmin.listReplicated()
 use Admin.listReplicatedTableCFs()
 instead
 
 
-
+
 org.apache.hadoop.hbase.client.replication.ReplicationAdmin.listReplicationPeers()
 use Admin.listReplicationPeers()
 instead
 
 
-
+
 org.apache.hadoop.hbase.CellUtil.matchingRow(Cell,
 Cell)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
  In
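Several of the HTableDescriptor entries above are straight renames; a hedged before/after sketch using exactly the replacements the list names (htd is a placeholder descriptor):

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class DeprecationMigration {
  static void migrate(HTableDescriptor htd) {
    byte[] cf = Bytes.toBytes("cf");
    htd.getColumnFamilies();     // replaces deprecated getFamilies()
    htd.getColumnFamilyNames();  // replaces deprecated getFamiliesKeys()
    htd.getColumnFamily(cf);     // replaces deprecated getFamily(byte[])
    // The static name check moves off HColumnDescriptor:
    ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(cf);  // replaces isLegalFamilyName(byte[])
  }
}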

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/apidocs/org/apache/hbase/archetypes/exemplars/shaded_client/package-frame.html
--
diff --git 
a/apidocs/org/apache/hbase/archetypes/exemplars/shaded_client/package-frame.html
 
b/apidocs/org/apache/hbase/archetypes/exemplars/shaded_client/package-frame.html
index dc143c7..b96138a 100644
--- 
a/apidocs/org/apache/hbase/archetypes/exemplars/shaded_client/package-frame.html
+++ 
b/apidocs/org/apache/hbase/archetypes/exemplars/shaded_client/package-frame.html
@@ -4,7 +4,7 @@
 
 
 
-org.apache.hbase.archetypes.exemplars.shaded_client (Apache HBase 
2.0.0-SNAPSHOT API)
+org.apache.hbase.archetypes.exemplars.shaded_client (Apache HBase 
3.0.0-SNAPSHOT API)
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/apidocs/org/apache/hbase/archetypes/exemplars/shaded_client/package-summary.html
--
diff --git 
a/apidocs/org/apache/hbase/archetypes/exemplars/shaded_client/package-summary.html
 
b/apidocs/org/apache/hbase/archetypes/exemplars/shaded_client/package-summary.html
index 4119b87..e992db2 100644
--- 
a/apidocs/org/apache/hbase/archetypes/exemplars/shaded_client/package-summary.html
+++ 
b/apidocs/org/apache/hbase/archetypes/exemplars/shaded_client/package-summary.html
@@ -4,7 +4,7 @@
 
 
 
-org.apache.hbase.archetypes.exemplars.shaded_client (Apache HBase 
2.0.0-SNAPSHOT API)
+org.apache.hbase.archetypes.exemplars.shaded_client (Apache HBase 
3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
-org.apache.hbase.archetypes.exemplars.shaded_client Class Hierarchy 
(Apache HBase 2.0.0-SNAPSHOT API)
+org.apache.hbase.archetypes.exemplars.shaded_client Class Hierarchy 
(Apache HBase 3.0.0-SNAPSHOT API)