[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html
index fe83874..41f6202 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html
@@ -1064,6 +1064,14 @@ extends
 
 static String
+TOTAL_ROW_ACTION_REQUEST_COUNT
+
+
+static String
+TOTAL_ROW_ACTION_REQUEST_COUNT_DESC
+
+
+static String
 UPDATES_BLOCKED_DESC
 
 
@@ -1701,13 +1709,39 @@ extends
 
 
+TOTAL_ROW_ACTION_REQUEST_COUNT
+static final String TOTAL_ROW_ACTION_REQUEST_COUNT
+
+See Also:
+Constant Field Values
+
+
+TOTAL_ROW_ACTION_REQUEST_COUNT_DESC
+static final String TOTAL_ROW_ACTION_REQUEST_COUNT_DESC
+
+See Also:
+Constant Field Values
+
 
 
 
 
 
 READ_REQUEST_COUNT
-static final String READ_REQUEST_COUNT
+static final String READ_REQUEST_COUNT
 
 See Also:
 Constant Field Values
@@ -1720,7 +1754,7 @@ extends
 
 READ_REQUEST_COUNT_DESC
-static final String READ_REQUEST_COUNT_DESC
+static final String READ_REQUEST_COUNT_DESC
 
 See Also:
 Constant Field Values
@@ -1733,7 +1767,7 @@ extends
 
 FILTERED_READ_REQUEST_COUNT
-static final String FILTERED_READ_REQUEST_COUNT
+static final String FILTERED_READ_REQUEST_COUNT
 
 See Also:
 Constant Field Values
@@ -1746,7 +1780,7 @@ extends
 
 FILTERED_READ_REQUEST_COUNT_DESC
-static final String FILTERED_READ_REQUEST_COUNT_DESC
+static final String FILTERED_READ_REQUEST_COUNT_DESC
 
 See Also:
 Constant Field Values
@@ -1759,7 +1793,7 @@ extends
 
 WRITE_REQUEST_COUNT
-static final String WRITE_REQUEST_COUNT
+static final String WRITE_REQUEST_COUNT
 
 See Also:
 Constant Field Values
@@ -1772,7 +1806,7 @@ extends
 
 WRITE_REQUEST_COUNT_DESC
-static final String WRITE_REQUEST_COUNT_DESC
+static final String WRITE_REQUEST_COUNT_DESC
 
 See Also:
 Constant Field Values
@@ -1785,7 +1819,7 @@ extends
 
 CHECK_MUTATE_FAILED_COUNT
-static final String CHECK_MUTATE_FAILED_COUNT
+static final String CHECK_MUTATE_FAILED_COUNT
 
 See Also:
 Constant Field Values
@@ -1798,7 +1832,7 @@ extends
 
 CHECK_MUTATE_FAILED_COUNT_DESC
-static final String CHECK_MUTATE_FAILED_COUNT_DESC
+static
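
The hunks above add a key/description constant pair for the new row-action metric. A minimal sketch of what the addition presumably looks like in the MetricsRegionServerSource interface — the Javadoc only shows the names, so the string values below follow the convention of the neighboring constants and are assumptions, not quotes from the source:

// Hypothetical reconstruction of the added constants; values are assumed.
public interface MetricsRegionServerSource {
  String TOTAL_ROW_ACTION_REQUEST_COUNT = "totalRowActionRequestCount";
  String TOTAL_ROW_ACTION_REQUEST_COUNT_DESC =
      "Total number of region requests this RegionServer has answered, counted per row action";
}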

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html b/devapidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
index 67c4119..5ded8a4 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
@@ -113,94 +113,82 @@
 
-org.apache.hadoop.hbase.coprocessor.example
-
 org.apache.hadoop.hbase.mapred
 Provides HBase MapReduce (http://wiki.apache.org/hadoop/HadoopMapReduce) Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 org.apache.hadoop.hbase.mapreduce
 Provides HBase MapReduce (http://wiki.apache.org/hadoop/HadoopMapReduce) Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 org.apache.hadoop.hbase.master
 org.apache.hadoop.hbase.master.assignment
 org.apache.hadoop.hbase.master.balancer
 org.apache.hadoop.hbase.master.procedure
 org.apache.hadoop.hbase.master.snapshot
 org.apache.hadoop.hbase.mob
 org.apache.hadoop.hbase.quotas
 org.apache.hadoop.hbase.regionserver
 org.apache.hadoop.hbase.regionserver.handler
 org.apache.hadoop.hbase.rest
 HBase REST
 
 org.apache.hadoop.hbase.rest.client
 org.apache.hadoop.hbase.rest.model
 org.apache.hadoop.hbase.rsgroup
-org.apache.hadoop.hbase.security.access
-org.apache.hadoop.hbase.security.visibility
 org.apache.hadoop.hbase.snapshot
 org.apache.hadoop.hbase.tool
 org.apache.hadoop.hbase.util
 
@@ -713,12 +701,6 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 HTableDescriptor
-HTable.getTableDescriptor()
-Gets the table descriptor for this table.
-
-
-HTableDescriptor
 Table.getTableDescriptor()
 Deprecated.
 since 2.0 version and will be removed in 3.0 version.
@@ -726,6 +708,12 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
+
+HTableDescriptor
+HTable.getTableDescriptor()
+Gets the table descriptor for this table.
+
+
 
 HTableDescriptor
 HTableWrapper.getTableDescriptor()
@@ -1138,110 +1126,28 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 default void
-MasterObserver.postCloneSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
-    org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription snapshot,
-    HTableDescriptor hTableDescriptor)
-Called after a snapshot clone operation has been requested.
-
-default void
-MasterObserver.postCompletedCreateTableAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
-    HTableDescriptor desc,
-    HRegionInfo[] regions)
-Called after the createTable operation has been requested.
-
-default void
-MasterObserver.postCompletedModifyTableAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
-    TableName tableName,
-    HTableDescriptor htd)
-Called after modifying a table's properties.
-
-default void
-MasterObserver.postCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
-    HTableDescriptor desc,
-    HRegionInfo[] regions)
-Called after the createTable operation has been requested.
-
-default void
 MasterObserver.postCreateTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
     HTableDescriptor desc,
     HRegionInfo[] regions)
 Deprecated.
 As of release 2.0.0, this will be removed in HBase 3.0.0
 (HBASE-15575: https://issues.apache.org/jira/browse/HBASE-15575).
-   Use MasterObserver.postCompletedCreateTableAction(ObserverContext, HTableDescriptor, HRegionInfo[])
+   Use MasterObserver.postCompletedCreateTableAction(ObserverContext, TableDescriptor, HRegionInfo[])
 
 default void
-MasterObserver.postModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
-    TableName tableName,
-    HTableDescriptor htd)
-Called after the modifyTable operation has been requested.
-
-default void
 MasterObserver.postModifyTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
     TableName tableName,
     HTableDescriptor htd)
 Deprecated.
 As of release 2.0.0, this will be removed in HBase 3.0.0
 (HBASE-13645: https://issues.apache.org/jira/browse/HBASE-13645).
- Use MasterObserver.postCompletedModifyTableAction(ObserverContext, TableName, HTableDescriptor).
+ Use MasterObserver.postCompletedModifyTableAction(ObserverContext, TableName,
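
The deprecation notes above document the MasterObserver migration from the HTableDescriptor-based hooks to the postCompleted*Action hooks taking TableDescriptor. A minimal sketch of an observer written against the replacement hook — the observer class itself is hypothetical, the throws clauses are assumed, and the hook signature is taken from the deprecation note:

import java.io.IOException;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hypothetical observer: reacts to completed table creation via the
// non-deprecated hook instead of postCreateTableHandler.
public class TableCreationLogger implements MasterObserver {
  @Override
  public void postCompletedCreateTableAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
      TableDescriptor desc, HRegionInfo[] regions) throws IOException {
    System.out.println("Created table " + desc.getTableName()
        + " with " + regions.length + " initial region(s)");
  }

  // Lifecycle no-ops, required by the Coprocessor contract of this era.
  @Override
  public void start(CoprocessorEnvironment env) throws IOException { }
  @Override
  public void stop(CoprocessorEnvironment env) throws IOException { }
}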

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
index 35d5549..7f42873 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
@@ -115,2816 +115,2814 @@
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-import

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
index f355960..13d9b4a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
@@ -360,478 +360,480 @@
 
 @Override
 public void requestFlush(Region r, boolean forceFlushAllStores) {
-  synchronized (regionsInQueue) {
-    if (!regionsInQueue.containsKey(r)) {
-      // This entry has no delay so it will be added at the top of the flush
-      // queue.  It'll come out near immediately.
-      FlushRegionEntry fqe = new FlushRegionEntry(r, forceFlushAllStores);
-      this.regionsInQueue.put(r, fqe);
-      this.flushQueue.add(fqe);
-    }
-  }
-}
-
-@Override
-public void requestDelayedFlush(Region r, long delay, boolean forceFlushAllStores) {
-  synchronized (regionsInQueue) {
-    if (!regionsInQueue.containsKey(r)) {
-      // This entry has some delay
-      FlushRegionEntry fqe = new FlushRegionEntry(r, forceFlushAllStores);
-      fqe.requeue(delay);
-      this.regionsInQueue.put(r, fqe);
-      this.flushQueue.add(fqe);
-    }
-  }
-}
-
-public int getFlushQueueSize() {
-  return flushQueue.size();
-}
-
-/**
- * Only interrupt once it's done with a run through the work loop.
- */
-void interruptIfNecessary() {
-  lock.writeLock().lock();
-  try {
-    for (FlushHandler flushHander : flushHandlers) {
-      if (flushHander != null) flushHander.interrupt();
-    }
-  } finally {
-    lock.writeLock().unlock();
-  }
-}
-
-synchronized void start(UncaughtExceptionHandler eh) {
-  ThreadFactory flusherThreadFactory = Threads.newDaemonThreadFactory(
-      server.getServerName().toShortString() + "-MemStoreFlusher", eh);
-  for (int i = 0; i < flushHandlers.length; i++) {
-    flushHandlers[i] = new FlushHandler("MemStoreFlusher." + i);
-    flusherThreadFactory.newThread(flushHandlers[i]);
-    flushHandlers[i].start();
-  }
-}
-
-boolean isAlive() {
-  for (FlushHandler flushHander : flushHandlers) {
-    if (flushHander != null && flushHander.isAlive()) {
-      return true;
-    }
-  }
-  return false;
-}
-
-void join() {
-  for (FlushHandler flushHander : flushHandlers) {
-    if (flushHander != null) {
-      Threads.shutdown(flushHander.getThread());
-    }
-  }
-}
-
-/**
- * A flushRegion that checks store file count.  If too many, puts the flush
- * on the delay queue to retry later.
- * @param fqe
- * @return true if the region was successfully flushed, false otherwise. If
- * false, there will be accompanying log messages explaining why the region was
- * not flushed.
- */
-private boolean flushRegion(final FlushRegionEntry fqe) {
-  Region region = fqe.region;
-  if (!region.getRegionInfo().isMetaRegion() &&
-      isTooManyStoreFiles(region)) {
-    if (fqe.isMaximumWait(this.blockingWaitTime)) {
-      LOG.info("Waited " + (EnvironmentEdgeManager.currentTime() - fqe.createTime) +
-        "ms on a compaction to clean up 'too many store files'; waited " +
-        "long enough... proceeding with flush of " +
-        region.getRegionInfo().getRegionNameAsString());
-    } else {
-      // If this is the first time we've been put off, then emit a log message.
-      if (fqe.getRequeueCount() <= 0) {
-        // Note: We don't impose the blockingStoreFiles constraint on meta regions
-        LOG.warn("Region " + region.getRegionInfo().getRegionNameAsString() + " has too many " +
-          "store files; delaying flush up to " + this.blockingWaitTime + "ms");
-        if (!this.server.compactSplitThread.requestSplit(region)) {
-          try {
-            this.server.compactSplitThread.requestSystemCompaction(
-                region, Thread.currentThread().getName());
-          } catch (IOException e) {
-            e = e instanceof RemoteException ?
-                ((RemoteException)e).unwrapRemoteException() : e;
-            LOG.error("Cache flush failed for region " +
-              Bytes.toStringBinary(region.getRegionInfo().getRegionName()), e);
-          }
-        }
-      }
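
The requeue logic above implements a delay-queue retry: a region with too many store files gets its flush entry requeued with a delay instead of flushing immediately, while no-delay entries surface at once. A standalone sketch of that pattern using only the JDK — the class names echo but do not reproduce the HBase ones:

import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

class FlushEntry implements Delayed {
  final String region;
  long whenReadyMs = System.currentTimeMillis();   // no delay: eligible now
  FlushEntry(String region) { this.region = region; }
  void requeue(long delayMs) { whenReadyMs = System.currentTimeMillis() + delayMs; }
  @Override public long getDelay(TimeUnit unit) {
    return unit.convert(whenReadyMs - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
  }
  @Override public int compareTo(Delayed o) {
    return Long.compare(getDelay(TimeUnit.MILLISECONDS), o.getDelay(TimeUnit.MILLISECONDS));
  }
}

public class DelayQueueDemo {
  public static void main(String[] args) throws InterruptedException {
    DelayQueue<FlushEntry> flushQueue = new DelayQueue<>();
    FlushEntry delayed = new FlushEntry("region-b");
    delayed.requeue(500);                          // "too many store files": retry later
    flushQueue.add(delayed);
    flushQueue.add(new FlushEntry("region-a"));    // no delay: comes out first
    System.out.println(flushQueue.take().region);  // region-a, immediately
    System.out.println(flushQueue.take().region);  // region-b, after ~500 ms
  }
}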

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html
index 7c7f362..cf1ba81 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
-public static class WALSplitter.RegionEntryBuffer
+public static class WALSplitter.RegionEntryBuffer
 extends Object
 implements HeapSize
 A buffer of some number of edits for a given region.
@@ -243,7 +243,7 @@ implements
 
 heapInBuffer
-long heapInBuffer
+long heapInBuffer
 
@@ -252,7 +252,7 @@ implements
 
 entryBuffer
-List<WAL.Entry> entryBuffer
+List<WAL.Entry> entryBuffer
 
@@ -261,7 +261,7 @@ implements
 
 tableName
-TableName tableName
+TableName tableName
 
@@ -270,7 +270,7 @@ implements
 
 encodedRegionName
-byte[] encodedRegionName
+byte[] encodedRegionName
 
@@ -287,7 +287,7 @@ implements
 
 RegionEntryBuffer
-RegionEntryBuffer(TableName tableName,
+RegionEntryBuffer(TableName tableName,
     byte[] region)
 
@@ -305,7 +305,7 @@ implements
 
 appendEntry
-long appendEntry(WAL.Entry entry)
+long appendEntry(WAL.Entry entry)
 
@@ -314,7 +314,7 @@ implements
 
 internify
-private void internify(WAL.Entry entry)
+private void internify(WAL.Entry entry)
 
@@ -323,7 +323,7 @@ implements
 
 heapSize
-public long heapSize()
+public long heapSize()
 
 Specified by:
 heapSize in interface HeapSize
@@ -339,7 +339,7 @@ implements
 
 getEncodedRegionName
-public byte[] getEncodedRegionName()
+public byte[] getEncodedRegionName()
 
@@ -348,7 +348,7 @@ implements
 
 getEntryBuffer
-public List<WAL.Entry> getEntryBuffer()
+public List<WAL.Entry> getEntryBuffer()
 
@@ -357,7 +357,7 @@ implements
 
 getTableName
-public TableName getTableName()
+public TableName getTableName()
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.RegionServerWriter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.RegionServerWriter.html b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.RegionServerWriter.html
index 64cb0f4..826b359 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.RegionServerWriter.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.RegionServerWriter.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
-private static final class WALSplitter.RegionServerWriter
+private static final class WALSplitter.RegionServerWriter
 extends WALSplitter.SinkWriter
 Private data structure that wraps a receiving RS and collects statistics about the data
  written to this newly assigned RS.
@@ -224,7 +224,7 @@ extends
 
 sink
-final WALEditsReplaySink sink
+final WALEditsReplaySink sink
 
@@ -241,7 +241,7 @@ extends
 
 RegionServerWriter
-RegionServerWriter(org.apache.hadoop.conf.Configuration conf,
+RegionServerWriter(org.apache.hadoop.conf.Configuration conf,
     TableName tableName,
     ClusterConnection conn)
     throws IOException
@@ -265,7 +265,7 @@ extends
 
 close
-void close()
+void close()
     throws IOException
 
 Throws:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html
index 39e01f1..01d8d07 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
index 826e912..9f31bd3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
@@ -51,1973 +51,1976 @@
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.MasterSwitchType;
-import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.client.VersionInfoUtil;
-import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-import org.apache.hadoop.hbase.errorhandling.ForeignException;
-import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-import org.apache.hadoop.hbase.ipc.PriorityFunction;
-import org.apache.hadoop.hbase.ipc.QosPriority;
-import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
-import org.apache.hadoop.hbase.ipc.ServerRpcController;
-import org.apache.hadoop.hbase.master.assignment.RegionStates;
-import org.apache.hadoop.hbase.master.locking.LockProcedure;
-import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
-import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
-import org.apache.hadoop.hbase.mob.MobUtils;
-import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
-import org.apache.hadoop.hbase.procedure2.LockInfo;
-import org.apache.hadoop.hbase.procedure2.Procedure;
-import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
-import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
-import org.apache.hadoop.hbase.quotas.QuotaUtil;
-import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
-import org.apache.hadoop.hbase.regionserver.RSRpcServices;
-import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.security.access.AccessController;
-import org.apache.hadoop.hbase.security.visibility.VisibilityController;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.*;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.*;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
-import

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index 5479fb1..49ef112 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -123,3388 +123,3334 @@
 import org.apache.hadoop.hbase.master.procedure.DeleteColumnFamilyProcedure;
 import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
-import org.apache.hadoop.hbase.master.procedure.DispatchMergingRegionsProcedure;
-import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
-import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
-import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
-import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
-import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure;
-import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
-import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
-import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
-import org.apache.hadoop.hbase.master.replication.ReplicationManager;
-import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-import org.apache.hadoop.hbase.mob.MobConstants;
-import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
-import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
-import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
-import org.apache.hadoop.hbase.procedure2.LockInfo;
-import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
-import org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver;
-import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
-import org.apache.hadoop.hbase.quotas.QuotaUtil;
-import org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore;
-import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier;
-import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory;
-import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.RSRpcServices;
-import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
-import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
-import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
-import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
-import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationFactory;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
-import org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
-import org.apache.hadoop.hbase.replication.regionserver.Replication;
-import org.apache.hadoop.hbase.security.AccessDeniedException;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-import org.apache.hadoop.hbase.util.Addressing;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.CompressionTest;
-import org.apache.hadoop.hbase.util.EncryptionTest;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.HFileArchiveUtil;
-import

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/org/apache/hadoop/hbase/client/AsyncProcess.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncProcess.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncProcess.html
index afde8cb..5ffc585 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncProcess.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncProcess.html
@@ -761,7 +761,7 @@ extends Object
 
 submitMultiActions
-<CResult> AsyncRequestFuture submitMultiActions(AsyncProcessTask task,
+<CResult> AsyncRequestFuture submitMultiActions(AsyncProcessTask task,
     List<Action> retainedActions,
     long nonceGroup,
     List<Exception> locationErrors,
@@ -775,7 +775,7 @@ extends Object
 
 addAction
-static void addAction(ServerName server,
+static void addAction(ServerName server,
     byte[] regionName,
     Action action,
     Map<ServerName, MultiAction> actionsByServer,
@@ -797,7 +797,7 @@ extends Object
 
 submitAll
-private <CResult> AsyncRequestFuture submitAll(AsyncProcessTask task)
+private <CResult> AsyncRequestFuture submitAll(AsyncProcessTask task)
 Submit immediately the list of rows, whatever the server status. Kept for backward
  compatibility: it allows the batch interface, which returns an array of objects, to be used.
@@ -812,7 +812,7 @@ extends Object
 
 checkTask
-private <CResult> AsyncRequestFuture checkTask(AsyncProcessTask<CResult> task)
+private <CResult> AsyncRequestFuture checkTask(AsyncProcessTask<CResult> task)
@@ -821,7 +821,7 @@ extends Object
 
 setNonce
-private void setNonce(NonceGenerator ng,
+private void setNonce(NonceGenerator ng,
     Row r,
     Action action)
@@ -832,7 +832,7 @@ extends Object
 
 checkTimeout
-private int checkTimeout(String name,
+private int checkTimeout(String name,
     int timeout)
@@ -842,7 +842,7 @@ extends Object
 
 checkOperationTimeout
-private int checkOperationTimeout(int operationTimeout)
+private int checkOperationTimeout(int operationTimeout)
@@ -851,7 +851,7 @@ extends Object
 
 checkRpcTimeout
-private int checkRpcTimeout(int rpcTimeout)
+private int checkRpcTimeout(int rpcTimeout)
@@ -860,7 +860,7 @@ extends Object
 
 createAsyncRequestFuture
-<CResult> AsyncRequestFutureImpl<CResult> createAsyncRequestFuture(AsyncProcessTask task,
+<CResult> AsyncRequestFutureImpl<CResult> createAsyncRequestFuture(AsyncProcessTask task,
     List<Action> actions,
     long nonceGroup)
@@ -871,7 +871,7 @@ extends Object
 
 waitForMaximumCurrentTasks
-protected void waitForMaximumCurrentTasks(int max,
+protected void waitForMaximumCurrentTasks(int max,
     TableName tableName)
     throws InterruptedIOException
 Wait until the async process does not have more than max tasks in progress.
@@ -887,7 +887,7 @@ extends Object
 
 getLogger
-private Consumer
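
submitAll is the private engine behind the public batch call mentioned in its description. A short sketch of that public entry point, Table.batch, which takes a heterogeneous list of row actions and fills one result slot per action — table and column names here are placeholders:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchExample {
  public static void runBatch(Connection conn) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("demo"))) {
      List<Row> actions = new ArrayList<>();
      actions.add(new Get(Bytes.toBytes("row-1")));
      Put put = new Put(Bytes.toBytes("row-2"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      actions.add(put);
      Object[] results = new Object[actions.size()];
      table.batch(actions, results);  // submitted together; one result per action
    }
  }
}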

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/replication/ReplicationEndpoint.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/ReplicationEndpoint.html b/devapidocs/org/apache/hadoop/hbase/replication/ReplicationEndpoint.html
index 4e9bdd4..de52857 100644
--- a/devapidocs/org/apache/hadoop/hbase/replication/ReplicationEndpoint.html
+++ b/devapidocs/org/apache/hadoop/hbase/replication/ReplicationEndpoint.html
@@ -101,7 +101,7 @@ var activeTableTab = "activeTableTab";
 
 All Superinterfaces:
-ReplicationPeerConfigListener, com.google.common.util.concurrent.Service
+ReplicationPeerConfigListener, org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.Service
 
 All Known Implementing Classes:
@@ -111,7 +111,7 @@ var activeTableTab = "activeTableTab";
 
 @InterfaceAudience.LimitedPrivate(value="Replication")
 public interface ReplicationEndpoint
-extends com.google.common.util.concurrent.Service, ReplicationPeerConfigListener
+extends org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.Service, ReplicationPeerConfigListener
 ReplicationEndpoint is a plugin which implements replication
  to other HBase clusters, or other systems. A ReplicationEndpoint implementation
  can be specified at peer creation time by specifying it
@@ -153,11 +153,11 @@ extends com.google.common.util.concurrent.Service,
 
-Nested classes/interfaces inherited from interface com.google.common.util.concurrent.Service
-com.google.common.util.concurrent.Service.State
+Nested classes/interfaces inherited from interface org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.Service
+org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.Service.Listener, org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.Service.State
@@ -206,11 +206,11 @@ extends com.google.common.util.concurrent.Service,
 
-Methods inherited from interface com.google.common.util.concurrent.Service
-isRunning, start, startAndWait, state, stop, stopAndWait
+Methods inherited from interface org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.Service
+addListener, awaitRunning, awaitRunning, awaitTerminated, awaitTerminated, failureCause, isRunning, startAsync, state, stopAsync
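
For callers, the shaded-Guava migration above changes the endpoint lifecycle calls: the blocking start()/startAndWait()/stop()/stopAndWait() are gone in favor of async starts plus explicit awaits. A sketch under that reading — obtaining a concrete endpoint is elided, and only methods listed in the diff are used:

import org.apache.hadoop.hbase.replication.ReplicationEndpoint;

public class EndpointLifecycle {
  static void runOnce(ReplicationEndpoint endpoint) {
    endpoint.startAsync();      // was: start() / startAndWait()
    endpoint.awaitRunning();    // block until the endpoint reports RUNNING
    try {
      // ... feed WAL entries to the endpoint while it is running ...
    } finally {
      endpoint.stopAsync();     // was: stop() / stopAndWait()
      endpoint.awaitTerminated();
    }
  }
}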
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/replication/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/package-tree.html b/devapidocs/org/apache/hadoop/hbase/replication/package-tree.html
index eac14f3..6d5eda0 100644
--- a/devapidocs/org/apache/hadoop/hbase/replication/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/replication/package-tree.html
@@ -81,7 +81,7 @@
 
 java.lang.Object
 
-com.google.common.util.concurrent.AbstractService (implements com.google.common.util.concurrent.Service)
+org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.AbstractService (implements org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.Service)
 
 org.apache.hadoop.hbase.replication.BaseReplicationEndpoint (implements org.apache.hadoop.hbase.replication.ReplicationEndpoint)
 
@@ -161,14 +161,14 @@
 org.apache.hadoop.hbase.replication.ReplicationPeer
 org.apache.hadoop.hbase.replication.ReplicationPeerConfigListener
 
-org.apache.hadoop.hbase.replication.ReplicationEndpoint (also extends com.google.common.util.concurrent.Service)
+org.apache.hadoop.hbase.replication.ReplicationEndpoint (also extends org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.Service)
 
 org.apache.hadoop.hbase.replication.ReplicationPeers
 org.apache.hadoop.hbase.replication.ReplicationQueues
 org.apache.hadoop.hbase.replication.ReplicationQueuesClient
 org.apache.hadoop.hbase.replication.ReplicationTracker
-com.google.common.util.concurrent.Service
+org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.Service
 
 org.apache.hadoop.hbase.replication.ReplicationEndpoint (also extends org.apache.hadoop.hbase.replication.ReplicationPeerConfigListener)
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.html b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.html
index cb1c51f..ebadb27 100644
--- a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.html
+++ b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.html

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
index 504e470..38667c0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
@@ -2866,5375 +2866,5371 @@
 checkResources();
 startRegionOperation(Operation.DELETE);
 try {
-  delete.getRow();
-  // All edits for the given row (across all column families) must happen atomically.
-  doBatchMutate(delete);
-} finally {
-  closeRegionOperation(Operation.DELETE);
-}
-
-/**
- * Row needed by below method.
- */
-private static final byte [] FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly");
-
-/**
- * This is used only by unit tests. Not required to be a public API.
- * @param familyMap map of family to edits for the given family.
- * @throws IOException
- */
-void delete(NavigableMap<byte[], List<Cell>> familyMap,
-    Durability durability) throws IOException {
-  Delete delete = new Delete(FOR_UNIT_TESTS_ONLY);
-  delete.setFamilyCellMap(familyMap);
-  delete.setDurability(durability);
-  doBatchMutate(delete);
-}
-
-@Override
-public void prepareDeleteTimestamps(Mutation mutation, Map<byte[], List<Cell>> familyMap,
-    byte[] byteNow) throws IOException {
-  for (Map.Entry<byte[], List<Cell>> e : familyMap.entrySet()) {
-
-    byte[] family = e.getKey();
-    List<Cell> cells = e.getValue();
-    assert cells instanceof RandomAccess;
-
-    Map<byte[], Integer> kvCount = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-    int listSize = cells.size();
-    for (int i = 0; i < listSize; i++) {
-      Cell cell = cells.get(i);
-      //  Check if time is LATEST, change to time of most recent addition if so
-      //  This is expensive.
-      if (cell.getTimestamp() == HConstants.LATEST_TIMESTAMP && CellUtil.isDeleteType(cell)) {
-        byte[] qual = CellUtil.cloneQualifier(cell);
-        if (qual == null) qual = HConstants.EMPTY_BYTE_ARRAY;
-
-        Integer count = kvCount.get(qual);
-        if (count == null) {
-          kvCount.put(qual, 1);
-        } else {
-          kvCount.put(qual, count + 1);
-        }
-        count = kvCount.get(qual);
-
-        Get get = new Get(CellUtil.cloneRow(cell));
-        get.setMaxVersions(count);
-        get.addColumn(family, qual);
-        if (coprocessorHost != null) {
-          if (!coprocessorHost.prePrepareTimeStampForDeleteVersion(mutation, cell,
-              byteNow, get)) {
-            updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-          }
-        } else {
-          updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-        }
-      } else {
-        CellUtil.updateLatestStamp(cell, byteNow, 0);
-      }
-    }
-  }
-}
-
-void updateDeleteLatestVersionTimeStamp(Cell cell, Get get, int count, byte[] byteNow)
-    throws IOException {
-  List<Cell> result = get(get, false);
-
-  if (result.size() < count) {
-    // Nothing to delete
-    CellUtil.updateLatestStamp(cell, byteNow, 0);
-    return;
-  }
-  if (result.size() > count) {
-    throw new RuntimeException("Unexpected size: " + result.size());
-  }
-  Cell getCell = result.get(count - 1);
-  CellUtil.setTimestamp(cell, getCell.getTimestamp());
-}
-
-@Override
-public void put(Put put) throws IOException {
-  checkReadOnly();
-
-  // Do a rough check that we have resources to accept a write.  The check is
-  // 'rough' in that between the resource check and the call to obtain a
-  // read lock, resources may run out.  For now, the thought is that this
-  // will be extremely rare; we'll deal with it when it happens.
-  checkResources();
-  startRegionOperation(Operation.PUT);
-  try {
-    // All edits for the given row (across all column families) must happen atomically.
-    doBatchMutate(put);
-  } finally {
-    closeRegionOperation(Operation.PUT);
-  }
-}
-
-/**
- * Struct-like class that tracks the progress of a batch operation,
- * accumulating status codes and
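
Both put and delete above funnel into doBatchMutate, which is what enforces the comment's guarantee: all edits for one row, across all column families, apply atomically. The client-side view of that contract, as a sketch with placeholder table and family names:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowAtomicityExample {
  public static void mutateOneRow(Connection conn) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("demo"))) {
      Put put = new Put(Bytes.toBytes("row-1"));
      put.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("a"), Bytes.toBytes("1"));
      put.addColumn(Bytes.toBytes("cf2"), Bytes.toBytes("b"), Bytes.toBytes("2"));
      table.put(put);         // both cells become visible together, or neither does

      Delete delete = new Delete(Bytes.toBytes("row-1"));
      delete.addFamily(Bytes.toBytes("cf1"));
      delete.setDurability(Durability.SYNC_WAL);
      table.delete(delete);   // again one atomic row mutation
    }
  }
}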

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
index feb42ea..4bd98f4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
@@ -185,4189 +185,4266 @@
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
-import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-import org.apache.hadoop.hbase.util.Addressing;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.zookeeper.KeeperException;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.protobuf.Descriptors;
-import com.google.protobuf.Message;
-import com.google.protobuf.RpcController;
-import java.util.stream.Collectors;
-
-/**
- * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that
- * this is an HBase-internal class as defined in
- * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
- * There are no guarantees for backwards source / binary compatibility, and methods or the class can
- * change or go away without deprecation.
- * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead of constructing
- * an HBaseAdmin directly.
- *
- * <p>Connection should be an <i>unmanaged</i> connection obtained via
- * {@link ConnectionFactory#createConnection(Configuration)}
- *
- * @see ConnectionFactory
- * @see Connection
- * @see Admin
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class HBaseAdmin implements Admin {
-  private static final Log LOG = LogFactory.getLog(HBaseAdmin.class);
-
-  private static final String ZK_IDENTIFIER_PREFIX = "hbase-admin-on-";
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
+import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
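
Following the javadoc above, the supported path to an Admin goes through an unmanaged Connection rather than constructing HBaseAdmin directly. A minimal sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AdminExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      System.out.println("Tables: " + admin.listTableNames().length);
    }
  }
}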

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
index b32645e..7968115 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
@@ -30,590 +30,590 @@
 022import java.io.IOException;
 023import java.io.InterruptedIOException;
 024import java.util.ArrayList;
-025import java.util.HashMap;
-026import java.util.List;
-027import java.util.Map;
-028import java.util.NavigableSet;
-029import 
java.util.concurrent.CountDownLatch;
-030import 
java.util.concurrent.locks.ReentrantLock;
-031
-032import org.apache.commons.logging.Log;
-033import 
org.apache.commons.logging.LogFactory;
-034import org.apache.hadoop.hbase.Cell;
-035import 
org.apache.hadoop.hbase.CellComparator;
-036import 
org.apache.hadoop.hbase.CellUtil;
-037import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-038import 
org.apache.hadoop.hbase.HConstants;
-039import 
org.apache.hadoop.hbase.KeyValue;
-040import 
org.apache.hadoop.hbase.KeyValueUtil;
-041import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-042import 
org.apache.hadoop.hbase.client.IsolationLevel;
-043import 
org.apache.hadoop.hbase.client.Scan;
-044import 
org.apache.hadoop.hbase.executor.ExecutorService;
-045import 
org.apache.hadoop.hbase.filter.Filter;
-046import 
org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
-047import 
org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
-048import 
org.apache.hadoop.hbase.regionserver.handler.ParallelSeekHandler;
-049import 
org.apache.hadoop.hbase.regionserver.querymatcher.CompactionScanQueryMatcher;
-050import 
org.apache.hadoop.hbase.regionserver.querymatcher.LegacyScanQueryMatcher;
-051import 
org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
-052import 
org.apache.hadoop.hbase.regionserver.querymatcher.UserScanQueryMatcher;
-053import 
org.apache.hadoop.hbase.util.CollectionUtils;
-054import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+025import java.util.List;
+026import java.util.NavigableSet;
+027import 
java.util.concurrent.CountDownLatch;
+028import 
java.util.concurrent.locks.ReentrantLock;
+029
+030import org.apache.commons.logging.Log;
+031import 
org.apache.commons.logging.LogFactory;
+032import org.apache.hadoop.hbase.Cell;
+033import 
org.apache.hadoop.hbase.CellComparator;
+034import 
org.apache.hadoop.hbase.CellUtil;
+035import 
org.apache.hadoop.hbase.DoNotRetryIOException;
+036import 
org.apache.hadoop.hbase.HConstants;
+037import 
org.apache.hadoop.hbase.KeyValue;
+038import 
org.apache.hadoop.hbase.KeyValueUtil;
+039import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+040import 
org.apache.hadoop.hbase.client.IsolationLevel;
+041import 
org.apache.hadoop.hbase.client.Scan;
+042import 
org.apache.hadoop.hbase.executor.ExecutorService;
+043import 
org.apache.hadoop.hbase.filter.Filter;
+044import 
org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
+045import 
org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
+046import 
org.apache.hadoop.hbase.regionserver.handler.ParallelSeekHandler;
+047import 
org.apache.hadoop.hbase.regionserver.querymatcher.CompactionScanQueryMatcher;
+048import 
org.apache.hadoop.hbase.regionserver.querymatcher.LegacyScanQueryMatcher;
+049import 
org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
+050import 
org.apache.hadoop.hbase.regionserver.querymatcher.UserScanQueryMatcher;
+051import 
org.apache.hadoop.hbase.util.CollectionUtils;
+052import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+053
+054import 
com.google.common.annotations.VisibleForTesting;
 055
-056import 
com.google.common.annotations.VisibleForTesting;
-057
-058/**
-059 * Scanner scans both the memstore and 
the Store. Coalesce KeyValue stream into List<KeyValue>
-060 * for a single row.
-061 * <p>
-062 * The implementation is not thread safe. 
So there will be no race between next and close. The only
-063 * exception is updateReaders, it will be 
called in the memstore flush thread to indicate that there
-064 * is a flush.
-065 */
-066@InterfaceAudience.Private
-067public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
-068implements KeyValueScanner, 
InternalScanner, ChangedReadersObserver {
-069  private static final Log LOG = 
LogFactory.getLog(StoreScanner.class);
-070  // In unit tests, the store could be 
null
-071  protected final Store store;
-072  private ScanQueryMatcher matcher;
-073  protected KeyValueHeap heap;
-074  private boolean cacheBlocks;
-075
-076  private long countPerRow = 0;
-077  private int storeLimit = -1;
-078  private 

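The StoreScanner javadoc in the hunk above states the class's whole job: merge the sorted cell streams coming from the memstore and each store file, and coalesce them into one List of cells per row, with next/close never racing and only updateReaders arriving from the memstore flush thread. A minimal, self-contained sketch of that heap-merge idea, using simplified stand-in Cell and Source types (an illustration of the technique, not the HBase implementation):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

public class RowCoalescingScanner {
  // Simplified stand-in for an HBase cell: just a row key and a value.
  static final class Cell {
    final String row; final String value;
    Cell(String row, String value) { this.row = row; this.value = value; }
  }

  // Wraps a sorted source so the head cell can be inspected without consuming it.
  static final class Source {
    private final Iterator<Cell> it; private Cell head;
    Source(Iterator<Cell> it) { this.it = it; head = it.hasNext() ? it.next() : null; }
    Cell peek() { return head; }
    Cell poll() { Cell c = head; head = it.hasNext() ? it.next() : null; return c; }
    boolean exhausted() { return head == null; }
  }

  // All sources ordered by the row key of their current head cell.
  private final PriorityQueue<Source> heap =
      new PriorityQueue<>(Comparator.comparing((Source s) -> s.peek().row));

  public RowCoalescingScanner(List<Iterator<Cell>> sources) {
    for (Iterator<Cell> s : sources) {
      Source src = new Source(s);
      if (!src.exhausted()) heap.add(src);
    }
  }

  // Returns every cell for the next row across all sources, or null when done.
  public List<Cell> nextRow() {
    if (heap.isEmpty()) return null;
    String row = heap.peek().peek().row;
    List<Cell> out = new ArrayList<>();
    while (!heap.isEmpty() && heap.peek().peek().row.equals(row)) {
      Source top = heap.poll();
      out.add(top.poll());
      if (!top.exhausted()) heap.add(top);  // re-seat under its new head cell
    }
    return out;
  }
}

The real StoreScanner layers a ScanQueryMatcher (visible in the imports above) over this merge to apply versions, TTLs and filters before a cell is emitted.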
[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
index 75db22d..99a09f9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
@@ -37,2710 +37,2816 @@
 029import java.util.List;
 030import java.util.Map;
 031import java.util.Optional;
-032import 
java.util.concurrent.CompletableFuture;
-033import java.util.concurrent.TimeUnit;
-034import 
java.util.concurrent.atomic.AtomicReference;
-035import java.util.function.BiConsumer;
-036import java.util.regex.Pattern;
-037import java.util.stream.Collectors;
-038
-039import 
com.google.common.annotations.VisibleForTesting;
-040
-041import io.netty.util.Timeout;
-042import io.netty.util.TimerTask;
-043
-044import java.util.stream.Stream;
-045
-046import org.apache.commons.io.IOUtils;
-047import org.apache.commons.logging.Log;
-048import 
org.apache.commons.logging.LogFactory;
-049import 
org.apache.hadoop.hbase.ClusterStatus;
-050import 
org.apache.hadoop.hbase.HRegionInfo;
-051import 
org.apache.hadoop.hbase.HRegionLocation;
-052import 
org.apache.hadoop.hbase.MetaTableAccessor;
-053import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import 
org.apache.hadoop.hbase.NotServingRegionException;
-055import 
org.apache.hadoop.hbase.ProcedureInfo;
-056import 
org.apache.hadoop.hbase.RegionLoad;
-057import 
org.apache.hadoop.hbase.RegionLocations;
-058import 
org.apache.hadoop.hbase.ServerName;
-059import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-060import 
org.apache.hadoop.hbase.HConstants;
-061import 
org.apache.hadoop.hbase.TableExistsException;
-062import 
org.apache.hadoop.hbase.TableName;
-063import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-064import 
org.apache.hadoop.hbase.TableNotDisabledException;
-065import 
org.apache.hadoop.hbase.TableNotEnabledException;
-066import 
org.apache.hadoop.hbase.TableNotFoundException;
-067import 
org.apache.hadoop.hbase.UnknownRegionException;
-068import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-069import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-072import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-074import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import 
org.apache.hadoop.hbase.replication.ReplicationException;
-080import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-098import 

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
index 71844ce..75db22d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
@@ -105,2564 +105,2642 @@
 097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
 099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
-140import 

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
index f5bc73a..feb42ea 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
@@ -4044,345 +4044,330 @@
 4036
 4037  @Override
 4038  public void 
drainRegionServers(ListServerName servers) throws IOException {
-4039final 
ListHBaseProtos.ServerName pbServers = new 
ArrayList(servers.size());
-4040for (ServerName server : servers) 
{
-4041  // Parse to ServerName to do 
simple validation.
-4042  
ServerName.parseServerName(server.toString());
-4043  
pbServers.add(ProtobufUtil.toServerName(server));
-4044}
-4045
-4046executeCallable(new 
MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4047  @Override
-4048  public Void rpcCall() throws 
ServiceException {
-4049DrainRegionServersRequest req 
=
-4050
DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build();
-4051
master.drainRegionServers(getRpcController(), req);
-4052return null;
-4053  }
-4054});
-4055  }
-4056
-4057  @Override
-4058  public List<ServerName> 
listDrainingRegionServers() throws IOException {
-4059return executeCallable(new 
MasterCallable<List<ServerName>>(getConnection(),
-4060  getRpcControllerFactory()) 
{
-4061  @Override
-4062  public List<ServerName> 
rpcCall() throws ServiceException {
-4063ListDrainingRegionServersRequest 
req = ListDrainingRegionServersRequest.newBuilder().build();
-4064List<ServerName> servers = 
new ArrayList<>();
-4065for (HBaseProtos.ServerName 
server : master.listDrainingRegionServers(null, req)
-4066.getServerNameList()) {
-4067  
servers.add(ProtobufUtil.toServerName(server));
-4068}
-4069return servers;
-4070  }
-4071});
-4072  }
-4073
-4074  @Override
-4075  public void 
removeDrainFromRegionServers(List<ServerName> servers) throws IOException 
{
-4076final 
List<HBaseProtos.ServerName> pbServers = new 
ArrayList<>(servers.size());
-4077for (ServerName server : servers) 
{
-4078  
pbServers.add(ProtobufUtil.toServerName(server));
-4079}
-4080
-4081executeCallable(new 
MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4082  @Override
-4083  public Void rpcCall() throws 
ServiceException {
-4084
RemoveDrainFromRegionServersRequest req = 
RemoveDrainFromRegionServersRequest.newBuilder()
-4085
.addAllServerName(pbServers).build();
-4086
master.removeDrainFromRegionServers(getRpcController(), req);
-4087return null;
+4039executeCallable(new 
MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4040  @Override
+4041  public Void rpcCall() throws 
ServiceException {
+4042
master.drainRegionServers(getRpcController(),
+4043  
RequestConverter.buildDrainRegionServersRequest(servers));
+4044return null;
+4045  }
+4046});
+4047  }
+4048
+4049  @Override
+4050  public List<ServerName> 
listDrainingRegionServers() throws IOException {
+4051return executeCallable(new 
MasterCallable<List<ServerName>>(getConnection(),
+4052  getRpcControllerFactory()) 
{
+4053  @Override
+4054  public List<ServerName> 
rpcCall() throws ServiceException {
+4055ListDrainingRegionServersRequest 
req = ListDrainingRegionServersRequest.newBuilder().build();
+4056List<ServerName> servers = 
new ArrayList<>();
+4057for (HBaseProtos.ServerName 
server : master.listDrainingRegionServers(null, req)
+4058.getServerNameList()) {
+4059  
servers.add(ProtobufUtil.toServerName(server));
+4060}
+4061return servers;
+4062  }
+4063});
+4064  }
+4065
+4066  @Override
+4067  public void 
removeDrainFromRegionServers(List<ServerName> servers) throws IOException 
{
+4068executeCallable(new 
MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4069  @Override
+4070  public Void rpcCall() throws 
ServiceException {
+4071
master.removeDrainFromRegionServers(getRpcController(), 
RequestConverter.buildRemoveDrainFromRegionServersRequest(servers));
+4072return null;
+4073  }
+4074});
+4075  }
+4076
+4077  @Override
+4078  public List<TableCFs> 
listReplicatedTableCFs() throws IOException {
+4079List<TableCFs> 
replicatedTableCFs = new ArrayList<>();
+4080HTableDescriptor[] tables = 
listTables();
+4081for (HTableDescriptor table : 
tables) {
+4082  

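The 2017-07-09 hunk above is a pure refactor: HBaseAdmin.drainRegionServers and removeDrainFromRegionServers stop assembling protobuf requests inline and instead call RequestConverter.buildDrainRegionServersRequest and buildRemoveDrainFromRegionServersRequest, so the ServerName-to-protobuf conversion lives in one place. A sketch of what such a builder method does, reusing the conversion the removed lines performed; the names come from the diff, but the body here is an illustration rather than quoted HBase source, and it assumes the HBase client and shaded MasterProtos classes named in the diff:

// Fold each ServerName into its protobuf form and attach the whole list
// to the request builder, exactly as the removed inline code did.
public static DrainRegionServersRequest buildDrainRegionServersRequest(
    List<ServerName> servers) {
  List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
  for (ServerName server : servers) {
    pbServers.add(ProtobufUtil.toServerName(server));
  }
  return DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build();
}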
[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index 441a4c7..7c9f981 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -15358,11 +15358,11 @@
 Increases the given byte size of a Bloom filter until it 
can be folded by
  the given factor.
 
-computeHDFSBlocksDistribution(Configuration,
 HTableDescriptor, HRegionInfo) - Static method in class 
org.apache.hadoop.hbase.regionserver.HRegion
+computeHDFSBlocksDistribution(Configuration,
 TableDescriptor, HRegionInfo) - Static method in class 
org.apache.hadoop.hbase.regionserver.HRegion
 
 This is a helper function to compute HDFS block 
distribution on demand
 
-computeHDFSBlocksDistribution(Configuration,
 HTableDescriptor, HRegionInfo, Path) - Static method in class 
org.apache.hadoop.hbase.regionserver.HRegion
+computeHDFSBlocksDistribution(Configuration,
 TableDescriptor, HRegionInfo, Path) - Static method in class 
org.apache.hadoop.hbase.regionserver.HRegion
 
 This is a helper function to compute HDFS block 
distribution on demand
 
@@ -15966,7 +15966,7 @@
 Configure a MapReduce Job to perform an incremental load 
into the given
  table.
 
-configureIncrementalLoad(Job,
 HTableDescriptor, RegionLocator) - Static method in class 
org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2
+configureIncrementalLoad(Job,
 TableDescriptor, RegionLocator) - Static method in class 
org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2
 
 Configure a MapReduce Job to perform an incremental load 
into the given
  table.
@@ -15976,11 +15976,11 @@
 configureIncrementalLoad(Job,
 List<HFileOutputFormat2.TableInfo>) - Static method in class 
org.apache.hadoop.hbase.mapreduce.MultiTableHFileOutputFormat
 
 Analogous to
- HFileOutputFormat2.configureIncrementalLoad(Job,
 HTableDescriptor, RegionLocator),
+ HFileOutputFormat2.configureIncrementalLoad(Job,
 TableDescriptor, RegionLocator),
  this function will configure the requisite number of reducers to write HFiles 
for multiple
  tables simultaneously
 
-configureIncrementalLoadMap(Job,
 HTableDescriptor) - Static method in class 
org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2
+configureIncrementalLoadMap(Job,
 TableDescriptor) - Static method in class 
org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2
 
 configureInfoServer()
 - Method in class org.apache.hadoop.hbase.master.HMaster
 
@@ -18199,11 +18199,11 @@
 
 Create a cache for region to list of servers
 
-createCacheConf(HColumnDescriptor)
 - Method in class org.apache.hadoop.hbase.regionserver.HMobStore
+createCacheConf(ColumnFamilyDescriptor)
 - Method in class org.apache.hadoop.hbase.regionserver.HMobStore
 
 Creates the mob cache config.
 
-createCacheConf(HColumnDescriptor)
 - Method in class org.apache.hadoop.hbase.regionserver.HStore
+createCacheConf(ColumnFamilyDescriptor)
 - Method in class org.apache.hadoop.hbase.regionserver.HStore
 
 Creates the cache config.
 
@@ -18589,7 +18589,7 @@
 
 createEncoder(String)
 - Static method in enum org.apache.hadoop.hbase.io.encoding.DataBlockEncoding
 
-createEncryptionContext(Configuration,
 HColumnDescriptor) - Static method in class 
org.apache.hadoop.hbase.security.EncryptionUtil
+createEncryptionContext(Configuration,
 ColumnFamilyDescriptor) - Static method in class 
org.apache.hadoop.hbase.security.EncryptionUtil
 
Helper to create an encryption context.
 
@@ -18909,11 +18909,11 @@
 
 createHostAndPortStr(String,
 int) - Static method in class org.apache.hadoop.hbase.util.Addressing
 
-createHRegion(HRegionInfo,
 Path, Configuration, HTableDescriptor, WAL, boolean) - Static 
method in class org.apache.hadoop.hbase.regionserver.HRegion
+createHRegion(HRegionInfo,
 Path, Configuration, TableDescriptor, WAL, boolean) - Static method 
in class org.apache.hadoop.hbase.regionserver.HRegion
 
 Convenience method creating new HRegions.
 
-createHRegion(HRegionInfo,
 Path, Configuration, HTableDescriptor, WAL) - Static method in 
class org.apache.hadoop.hbase.regionserver.HRegion
+createHRegion(HRegionInfo,
 Path, Configuration, TableDescriptor, WAL) - Static method in class 
org.apache.hadoop.hbase.regionserver.HRegion
 
 createHRegionInfos(HTableDescriptor,
 byte[][]) - Static method in class org.apache.hadoop.hbase.util.ModifyRegionUtils
 
@@ -19828,14 +19828,35 @@
 
 createTable(HTableDescriptor)
 - Method in interface org.apache.hadoop.hbase.client.Admin
 
+Deprecated.
+since 2.0 version and will 
be removed in 3.0 version.
+ use Admin.createTable(TableDescriptor)
+
+
+createTable(TableDescriptor)
 - Method in interface org.apache.hadoop.hbase.client.Admin
+
 Creates a new table.
 
 createTable(HTableDescriptor,
 byte[], byte[], int) - Method in interface 
org.apache.hadoop.hbase.client.Admin
 
+Deprecated.
+since 2.0 version and will 
be removed in 3.0 version.
+ use 

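The index entries above record the Admin API migration: createTable(HTableDescriptor) is deprecated since 2.0 (removal planned for 3.0) in favor of createTable(TableDescriptor). A sketch of the replacement call using the immutable builder API; the table and family names are examples, and the setter names follow the 2.x client builder API, so treat this as a sketch rather than canonical usage:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableExample {
  // Before (deprecated since 2.0): admin.createTable(new HTableDescriptor(...))
  // After: build an immutable TableDescriptor and pass it to the new overload.
  static void createExampleTable(Admin admin) throws IOException {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build();
    admin.createTable(desc);
  }
}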
[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
index 43db01d..79dc4e0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
@@ -235,7 +235,7 @@
 227  public BucketCache(String ioEngineName, 
long capacity, int blockSize, int[] bucketSizes,
 228  int writerThreadNum, int 
writerQLen, String persistencePath, int ioErrorsTolerationDuration)
 229  throws FileNotFoundException, 
IOException {
-230this.ioEngine = 
getIOEngineFromName(ioEngineName, capacity);
+230this.ioEngine = 
getIOEngineFromName(ioEngineName, capacity, persistencePath);
 231this.writerThreads = new 
WriterThread[writerThreadNum];
 232long blockNumCapacity = capacity / 
blockSize;
 233if (blockNumCapacity >= 
Integer.MAX_VALUE) {
@@ -317,1229 +317,1230 @@
 309   * Get the IOEngine from the IO engine 
name
 310   * @param ioEngineName
 311   * @param capacity
-312   * @return the IOEngine
-313   * @throws IOException
-314   */
-315  private IOEngine 
getIOEngineFromName(String ioEngineName, long capacity)
-316  throws IOException {
-317if (ioEngineName.startsWith("file:") 
|| ioEngineName.startsWith("files:")) {
-318  // In order to make the usage 
simple, we only need the prefix 'files:' in
-319  // document whether one or multiple 
file(s), but also support 'file:' for
-320  // the compatibility
-321  String[] filePaths = 
ioEngineName.substring(ioEngineName.indexOf(":") + 1)
-322  
.split(FileIOEngine.FILE_DELIMITER);
-323  return new FileIOEngine(capacity, 
filePaths);
-324} else if 
(ioEngineName.startsWith("offheap")) {
-325  return new 
ByteBufferIOEngine(capacity, true);
-326} else if 
(ioEngineName.startsWith("heap")) {
-327  return new 
ByteBufferIOEngine(capacity, false);
-328} else if 
(ioEngineName.startsWith("mmap:")) {
-329  return new 
FileMmapEngine(ioEngineName.substring(5), capacity);
-330} else {
-331  throw new 
IllegalArgumentException(
-332  "Don't understand io engine 
name for cache - prefix with file:, heap or offheap");
-333}
-334  }
-335
-336  /**
-337   * Cache the block with the specified 
name and buffer.
-338   * @param cacheKey block's cache key
-339   * @param buf block buffer
-340   */
-341  @Override
-342  public void cacheBlock(BlockCacheKey 
cacheKey, Cacheable buf) {
-343cacheBlock(cacheKey, buf, false, 
false);
-344  }
-345
-346  /**
-347   * Cache the block with the specified 
name and buffer.
-348   * @param cacheKey block's cache key
-349   * @param cachedItem block buffer
-350   * @param inMemory if block is 
in-memory
-351   * @param cacheDataInL1
-352   */
-353  @Override
-354  public void cacheBlock(BlockCacheKey 
cacheKey, Cacheable cachedItem, boolean inMemory,
-355  final boolean cacheDataInL1) {
-356cacheBlockWithWait(cacheKey, 
cachedItem, inMemory, wait_when_cache);
-357  }
-358
-359  /**
-360   * Cache the block to ramCache
-361   * @param cacheKey block's cache key
-362   * @param cachedItem block buffer
-363   * @param inMemory if block is 
in-memory
-364   * @param wait if true, blocking wait 
when queue is full
-365   */
-366  public void 
cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean 
inMemory,
-367  boolean wait) {
-368if (LOG.isTraceEnabled()) 
LOG.trace("Caching key=" + cacheKey + ", item=" + cachedItem);
-369if (!cacheEnabled) {
-370  return;
-371}
-372
-373if (backingMap.containsKey(cacheKey)) 
{
-374  return;
-375}
-376
-377/*
-378 * Stuff the entry into the RAM cache 
so it can get drained to the persistent store
-379 */
-380RAMQueueEntry re =
-381new RAMQueueEntry(cacheKey, 
cachedItem, accessCount.incrementAndGet(), inMemory);
-382if (ramCache.putIfAbsent(cacheKey, 
re) != null) {
-383  return;
-384}
-385int queueNum = (cacheKey.hashCode() 
& 0x7FFF) % writerQueues.size();
-386BlockingQueue<RAMQueueEntry> bq 
= writerQueues.get(queueNum);
-387boolean successfulAddition = false;
-388if (wait) {
-389  try {
-390successfulAddition = bq.offer(re, 
DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
-391  } catch (InterruptedException e) 
{
-392
Thread.currentThread().interrupt();
-393  }
-394} else {
-395  successfulAddition = 
bq.offer(re);
-396}
-397if (!successfulAddition) {
-398  ramCache.remove(cacheKey);
-399  cacheStats.failInsert();
-400} else {
-401  

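In the hunk above, getIOEngineFromName dispatches on the engine-name prefix: "file:" or "files:" backs the bucket cache with one or more files, "offheap" and "heap" select the ByteBufferIOEngine variants, and "mmap:" selects FileMmapEngine; the changed constructor line now also passes persistencePath into that lookup. The engine name is normally supplied through configuration; a sketch of setting it programmatically, where the paths and sizes are examples only and the hbase.bucketcache.* keys are the standard ones:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BucketCacheConfigExample {
  static Configuration bucketCacheConf() {
    Configuration conf = HBaseConfiguration.create();
    // "files:" (or legacy "file:") backs the cache with one or more files;
    // "offheap", "heap" and "mmap:<path>" are the other recognized prefixes.
    conf.set("hbase.bucketcache.ioengine",
        "files:/mnt/ssd1/bucketcache,/mnt/ssd2/bucketcache");
    conf.set("hbase.bucketcache.size", "8192"); // MB
    return conf;
  }
}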
[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.html
 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.html
index d157dbf..da33b58 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.html
@@ -49,7 +49,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-PrevClass
+PrevClass
 NextClass
 
 
@@ -337,7 +337,7 @@ extends 
 
-PrevClass
+PrevClass
 NextClass
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/client/QuotaStatusCalls.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/QuotaStatusCalls.html 
b/devapidocs/org/apache/hadoop/hbase/client/QuotaStatusCalls.html
index ee2160f..8c963db 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/QuotaStatusCalls.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/QuotaStatusCalls.html
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames
@@ -370,7 +370,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
new file mode 100644
index 000..efc1a6a
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
@@ -0,0 +1,339 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer (Apache HBase 
3.0.0-SNAPSHOT API)
+
+
+
+
+
+var methods = {"i0":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.client
+Class RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
+
+
+
+http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.ProcedureBiConsumer
+
+
+org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.TableProcedureBiConsumer
+
+
+org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
+
+
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+BiConsumer<Void,Throwable>
+
+
+Enclosing class:
+RawAsyncHBaseAdmin
+
+
+
+private class RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
+extends RawAsyncHBaseAdmin.TableProcedureBiConsumer
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+
+
+
+Fields inherited from class org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.TableProcedureBiConsumer
+tableName
+
+
+
+
+
+Fields inherited from class org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.ProcedureBiConsumer
+admin
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+AddColumnFamilyProcedureBiConsumer(AsyncAdmin admin,
+  TableName tableName)
+
+
+
+
+
+
+
+
+
+Method 

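The new page above documents AddColumnFamilyProcedureBiConsumer, one of several BiConsumer<Void,Throwable> subclasses that RawAsyncHBaseAdmin hangs off a procedure's CompletableFuture so each admin operation can react when the master-side procedure finishes. A generic, self-contained sketch of that completion-callback shape; the class name, operation string and logging here are hypothetical:

import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

public class ProcedureCallbackExample {
  // A callback handed the procedure outcome: null error means success.
  static final class LoggingConsumer implements BiConsumer<Void, Throwable> {
    private final String operation;
    LoggingConsumer(String operation) { this.operation = operation; }
    @Override public void accept(Void v, Throwable error) {
      if (error != null) {
        System.err.println(operation + " failed: " + error);
      } else {
        System.out.println(operation + " completed");
      }
    }
  }

  public static void main(String[] args) {
    // Stand-in for the future an async admin call would return.
    CompletableFuture<Void> procedureDone = CompletableFuture.completedFuture(null);
    procedureDone.whenComplete(new LoggingConsumer("ADD_COLUMN_FAMILY"));
  }
}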
[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
index dc12c09..82506d2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
@@ -54,2261 +54,2259 @@
 046import org.apache.commons.io.IOUtils;
 047import org.apache.commons.logging.Log;
 048import 
org.apache.commons.logging.LogFactory;
-049import 
org.apache.directory.api.util.OptionalComponentsMonitor;
-050import 
org.apache.hadoop.hbase.HRegionInfo;
-051import 
org.apache.hadoop.hbase.HRegionLocation;
-052import 
org.apache.hadoop.hbase.MetaTableAccessor;
-053import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import 
org.apache.hadoop.hbase.NotServingRegionException;
-055import 
org.apache.hadoop.hbase.ProcedureInfo;
-056import 
org.apache.hadoop.hbase.RegionLocations;
-057import 
org.apache.hadoop.hbase.ServerName;
-058import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-059import 
org.apache.hadoop.hbase.HConstants;
-060import 
org.apache.hadoop.hbase.TableExistsException;
-061import 
org.apache.hadoop.hbase.TableName;
-062import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-063import 
org.apache.hadoop.hbase.TableNotDisabledException;
-064import 
org.apache.hadoop.hbase.TableNotEnabledException;
-065import 
org.apache.hadoop.hbase.TableNotFoundException;
-066import 
org.apache.hadoop.hbase.UnknownRegionException;
-067import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-068import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-069import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-072import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-074import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import 
org.apache.hadoop.hbase.replication.ReplicationException;
-080import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-104import 

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
index e65748d..91a0ffa 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
@@ -372,1874 +372,1873 @@
 364   * is stored in the name, so the 
returned object should only be used for the fields
 365   * in the regionName.
 366   */
-367  protected static HRegionInfo 
parseRegionInfoFromRegionName(byte[] regionName)
-368throws IOException {
-369byte[][] fields = 
HRegionInfo.parseRegionName(regionName);
-370long regionId =  
Long.parseLong(Bytes.toString(fields[2]));
-371int replicaId = fields.length  3 
? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0;
-372return new HRegionInfo(
-373  TableName.valueOf(fields[0]), 
fields[1], fields[1], false, regionId, replicaId);
-374  }
-375
-376  /**
-377   * Gets the result in hbase:meta for 
the specified region.
-378   * @param connection connection we're 
using
-379   * @param regionName region we're 
looking for
-380   * @return result of the specified 
region
-381   * @throws IOException
-382   */
-383  public static Result 
getRegionResult(Connection connection,
-384  byte[] regionName) throws 
IOException {
-385Get get = new Get(regionName);
-386
get.addFamily(HConstants.CATALOG_FAMILY);
-387return get(getMetaHTable(connection), 
get);
-388  }
-389
-390  /**
-391   * Get regions from the merge qualifier 
of the specified merged region
-392   * @return null if it doesn't contain 
merge qualifier, else two merge regions
-393   * @throws IOException
-394   */
-395  @Nullable
-396  public static Pair<HRegionInfo, 
HRegionInfo> getRegionsFromMergeQualifier(
-397  Connection connection, byte[] 
regionName) throws IOException {
-398Result result = 
getRegionResult(connection, regionName);
-399HRegionInfo mergeA = 
getHRegionInfo(result, HConstants.MERGEA_QUALIFIER);
-400HRegionInfo mergeB = 
getHRegionInfo(result, HConstants.MERGEB_QUALIFIER);
-401if (mergeA == null && mergeB 
== null) {
-402  return null;
-403}
-404return new Pair<>(mergeA, 
mergeB);
-405 }
-406
-407  /**
-408   * Checks if the specified table 
exists.  Looks at the hbase:meta table hosted on
-409   * the specified server.
-410   * @param connection connection we're 
using
-411   * @param tableName table to check
-412   * @return true if the table exists in 
meta, false if not
-413   * @throws IOException
-414   */
-415  public static boolean 
tableExists(Connection connection,
-416  final TableName tableName)
-417  throws IOException {
-418// Catalog tables always exist.
-419return 
tableName.equals(TableName.META_TABLE_NAME)
-420|| getTableState(connection, 
tableName) != null;
-421  }
-422
-423  /**
-424   * Lists all of the regions currently 
in META.
-425   *
-426   * @param connection to connect with
-427   * @param excludeOfflinedSplitParents 
False if we are to include offlined/splitparents regions,
-428   *
true and we'll leave out offlined regions from returned list
-429   * @return List of all user-space 
regions.
-430   * @throws IOException
-431   */
-432  @VisibleForTesting
-433  public static List<HRegionInfo> 
getAllRegions(Connection connection,
-434  boolean 
excludeOfflinedSplitParents)
-435  throws IOException {
-436List<Pair<HRegionInfo, 
ServerName>> result;
-437
-438result = 
getTableRegionsAndLocations(connection, null,
-439excludeOfflinedSplitParents);
-440
-441return 
getListOfHRegionInfos(result);
-442
-443  }
-444
-445  /**
-446   * Gets all of the regions of the 
specified table. Do not use this method
-447   * to get meta table regions, use 
methods in MetaTableLocator instead.
-448   * @param connection connection we're 
using
-449   * @param tableName table we're looking 
for
-450   * @return Ordered list of {@link 
HRegionInfo}.
-451   * @throws IOException
-452   */
-453  public static List<HRegionInfo> 
getTableRegions(Connection connection, TableName tableName)
-454  throws IOException {
-455return getTableRegions(connection, 
tableName, false);
-456  }
-457
-458  /**
-459   * Gets all of the regions of the 
specified table. Do not use this method
-460   * to get meta table regions, use 
methods in MetaTableLocator instead.
-461   * @param connection connection we're 
using
-462   * @param tableName table we're looking 
for
-463   * @param excludeOfflinedSplitParents 
If true, do not include offlined split
-464   * parents in the return.
-465   * @return Ordered list of {@link 

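parseRegionInfoFromRegionName in the hunk above leans on HRegionInfo.parseRegionName to split a region name into its fields, then rebuilds an HRegionInfo whose start key doubles as the end key, which is why the javadoc warns to use only the name-derived fields. A self-contained sketch of that field interpretation; the fields[] layout mirrors the code above, while the class and demo values are illustrative:

public class RegionNameFieldsExample {
  // Mirrors the layout used above: fields[0]=table, fields[1]=startKey,
  // fields[2]=regionId (decimal), fields[3]=replicaId (hex, optional).
  static String describe(byte[][] fields) {
    String table = new String(fields[0]);
    long regionId = Long.parseLong(new String(fields[2]));
    int replicaId = fields.length > 3 ? Integer.parseInt(new String(fields[3]), 16) : 0;
    return table + " regionId=" + regionId + " replicaId=" + replicaId;
  }

  public static void main(String[] args) {
    byte[][] fields = { "t1".getBytes(), "startKey".getBytes(),
        "1496900000000".getBytes(), "1".getBytes() };
    System.out.println(describe(fields)); // t1 regionId=1496900000000 replicaId=1
  }
}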
[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
index 6de986f..c895448 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
@@ -26,1592 +26,1693 @@
 018package 
org.apache.hadoop.hbase.master.balancer;
 019
 020import java.util.ArrayDeque;
-021import java.util.Arrays;
-022import java.util.Collection;
-023import java.util.Deque;
-024import java.util.HashMap;
-025import java.util.LinkedList;
-026import java.util.List;
-027import java.util.Map;
-028import java.util.Map.Entry;
-029import java.util.Random;
-030
-031import org.apache.commons.logging.Log;
-032import 
org.apache.commons.logging.LogFactory;
-033import 
org.apache.hadoop.conf.Configuration;
-034import 
org.apache.hadoop.hbase.ClusterStatus;
-035import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-036import 
org.apache.hadoop.hbase.HConstants;
-037import 
org.apache.hadoop.hbase.HRegionInfo;
-038import 
org.apache.hadoop.hbase.RegionLoad;
-039import 
org.apache.hadoop.hbase.ServerLoad;
-040import 
org.apache.hadoop.hbase.ServerName;
-041import 
org.apache.hadoop.hbase.TableName;
-042import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-043import 
org.apache.hadoop.hbase.master.MasterServices;
-044import 
org.apache.hadoop.hbase.master.RegionPlan;
-045import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
-046import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
-047import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
-048import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
-049import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-050import 
org.apache.hadoop.hbase.util.Bytes;
-051import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-052
-053import com.google.common.collect.Lists;
-054
-055/**
-056 * <p>This is a best effort load 
balancer. Given a Cost function F(C) => x It will
-057 * randomly try and mutate the cluster to 
Cprime. If F(Cprime) < F(C) then the
-058 * new cluster state becomes the plan. It 
includes costs functions to compute the cost of:</p>
-059 * <ul>
-060 * <li>Region Load</li>
-061 * <li>Table Load</li>
-062 * <li>Data Locality</li>
-063 * <li>Memstore Sizes</li>
-064 * <li>Storefile Sizes</li>
-065 * </ul>
-066 *
-067 *
-068 * <p>Every cost function returns a 
number between 0 and 1 inclusive; where 0 is the lowest cost
-069 * best solution, and 1 is the highest 
possible cost and the worst solution.  The computed costs are
-070 * scaled by their respective 
multipliers:</p>
+021import java.util.ArrayList;
+022import java.util.Arrays;
+023import java.util.Collection;
+024import java.util.Collections;
+025import java.util.Deque;
+026import java.util.HashMap;
+027import java.util.LinkedList;
+028import java.util.List;
+029import java.util.Map;
+030import java.util.Map.Entry;
+031import java.util.Random;
+032
+033import org.apache.commons.logging.Log;
+034import 
org.apache.commons.logging.LogFactory;
+035import 
org.apache.hadoop.conf.Configuration;
+036import 
org.apache.hadoop.hbase.ClusterStatus;
+037import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
+038import 
org.apache.hadoop.hbase.HConstants;
+039import 
org.apache.hadoop.hbase.HRegionInfo;
+040import 
org.apache.hadoop.hbase.RegionLoad;
+041import 
org.apache.hadoop.hbase.ServerLoad;
+042import 
org.apache.hadoop.hbase.ServerName;
+043import 
org.apache.hadoop.hbase.TableName;
+044import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+045import 
org.apache.hadoop.hbase.master.MasterServices;
+046import 
org.apache.hadoop.hbase.master.RegionPlan;
+047import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
+048import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
+049import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
+050import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
+051import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
+052import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
+053import 
org.apache.hadoop.hbase.util.Bytes;
+054import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+055
+056import com.google.common.base.Optional;
+057import 

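The class comment restored above is the balancer's algorithm in miniature: each cost function maps a cluster state into [0,1], each result is scaled by its multiplier, and a randomly mutated state is kept only when the weighted sum drops. A generic, self-contained sketch of that accept-if-cheaper loop; the cost functions, weights and mutation operator are stand-ins, not HBase's:

import java.util.List;
import java.util.Random;
import java.util.function.ToDoubleFunction;

final class StochasticSearch {
  interface Mutator<C> { C mutate(C state, Random rnd); }

  // Propose random mutations; keep a candidate only when its weighted cost is lower.
  static <C> C search(C state, List<ToDoubleFunction<C>> costs, double[] weights,
      Mutator<C> mutator, int steps, Random rnd) {
    double best = weightedCost(state, costs, weights);
    for (int i = 0; i < steps; i++) {
      C candidate = mutator.mutate(state, rnd);
      double cost = weightedCost(candidate, costs, weights);
      if (cost < best) { state = candidate; best = cost; }
    }
    return state;
  }

  // Each cost function is expected to return a value in [0,1]; multipliers scale them.
  static <C> double weightedCost(C state, List<ToDoubleFunction<C>> costs, double[] weights) {
    double total = 0;
    for (int i = 0; i < costs.size(); i++) {
      total += weights[i] * costs.get(i).applyAsDouble(state);
    }
    return total;
  }
}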
[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html
index 78816d5..ca6eaae 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html
@@ -169,599 +169,601 @@
 161// The current "view" of region space 
use. Used henceforth.
 162final Map<HRegionInfo,Long> 
reportedRegionSpaceUse = quotaManager.snapshotRegionSizes();
 163if (LOG.isTraceEnabled()) {
-164  LOG.trace("Using " + 
reportedRegionSpaceUse.size() + " region space use reports");
-165}
-166
-167// Remove the "old" region reports
-168pruneOldRegionReports();
-169
-170// Create the stores to track table 
and namespace snapshots
-171
initializeSnapshotStores(reportedRegionSpaceUse);
-172// Report the number of (non-expired) 
region size reports
-173if (metrics != null) {
-174  
metrics.setNumRegionSizeReports(reportedRegionSpaceUse.size());
-175}
-176
-177// Filter out tables for which we 
don't have adequate regionspace reports yet.
-178// Important that we do this after we 
instantiate the stores above
-179// This gives us a set of Tables 
which may or may not be violating their quota.
-180// To be safe, we want to make sure 
that these are not in violation.
-181Set<TableName> tablesInLimbo = 
tablesWithQuotas.filterInsufficientlyReportedTables(
-182tableSnapshotStore);
-183
-184if (LOG.isTraceEnabled()) {
-185  LOG.trace("Filtered insufficiently 
reported tables, left with " +
-186  reportedRegionSpaceUse.size() + 
" regions reported");
-187}
-188
-189for (TableName tableInLimbo : 
tablesInLimbo) {
-190  final SpaceQuotaSnapshot 
currentSnapshot = tableSnapshotStore.getCurrentState(tableInLimbo);
-191  if 
(currentSnapshot.getQuotaStatus().isInViolation()) {
-192if (LOG.isTraceEnabled()) {
-193  LOG.trace("Moving " + 
tableInLimbo + " out of violation because fewer region sizes were"
-194  + " reported than 
required.");
-195}
-196SpaceQuotaSnapshot targetSnapshot 
= new SpaceQuotaSnapshot(
-197
SpaceQuotaStatus.notInViolation(), currentSnapshot.getUsage(),
-198
currentSnapshot.getLimit());
-199
this.snapshotNotifier.transitionTable(tableInLimbo, targetSnapshot);
-200// Update it in the Table 
QuotaStore so that memory is consistent with no violation.
-201
tableSnapshotStore.setCurrentState(tableInLimbo, targetSnapshot);
-202  }
-203}
-204
-205// Transition each table to/from 
quota violation based on the current and target state.
-206// Only table quotas are enacted.
-207final Set<TableName> 
tablesWithTableQuotas = tablesWithQuotas.getTableQuotaTables();
-208
processTablesWithQuotas(tablesWithTableQuotas);
-209
-210// For each Namespace quota, 
transition each table in the namespace in or out of violation
-211// only if a table quota violation 
policy has not already been applied.
-212final Set<String> 
namespacesWithQuotas = tablesWithQuotas.getNamespacesWithQuotas();
-213final 
Multimap<String,TableName> tablesByNamespace = 
tablesWithQuotas.getTablesByNamespace();
-214
processNamespacesWithQuotas(namespacesWithQuotas, tablesByNamespace);
-215  }
-216
-217  void 
initializeSnapshotStores(Map<HRegionInfo,Long> regionSizes) {
-218Map<HRegionInfo,Long> 
immutableRegionSpaceUse = Collections.unmodifiableMap(regionSizes);
-219if (tableSnapshotStore == null) {
-220  tableSnapshotStore = new 
TableQuotaSnapshotStore(conn, this, immutableRegionSpaceUse);
-221} else {
-222  
tableSnapshotStore.setRegionUsage(immutableRegionSpaceUse);
-223}
-224if (namespaceSnapshotStore == null) 
{
-225  namespaceSnapshotStore = new 
NamespaceQuotaSnapshotStore(
-226  conn, this, 
immutableRegionSpaceUse);
-227} else {
-228  
namespaceSnapshotStore.setRegionUsage(immutableRegionSpaceUse);
-229}
-230  }
-231
-232  /**
-233   * Processes each {@code TableName} 
which has a quota defined and moves it in or out of
-234   * violation based on the space use.
-235   *
-236   * @param tablesWithTableQuotas The 
HBase tables which have quotas defined
-237   */
-238  void processTablesWithQuotas(final 
Set<TableName> tablesWithTableQuotas) throws IOException {
-239long numTablesInViolation = 0L;
-240for (TableName table : 
tablesWithTableQuotas) {
-241  final SpaceQuota spaceQuota = 
tableSnapshotStore.getSpaceQuota(table);
-242  if (spaceQuota == null) {
-243if (LOG.isDebugEnabled()) {
-244  LOG.debug("Unexpectedly did not 
find a space quota 

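Pulled out of the hunk above, the chore's ordering matters: snapshot region sizes, prune expired reports, seed the table and namespace snapshot stores, move under-reported ("limbo") tables out of violation, then enforce table quotas before namespace quotas. A condensed, self-contained restatement of that sequence, where the abstract methods are stand-ins for the QuotaObserverChore collaborators named in the hunk and moveOutOfViolation names the limbo loop:

import java.io.IOException;
import java.util.Map;
import java.util.Set;

abstract class QuotaChoreSketch<HRegionInfo, TableName> {
  abstract Map<HRegionInfo, Long> snapshotRegionSizes();
  abstract void pruneOldRegionReports();
  abstract void initializeSnapshotStores(Map<HRegionInfo, Long> sizes);
  abstract Set<TableName> filterInsufficientlyReportedTables();
  abstract void moveOutOfViolation(Set<TableName> tables); // the "limbo" loop above
  abstract Set<TableName> tableQuotaTables();
  abstract void processTablesWithQuotas(Set<TableName> tables) throws IOException;
  abstract void processNamespacesWithQuotas() throws IOException;

  void choreBody() throws IOException {
    Map<HRegionInfo, Long> sizes = snapshotRegionSizes(); // current view of space use
    pruneOldRegionReports();                  // drop expired reports before counting
    initializeSnapshotStores(sizes);          // table + namespace quota stores
    moveOutOfViolation(filterInsufficientlyReportedTables()); // never punish limbo tables
    processTablesWithQuotas(tableQuotaTables());              // table quotas first
    processNamespacesWithQuotas();                            // then namespace quotas
  }
}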
[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
 
b/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
index b38706f..88ee38a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
@@ -1813,89 +1813,102 @@ service.
 
 
 
+static class
+ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor
+An ModifyableFamilyDescriptor contains information about a 
column family such as the
+ number of versions, compression settings, etc.
+
+
+
 (package private) class
 CompleteScanResultCache
 A scan result cache that only returns complete result.
 
 
-
+
 class
 ConnectionConfiguration
 Configuration parameters for the connection.
 
 
-
+
 (package private) class
 ConnectionImplementation
 Main implementation of Connection 
and ClusterConnection 
interfaces.
 
 
-
+
 class
 ConnectionUtils
 Utility used by client connections.
 
 
-
+
 class
 CoprocessorHConnection
 Connection to an HTable from within a Coprocessor.
 
 
-
+
 class
 DelayingRunner
 A wrapper for a runnable for a group of actions for a 
single regionserver.
 
 
-
+
 (package private) class
 FailureInfo
 Keeps track of repeated failures to any region server.
 
 
-
+
 (package private) class
 FastFailInterceptorContext
 
-
+
 class
 FlushRegionCallable
 A Callable for flushRegion() RPC.
 
 
-
+
 class
 HBaseAdmin
 HBaseAdmin is no longer a client API.
 
 
-
+
 protected static class
 HBaseAdmin.NamespaceFuture
 
-
+
 protected static class
 HBaseAdmin.ProcedureFutureV
 Future that waits on a procedure result.
 
 
-
+
 protected static class
 HBaseAdmin.TableFutureV
 
-
+
 class
 HRegionLocator
 An implementation of RegionLocator.
 
 
-
+
 class
 HTable
 An implementation of Table.
 
 
+
+class
+ImmutableHColumnDescriptor
+Deprecated.
+
+
 
 class
 MetaCache
@@ -2218,11 +2231,73 @@ service.
 
 
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+ColumnFamilyDescriptorBuilder.BLOCKCACHE
+Key for the BLOCKCACHE attribute.
+
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+ColumnFamilyDescriptorBuilder.BLOCKSIZE
+Size of storefile/hfile 'blocks'.
+
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+ColumnFamilyDescriptorBuilder.BLOOMFILTER
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+ColumnFamilyDescriptorBuilder.CACHE_BLOOMS_ON_WRITE
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+ColumnFamilyDescriptorBuilder.CACHE_DATA_IN_L1
+Key for cache data into L1 if cache is set up with more 
than one tier.
+
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+ColumnFamilyDescriptorBuilder.CACHE_DATA_ON_WRITE
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+ColumnFamilyDescriptorBuilder.CACHE_INDEX_ON_WRITE
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 TableDescriptorBuilder.COMPACTION_ENABLED
 Used by HBase Shell interface to access this metadata
  attribute which denotes if the table is compaction enabled.
 
 
+
+static http://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true;
 title="class or interface in java.util">ComparatorTableDescriptor
+TableDescriptor.COMPARATOR
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true;
 title="class or interface in java.util">ComparatorColumnFamilyDescriptor
+ColumnFamilyDescriptor.COMPARATOR
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+ColumnFamilyDescriptorBuilder.COMPRESS_TAGS
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+ColumnFamilyDescriptorBuilder.COMPRESSION
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+ColumnFamilyDescriptorBuilder.COMPRESSION_COMPACT
+
+
+static 

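The class-use rows above enumerate ColumnFamilyDescriptorBuilder attribute keys (BLOCKCACHE, BLOCKSIZE, BLOOMFILTER, COMPRESSION, and so on); each has a corresponding builder setter. A sketch of setting a few of them, where the setter names follow the 2.x builder API and the values are examples rather than recommendations:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyBuilderExample {
  // Each setter corresponds to one attribute key from the listing above.
  static ColumnFamilyDescriptor exampleFamily() {
    return ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))
        .setBlockCacheEnabled(true)                        // BLOCKCACHE
        .setBlocksize(64 * 1024)                           // BLOCKSIZE
        .setBloomFilterType(BloomType.ROW)                 // BLOOMFILTER
        .setCompressionType(Compression.Algorithm.SNAPPY)  // COMPRESSION
        .build();
  }
}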
[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 09d60f4..1a5d42a 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -286,10 +286,10 @@
 Warnings
 Errors
 
-2227
+2228
 0
 0
-14468
+14494
 
 Files
 
@@ -662,12 +662,12 @@
 org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
 0
 0
-10
+12
 
 org/apache/hadoop/hbase/backup/impl/BackupCommands.java
 0
 0
-58
+60
 
 org/apache/hadoop/hbase/backup/impl/BackupManager.java
 0
@@ -682,7 +682,7 @@
 org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
 0
 0
-40
+41
 
 org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
 0
@@ -1742,7 +1742,7 @@
 org/apache/hadoop/hbase/filter/FilterList.java
 0
 0
-37
+53
 
 org/apache/hadoop/hbase/filter/FilterWrapper.java
 0
@@ -4337,7 +4337,7 @@
 org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 0
 0
-132
+133
 
 org/apache/hadoop/hbase/regionserver/RSStatusServlet.java
 0
@@ -4427,7 +4427,7 @@
 org/apache/hadoop/hbase/regionserver/ScannerContext.java
 0
 0
-12
+14
 
 org/apache/hadoop/hbase/regionserver/ScannerIdGenerator.java
 0
@@ -4757,7 +4757,7 @@
 org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
 0
 0
-7
+8
 
 org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
 0
@@ -6117,7 +6117,7 @@
 org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 0
 0
-234
+235
 
 org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
 0
@@ -7209,12 +7209,12 @@
 
 
 http://checkstyle.sourceforge.net/config_blocks.html#LeftCurly;>LeftCurly
-355
+356
 Error
 
 
 http://checkstyle.sourceforge.net/config_blocks.html#NeedBraces;>NeedBraces
-1975
+1976
 Error
 
 coding
@@ -7267,7 +7267,7 @@
 
 imports
 http://checkstyle.sourceforge.net/config_imports.html#AvoidStarImport;>AvoidStarImport
-13
+14
 Error
 
 
@@ -7288,7 +7288,7 @@
 http://checkstyle.sourceforge.net/config_imports.html#UnusedImports;>UnusedImports
 
 processJavadoc: true
-81
+84
 Error
 
 indentation
@@ -7299,19 +7299,19 @@
 caseIndent: 2
 basicOffset: 2
 lineWrappingIndentation: 2
-5080
+5096
 Error
 
 javadoc
 http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation;>JavadocTagContinuationIndentation
 
 offset: 2
-791
+779
 Error
 
 
 http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription;>NonEmptyAtclauseDescription
-3206
+3218
 Error
 
 misc
@@ -7329,7 +7329,7 @@
 
 max: 100
 ignorePattern: ^package.*|^import.*|a 
href|href|http://|https://|ftp://|org.apache.thrift.|com.google.protobuf.|hbase.protobuf.generated
-863
+865
 Error
 
 
@@ -7344,12 +7344,12 @@
 
 
 http://checkstyle.sourceforge.net/config_whitespace.html#MethodParamPad;>MethodParamPad
-197
+198
 Error
 
 
 http://checkstyle.sourceforge.net/config_whitespace.html#ParenPad;>ParenPad
-110
+111
 Error
 
 Details
@@ -13004,58 +13004,70 @@
 51
 
 Error
+sizes
+LineLength
+Line is longer than 100 characters (found 101).
+112
+
+Error
+indentation
+Indentation
+'if' child have incorrect indentation level 10, expected level should be 
8.
+129
+
+Error
 javadoc
-JavadocTagContinuationIndentation
+NonEmptyAtclauseDescription
 Javadoc comment at column 43 has parse error. Missed HTML close tag 
'TableName'. Sometimes it means that close tag missed for one of previous 
tags.
-121
+181
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 104).
-157
+217
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 105).
-158
+218
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-163
+223
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-279
+341
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-330
+392
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-341
+403
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-346
+408
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-381
+443
 
 org/apache/hadoop/hbase/backup/impl/BackupCommands.java
 
@@ -13070,349 +13082,361 @@
 imports
 ImportOrder
 Wrong order for 'com.google.common.collect.Lists' import.
-63
+64
 
 Error
 indentation
 Indentation
 'case' child have incorrect indentation level 4, expected level should be 
6.
-177
+199
 
 Error
 indentation
 Indentation
 'block' child have incorrect indentation level 6, expected level should be 
8.
-178
+200
 
 Error
 indentation
 Indentation
 'block' child have incorrect indentation level 6, expected level should be 
8.
-179
+201
 
 Error
 indentation
 Indentation
 'case' child have incorrect indentation level 4, expected level should be 
6.
-180
+202
 
 Error
 indentation
 Indentation
 'block' child have incorrect indentation level 6, expected level should be 
8.
-181
+203
 
 Error
 

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6ad4f21a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
index fe8187b..b6e0403 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
@@ -211,9 +211,9 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
+org.apache.hadoop.hbase.master.procedure.DisableTableProcedure.MarkRegionOfflineOpResult
 org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType
 org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType
-org.apache.hadoop.hbase.master.procedure.DisableTableProcedure.MarkRegionOfflineOpResult
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6ad4f21a/devapidocs/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.html 
b/devapidocs/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.html
index 4e1853c..cdea00d 100644
--- a/devapidocs/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.html
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames
@@ -357,7 +357,7 @@ implements org.apache.hadoop.metrics2.MetricsInfo
 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6ad4f21a/devapidocs/org/apache/hadoop/hbase/metrics/OperationMetrics.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/metrics/OperationMetrics.html 
b/devapidocs/org/apache/hadoop/hbase/metrics/OperationMetrics.html
new file mode 100644
index 000..e714fcc
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/metrics/OperationMetrics.html
@@ -0,0 +1,415 @@
+OperationMetrics (Apache HBase 2.0.0-SNAPSHOT API)
+
+org.apache.hadoop.hbase.metrics
+Class OperationMetrics
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.metrics.OperationMetrics
+
+
+
+
+
+
+
+
+public class OperationMetrics
+extends Object
+
+Container class for commonly collected metrics for most operations. Instantiate this class to
+ collect submitted count, failed count and time histogram for an operation.
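The pattern that description outlines can be captured in a small, self-contained stand-in; this sketch is not the HBase class, and the plain LongAdder totals stand in for the real counters and histogram:

    import java.util.concurrent.Callable;
    import java.util.concurrent.atomic.LongAdder;

    // Sketch of the submitted/failed/time pattern described above.
    final class OpMetricsSketch {
      final LongAdder submitted = new LongAdder();
      final LongAdder failed = new LongAdder();
      final LongAdder totalNanos = new LongAdder();  // real code keeps a histogram

      <T> T time(Callable<T> op) throws Exception {
        submitted.increment();
        long start = System.nanoTime();
        try {
          return op.call();
        } catch (Exception e) {
          failed.increment();   // count the failure, then rethrow
          throw e;
        } finally {
          totalNanos.add(System.nanoTime() - start);
        }
      }
    }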
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+private static String
+FAILED_COUNT
+
+
+private Counter
+failedCounter
+
+
+private static String
+SUBMITTED_COUNT
+
+
+private Counter
+submittedCounter
+
+private static String
 title="class or interface in 

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Writer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Writer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Writer.html
index b6c2fe3..1765903 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Writer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Writer.html
@@ -60,892 +60,917 @@
 052import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 053import 
org.apache.hadoop.hbase.fs.HFileSystem;
 054import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-055import 
org.apache.hadoop.hbase.io.compress.Compression;
-056import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-057import 
org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-058import 
org.apache.hadoop.hbase.regionserver.CellSink;
-059import 
org.apache.hadoop.hbase.regionserver.ShipperListener;
-060import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-063import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair;
-064import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;
-065import 
org.apache.hadoop.hbase.util.BloomFilterWriter;
-066import 
org.apache.hadoop.hbase.util.Bytes;
-067import 
org.apache.hadoop.hbase.util.FSUtils;
-068import org.apache.hadoop.io.Writable;
-069
-070import 
com.google.common.annotations.VisibleForTesting;
-071import 
com.google.common.base.Preconditions;
-072
-073/**
-074 * File format for hbase.
-075 * A file of sorted key/value pairs. Both keys and values are byte arrays.
-076 * <p>
-077 * The memory footprint of a HFile includes the following (below is taken from the
-078 * <a
-079 * href=https://issues.apache.org/jira/browse/HADOOP-3315>TFile</a> documentation
-080 * but applies also to HFile):
-081 * <ul>
-082 * <li>Some constant overhead of reading or writing a compressed block.
+055import 
org.apache.hadoop.hbase.io.MetricsIO;
+056import 
org.apache.hadoop.hbase.io.MetricsIOWrapperImpl;
+057import 
org.apache.hadoop.hbase.io.compress.Compression;
+058import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+059import 
org.apache.hadoop.hbase.protobuf.ProtobufMagic;
+060import 
org.apache.hadoop.hbase.regionserver.CellSink;
+061import 
org.apache.hadoop.hbase.regionserver.ShipperListener;
+062import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+063import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+064import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+065import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair;
+066import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;
+067import 
org.apache.hadoop.hbase.util.BloomFilterWriter;
+068import 
org.apache.hadoop.hbase.util.Bytes;
+069import 
org.apache.hadoop.hbase.util.FSUtils;
+070import org.apache.hadoop.io.Writable;
+071
+072import 
com.google.common.annotations.VisibleForTesting;
+073import 
com.google.common.base.Preconditions;
+074
+075/**
+076 * File format for hbase.
+077 * A file of sorted key/value pairs. Both keys and values are byte arrays.
+078 * <p>
+079 * The memory footprint of a HFile includes the following (below is taken from the
+080 * <a
+081 * href=https://issues.apache.org/jira/browse/HADOOP-3315>TFile</a> documentation
+082 * but applies also to HFile):
 083 * <ul>
-084 * <li>Each compressed block requires one compression/decompression codec for
-085 * I/O.
-086 * <li>Temporary space to buffer the key.
-087 * <li>Temporary space to buffer the value.
-088 * </ul>
-089 * <li>HFile index, which is proportional to the total number of Data Blocks.
-090 * The total amount of memory needed to hold the index can be estimated as
-091 * (56+AvgKeySize)*NumBlocks.
-092 * </ul>
-093 * Suggestions on performance optimization.
-094 * <ul>
-095 * <li>Minimum block size. We recommend a setting of minimum block size between
-096 * 8KB to 1MB for general usage. Larger block size is preferred if files are
-097 * primarily for sequential access. However, it would lead to inefficient random
-098 * access (because there are more data to decompress). Smaller blocks are good
-099 * for random access, but require more memory to hold the block index, and may
-100 * be slower to create (because we must flush the compressor stream at the
-101 * conclusion of each data block, which leads to an FS I/O flush). Further, due
-102 * to the internal caching in Compression codec, the smallest possible block
-103 * size would be around 20KB-30KB.
-104 * <li>The current implementation does not offer true multi-threading for
-105 * reading. The implementation uses 
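Taking the 8KB-1MB guidance above, a hedged sketch of tuning the per-family block size through the HColumnDescriptor API of this era; 64KB is an illustrative middle-ground value, not a recommendation from this commit:

    import org.apache.hadoop.hbase.HColumnDescriptor;

    class BlockSizeExample {
      // Larger blocks favor sequential scans; smaller blocks favor random reads.
      static HColumnDescriptor scanHeavyFamily() {
        HColumnDescriptor cf = new HColumnDescriptor("cf");
        cf.setBlocksize(64 * 1024);
        return cf;
      }
    }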

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6dd31117/devapidocs/org/apache/hadoop/hbase/ipc/RpcExecutor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/RpcExecutor.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/RpcExecutor.html
index ad9bbc9..1a6e721 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/RpcExecutor.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/RpcExecutor.html
@@ -846,7 +846,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ONE_QUEUE
-private static RpcExecutor.QueueBalancer ONE_QUEUE
+private static RpcExecutor.QueueBalancer ONE_QUEUE
 All requests go to the first queue, at index 0
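A hedged sketch of what that amounts to, assuming QueueBalancer's contract is the single getNextQueue() method returning a queue index:

    // Every request lands on queue 0; a sketch, not the verbatim field initializer.
    private static final RpcExecutor.QueueBalancer ONE_QUEUE = new RpcExecutor.QueueBalancer() {
      @Override
      public int getNextQueue() {
        return 0;
      }
    };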
 
 
@@ -1001,7 +1001,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getBalancer
-public static RpcExecutor.QueueBalancer getBalancer(int queueSize)
+public static RpcExecutor.QueueBalancer getBalancer(int queueSize)
 
 
 
@@ -1010,7 +1010,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 isDeadlineQueueType
-public static boolean isDeadlineQueueType(String callQueueType)
+public static boolean isDeadlineQueueType(String callQueueType)
 
 
 
@@ -1019,7 +1019,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 isCodelQueueType
-public static boolean isCodelQueueType(String callQueueType)
+public static boolean isCodelQueueType(String callQueueType)
 
 
 
@@ -1028,7 +1028,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 isFifoQueueType
-public static boolean isFifoQueueType(String callQueueType)
+public static boolean isFifoQueueType(String callQueueType)
 
 
 
@@ -1037,7 +1037,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getNumGeneralCallsDropped
-public long getNumGeneralCallsDropped()
+public long getNumGeneralCallsDropped()
 
 
 
@@ -1046,7 +1046,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getNumLifoModeSwitches
-public long getNumLifoModeSwitches()
+public long getNumLifoModeSwitches()
 
 
 
@@ -1055,7 +1055,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getActiveHandlerCount
-public int getActiveHandlerCount()
+public int getActiveHandlerCount()
 
 
 
@@ -1064,7 +1064,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getActiveWriteHandlerCount
-public int getActiveWriteHandlerCount()
+public int getActiveWriteHandlerCount()
 
 
 
@@ -1073,7 +1073,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getActiveReadHandlerCount
-public int getActiveReadHandlerCount()
+public int getActiveReadHandlerCount()
 
 
 
@@ -1082,7 +1082,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getActiveScanHandlerCount
-public int getActiveScanHandlerCount()
+public int getActiveScanHandlerCount()
 
 
 
@@ -1091,7 +1091,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getQueueLength
-public int getQueueLength()
+public int getQueueLength()
 Returns the length of the pending queue
 
 
@@ -1101,7 +1101,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getReadQueueLength
-public int getReadQueueLength()
+public int getReadQueueLength()
 
 
 
@@ -1110,7 +1110,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getScanQueueLength
-public int getScanQueueLength()
+public int getScanQueueLength()
 
 
 
@@ -1119,7 +1119,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getWriteQueueLength
-public int getWriteQueueLength()
+public int getWriteQueueLength()
 
 
 
@@ -1128,7 +1128,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getName
-public String getName()
+public String getName()
 
 
 
@@ -1137,7 +1137,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 resizeQueues

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-summary.html 
b/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
index e40fc54..313b6ba 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
@@ -1020,18 +1020,24 @@
 
 
 
+QuotaStatusCalls
+
+Client class to wrap RPCs to HBase servers for space quota 
status information.
+
+
+
 RawAsyncTableImpl
 
 The implementation of RawAsyncTable.
 
 
-
+
 RegionAdminServiceCallableT
 
 Similar to RegionServerCallable but for the AdminService 
interface.
 
 
-
+
 RegionCoprocessorRpcChannel
 
 Provides clients with an RPC connection to call Coprocessor 
Endpoint
@@ -1039,87 +1045,87 @@
  against a given table region.
 
 
-
+
 RegionCoprocessorRpcChannelImpl
 
 The implementation of a region based coprocessor rpc 
channel.
 
 
-
+
 RegionCoprocessorServiceExec
 
 Represents a coprocessor service method execution against a 
single region.
 
 
-
+
 RegionLoadStats
 
 POJO representing region server load
 
 
-
+
 RegionReplicaUtil
 
 Utility methods which contain the logic for regions and 
replicas.
 
 
-
+
 RegionServerCallableT,S
 
 Implementations make a RPC call against a RegionService via 
a protobuf Service.
 
 
-
+
 RegistryFactory
 
 Get instance of configured Registry.
 
 
-
+
 RequestControllerFactory
 
 A factory class that constructs an RequestController.
 
 
-
+
 Result
 
 Single row result of a Get or Scan query.
 
 
-
+
 ResultBoundedCompletionServiceV
 
 A completion service for the RpcRetryingCallerFactory.
 
 
-
+
 ResultStatsUtil
 
 A Result with some statistics 
about the server/region status
 
 
-
+
 RetriesExhaustedException.ThrowableWithExtraContext
 
 Datastructure that allows adding more info around Throwable 
incident.
 
 
-
+
 RetryingCallerInterceptor
 
 This class is designed to fit into the RetryingCaller class 
which forms the
  central piece of intelligence for the client side retries for most 
calls.
 
 
-
+
 RetryingCallerInterceptorContext
 
 The context object used in the RpcRetryingCaller to enable
  RetryingCallerInterceptor to 
intercept calls.
 
 
-
+
 RetryingCallerInterceptorFactory
 
 Factory implementation to provide the ConnectionImplementation with
@@ -1127,190 +1133,190 @@
  to intercept the RpcRetryingCaller during the 
course of their calls.
 
 
-
+
 RetryingTimeTracker
 
 Tracks the amount of time remaining for an operation.
 
 
-
+
 ReversedClientScanner
 
 A reversed client scanner which support backward 
scanning
 
 
-
+
 ReversedScannerCallable
 
 A reversed ScannerCallable which supports backward 
scanning.
 
 
-
+
 RowMutations
 
 Performs multiple mutations atomically on a single 
row.
 
 
-
+
 RpcRetryingCallableV
 
 A RetryingCallable for RPC connection operations.
 
 
-
+
 RpcRetryingCallerFactory
 
 Factory to create an RpcRetryingCaller
 
 
-
+
 RpcRetryingCallerImplT
 
 Runs an rpc'ing RetryingCallable.
 
 
-
+
 RpcRetryingCallerWithReadReplicas
 
 Caller that goes to replica if the primary region does no 
answer within a configurable
  timeout.
 
 
-
+
 Scan
 
 Used to perform Scan operations.
 
 
-
+
 ScannerCallable
 
 Scanner operations such as create, next, etc.
 
 
-
+
 ScannerCallableWithReplicas
 
 This class has the logic for handling scanners for regions 
with and without replicas.
 
 
-
+
 SecureBulkLoadClient
 
 Client proxy for SecureBulkLoadProtocol
 
 
-
+
 ServerStatisticTracker
 
 Tracks the statistics for multiple regions
 
 
-
+
 ShortCircuitMasterConnection
 
 A short-circuit connection that can bypass the RPC layer 
(serialization, deserialization,
  networking, etc..) when talking to a local master
 
 
-
+
 SimpleRequestController
 
 Holds back the requests if they reach any thresholds.
 
 
-
+
 SimpleRequestController.RequestHeapSizeChecker
 
 limit the heap size for each request.
 
 
-
+
 SimpleRequestController.RequestRowsChecker
 
 limit the number of rows for each request.
 
 
-
+
 SimpleRequestController.SubmittedSizeChecker
 
 limit the heapsize of total submitted data.
 
 
-
+
 SimpleRequestController.TaskCountChecker
 
 limit the max number of tasks in an AsyncProcess.
 
 
-
+
 SingleResponse
 
 Class for single action response
 
 
-
+
 SingleResponse.Entry
 
 
-
+
 SnapshotDescription
 
 The POJO equivalent of HBaseProtos.SnapshotDescription
 
 
-
+
 SyncCoprocessorRpcChannel
 
 Base class which provides clients with an RPC connection to
  call coprocessor endpoint Services.
 
 
-
+
 TableBuilderBase
 
 Base class for all table builders.
 
 
-
+
 TableDescriptorBuilder
 
 
-
+
 TableDescriptorBuilder.ModifyableTableDescriptor
 
 TODO: make this private after removing the 
HTableDescriptor
 
 
-
+
 TableSnapshotScanner
 
 A Scanner which performs a scan over snapshot 

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
index 7b99d74..366fcc2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
@@ -127,222 +127,219 @@
 119if (family == null) {
 120  throw new 
IllegalArgumentException("family cannot be null");
 121}
-122if (qualifier == null) {
-123  throw new 
IllegalArgumentException("qualifier cannot be null");
-124}
-125ListCell list = 
getCellList(family);
-126KeyValue kv = 
createPutKeyValue(family, qualifier, ts, Bytes.toBytes(amount));
-127list.add(kv);
-128
familyMap.put(CellUtil.cloneFamily(kv), list);
-129return this;
-130  }
-131
-132  /**
-133   * Gets the TimeRange used for this 
increment.
-134   * @return TimeRange
-135   */
-136  public TimeRange getTimeRange() {
-137return this.tr;
-138  }
-139
-140  /**
-141   * Sets the TimeRange to be used on the 
Get for this increment.
-142   * p
-143   * This is useful for when you have 
counters that only last for specific
-144   * periods of time (ie. counters that 
are partitioned by time).  By setting
-145   * the range of valid times for this 
increment, you can potentially gain
-146   * some performance with a more optimal 
Get operation.
-147   * p
-148   * This range is used as [minStamp, 
maxStamp).
-149   * @param minStamp minimum timestamp 
value, inclusive
-150   * @param maxStamp maximum timestamp 
value, exclusive
-151   * @throws IOException if invalid time 
range
-152   * @return this
-153   */
-154  public Increment setTimeRange(long 
minStamp, long maxStamp)
-155  throws IOException {
-156tr = new TimeRange(minStamp, 
maxStamp);
-157return this;
-158  }
-159  
-160  /**
-161   * @param returnResults True (default) 
if the increment operation should return the results. A
-162   *  client that is not 
interested in the result can save network bandwidth setting this
-163   *  to false.
-164   */
-165  public Increment 
setReturnResults(boolean returnResults) {
-166
super.setReturnResults(returnResults);
-167return this;
-168  }
-169
-170  /**
-171   * @return current setting for 
returnResults
-172   */
-173  // This method makes public the 
superclasses's protected method.
-174  public boolean isReturnResults() {
-175return super.isReturnResults();
-176  }
-177
-178  /**
-179   * Method for retrieving the number of 
families to increment from
-180   * @return number of families
-181   */
-182  @Override
-183  public int numFamilies() {
-184return this.familyMap.size();
-185  }
-186
-187  /**
-188   * Method for checking if any families 
have been inserted into this Increment
-189   * @return true if familyMap is non 
empty false otherwise
-190   */
-191  public boolean hasFamilies() {
-192return !this.familyMap.isEmpty();
-193  }
-194
-195  /**
-196   * Before 0.95, when you called 
Increment#getFamilyMap(), you got back
-197   * a map of families to a list of 
Longs.  Now, {@link #getFamilyCellMap()} returns
-198   * families by list of Cells.  This 
method has been added so you can have the
-199   * old behavior.
-200   * @return Map of families to a Map of 
qualifiers and their Long increments.
-201   * @since 0.95.0
-202   */
-203  public Mapbyte[], 
NavigableMapbyte [], Long getFamilyMapOfLongs() {
-204NavigableMapbyte[], 
ListCell map = super.getFamilyCellMap();
-205Mapbyte [], 
NavigableMapbyte[], Long results = new 
TreeMap(Bytes.BYTES_COMPARATOR);
-206for (Map.Entrybyte [], 
ListCell entry: map.entrySet()) {
-207  NavigableMapbyte [], Long 
longs = new TreeMap(Bytes.BYTES_COMPARATOR);
-208  for (Cell cell: entry.getValue()) 
{
-209
longs.put(CellUtil.cloneQualifier(cell),
-210
Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), 
cell.getValueLength()));
-211  }
-212  results.put(entry.getKey(), 
longs);
-213}
-214return results;
-215  }
-216
-217  /**
-218   * @return String
-219   */
-220  @Override
-221  public String toString() {
-222StringBuilder sb = new 
StringBuilder();
-223sb.append("row=");
-224
sb.append(Bytes.toStringBinary(this.row));
-225if(this.familyMap.isEmpty()) {
-226  sb.append(", no columns set to be 
incremented");
-227  return sb.toString();
-228}
-229sb.append(", families=");
-230boolean moreThanOne = false;
-231for(Map.Entrybyte [], 
ListCell entry: this.familyMap.entrySet()) {
-232  if(moreThanOne) {
-233sb.append("), ");
-234  } else {
-235moreThanOne = true;
-236sb.append("{");
-237  }
-238  sb.append("(family=");
-239  

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b4cf63f/devapidocs/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html 
b/devapidocs/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
index e9857d4..6d2e7df 100644
--- a/devapidocs/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
+++ b/devapidocs/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
@@ -243,8 +243,8 @@ implements 
 void
-preCloneSnapshot(ObserverContextMasterCoprocessorEnvironmentctx,
-
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionsnapshot,
+preCloneSnapshot(ObserverContextMasterCoprocessorEnvironmentctx,
+
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescriptionsnapshot,
 HTableDescriptordesc)
 Called before a snapshot is cloned.
 
@@ -290,7 +290,7 @@ implements MasterObserver
-postAbortProcedure,
 postAddColumn,
 postAddColumnFamily,
 postAddColumnHandler,
 postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance,
 postBalanceRSGroup,
 postBalanceSwitch,
 postCloneSnapshot,
 postCompletedAddColumnFamilyAction,
 postComplete
 dCreateTableAction, postCompletedDeleteColumnFamilyAction,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 postCompletedModifyColumnFamilyAction,
 postCompletedModifyTableAction,
 postCompletedSplitRegionAction,
 postCompletedTruncateTableAction,
 postCreateNamespace,
 postCreateTable,
 postCreateTableHandler,
 postDeleteColumn,
 postDeleteColumnFamily,
 postDeleteColumnHandler,
 postDeleteNamespace,
 postDeleteSnapshot,
 postDeleteTableHandler,
 postDisableReplicationPeer,
 postDisableTable,
 postDisableTableHandler, postDispatchMerge,
 postEnableReplicationPeer,
 postEnableTable,
 postEnableTableHandler,
 postGetNamespaceDescriptor,
 postGetReplicationPeerConfig,
 postGetTableDescriptors,
 postGetTableNames,
 postListLocks, postListNamespaceDescriptors,
 postListProcedures,
 postListReplicationPeers,
 postListSnapshot,
 postLockHeartbeat,
 postMergeRegions,
 postMergeRegionsCommitAction,
 postModifyColumn,
 postModifyColumnFamily,
 postModifyColumnHandler,
 postModifyNamespace,
 postModifyTable,
 postModifyTableHandler,
 postMove,
 postMoveServers,
 postMoveServersAndTables,
 postMoveTables, postRegionOffline,
 postRemoveReplicationPeer,
 postRemoveRSGroup,
 postRequestLock,
 postRestoreSnapshot,
 postRollBackMergeRegionsAction,
 postRollBackSplitRegionAction,
 postSetNamespaceQuota,
 postSetSplitOrMergeEnabled,
 postSetTableQuota,
 postSetUserQuota,
 postSetUserQuota, postSetUserQuota,
 postSnapshot,
 postStartMaster,
 po
 stTableFlush, postTruncateTable,
 postTruncateTableHandler,
 postUnassign,
 postUpdateReplicationPeerConfig,
 preAbortProcedure,
 preAddColumn,
 preAddColumnFamily,
 preAddColumnFamilyAction,
 preAddColumnHandler,
 preAddReplicationPeer,
 preAddRSGroup,
 preAssign,
 preBalance,
 preBalanceRSGroup,
 preBalanceSwitch,
 preCreateTableAction,
 preCreateTableHandler,
 preDeleteColumn,
 preDeleteColumnFamily,
 preDeleteColumnFamilyAction,
 preDeleteColumnHandler,
 preDeleteNamespace,
 preDeleteSnapshot,
 preDeleteTable,
 preDeleteTableAction,
 preDeleteTableHandler,
 preDisableReplicationPeer,
 preDisableTable,
 preDisableTableAction,
 preDisableTableHandler,
 preDispatchMerge,
 preEnableReplicationPeer,
 preEnableTable,
 preEnableTableAction,
 preEnableTableHandler,
 pre
 GetNamespaceDescriptor, preGetReplicationPeerConfig,
 preGetTableDescriptors,
 preGetTableNames,
 preListLocks,
 preListNamespaceDescriptors, preListProcedures,
 preListReplicationPeers,
 preListSnapshot,
 preLockHeartbeat,
 preMasterInitialization,
 preMergeRegions,
 preMergeRegionsAction,
 preMergeRegionsCommitAction,
 preModifyColumn, preModifyColumnFamily,
 preModifyColumnFamilyAction,
 preModifyColumnHandler,
 preModifyTable, 
preModifyTableAction,
 preModifyTableHandler,
 preMove,
 preMoveServers, preMoveServersAndTables,
 preMoveTables,
 preRegionOffline,
 preRemoveReplicationPeer,
 preRemoveRSGroup,
 preRequestLock,
 preRestoreSnapshot,
 preSetNamespaceQuota,
 
 preSetSplitOrMergeEnabled,
 preSetTableQuota,
 preSetUserQuota,
 preSetUserQuota, preSetUserQuota,
 preShutdown,
 preSnapshot,
 preSplitRe
 gion, preSplitRegionAction,
 preSplitRegionAfterPONRAction,
 preSplitRegionBeforePONRAction,
 preStopMaster,
 preTableFlush, preTruncateTable,
 preTruncateTableAction,
 preTruncateTableHandler,
 preUnassign,
 preUpdateReplicationPeerConfig
+postAbortProcedure,
 postAddColumn,
 postAddColumnFamily,
 postAddColumnHandler,
 postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance,
 postBalanceRSGroup,
 postBalanceSwitch,
 postCloneSnapshot,
 postCompletedAddColumnFamilyAction,
 postCompl
 etedCreateTableAction, 

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f55ebeaa/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.html
new file mode 100644
index 000..4c7720b
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.html
@@ -0,0 +1,278 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing, software
+013 * distributed under the License is 
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+015 * See the License for the specific 
language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase.ipc;
+019
+020import io.netty.buffer.ByteBuf;
+021import io.netty.channel.Channel;
+022import 
io.netty.channel.ChannelFutureListener;
+023
+024import java.io.IOException;
+025import java.net.InetAddress;
+026import java.net.InetSocketAddress;
+027import java.nio.ByteBuffer;
+028import java.util.Arrays;
+029
+030import 
org.apache.hadoop.hbase.CellScanner;
+031import 
org.apache.hadoop.hbase.HConstants;
+032import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+033import 
org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup;
+034import 
org.apache.hadoop.hbase.nio.ByteBuff;
+035import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
+036import 
org.apache.hadoop.hbase.security.AccessDeniedException;
+037import 
org.apache.hadoop.hbase.security.AuthMethod;
+038import 
org.apache.hadoop.hbase.security.SaslStatus;
+039import 
org.apache.hadoop.hbase.security.SaslUtil;
+040import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
+041import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
+042import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
+043import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
+044import 
org.apache.hadoop.hbase.util.Bytes;
+045import 
org.apache.hadoop.io.IntWritable;
+046import org.apache.htrace.TraceInfo;
+047
+048/**
+049 * RpcConnection implementation for netty 
rpc server.
+050 */
+051@InterfaceAudience.Private
+052class NettyServerRpcConnection extends 
ServerRpcConnection {
+053
+054  final Channel channel;
+055
+056  NettyServerRpcConnection(NettyRpcServer 
rpcServer, Channel channel) {
+057super(rpcServer);
+058this.channel = channel;
+059InetSocketAddress inetSocketAddress = 
((InetSocketAddress) channel.remoteAddress());
+060this.addr = 
inetSocketAddress.getAddress();
+061if (addr == null) {
+062  this.hostAddress = "*Unknown*";
+063} else {
+064  this.hostAddress = 
inetSocketAddress.getAddress().getHostAddress();
+065}
+066this.remotePort = 
inetSocketAddress.getPort();
+067this.saslCall = new 
NettyServerCall(SASL_CALLID, null, null, null, null, null, this, 0, null,
+068null, System.currentTimeMillis(), 
0, rpcServer.reservoir, rpcServer.cellBlockBuilder, null);
+069this.setConnectionHeaderResponseCall 
= new NettyServerCall(CONNECTION_HEADER_RESPONSE_CALLID,
+070null, null, null, null, null, 
this, 0, null, null, System.currentTimeMillis(), 0,
+071rpcServer.reservoir, 
rpcServer.cellBlockBuilder, null);
+072this.authFailedCall = new 
NettyServerCall(AUTHORIZATION_FAILED_CALLID, null, null, null, null,
+073null, this, 0, null, null, 
System.currentTimeMillis(), 0, rpcServer.reservoir,
+074rpcServer.cellBlockBuilder, 
null);
+075  }
+076
+077  void readPreamble(ByteBuf buffer) 
throws IOException {
+078byte[] rpcHead = { buffer.readByte(), 
buffer.readByte(), buffer.readByte(), buffer.readByte() };
+079if 
(!Arrays.equals(HConstants.RPC_HEADER, rpcHead)) {
+080  doBadPreambleHandling("Expected 
HEADER=" + Bytes.toStringBinary(HConstants.RPC_HEADER) +
+081  " but received HEADER=" + 
Bytes.toStringBinary(rpcHead) + " from " + toString());
+082  return;
+083}
+084// Now read the next two bytes, the 
version and the auth to use.
+085int version = buffer.readByte();
+086byte authbyte = buffer.readByte();
+087this.authMethod = 
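For contrast with the server-side check above, a hedged sketch of the six-byte preamble a client writes: the four RPC_HEADER magic bytes, a version byte, and an auth-method code. RPC_CURRENT_VERSION is assumed here to be the matching version constant:

    import java.io.DataOutputStream;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.security.AuthMethod;

    class PreambleSketch {
      static void writePreamble(DataOutputStream out) throws Exception {
        out.write(HConstants.RPC_HEADER);            // the "HBas" magic bytes
        out.write(HConstants.RPC_CURRENT_VERSION);   // protocol version byte
        out.write(AuthMethod.SIMPLE.code);           // chosen auth method byte
        out.flush();
      }
    }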

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e0a5167/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html
index 2ccefa4..87d7143 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html
@@ -141,1653 +141,1703 @@
 133
 134  private final static String 
BACKUP_INFO_PREFIX = "session:";
 135  private final static String 
START_CODE_ROW = "startcode:";
-136  private final static String 
INCR_BACKUP_SET = "incrbackupset:";
-137  private final static String 
TABLE_RS_LOG_MAP_PREFIX = "trslm:";
-138  private final static String 
RS_LOG_TS_PREFIX = "rslogts:";
-139
-140  private final static String 
BULK_LOAD_PREFIX = "bulk:";
-141  private final static byte[] 
BULK_LOAD_PREFIX_BYTES = BULK_LOAD_PREFIX.getBytes();
-142  final static byte[] TBL_COL = 
Bytes.toBytes("tbl");
-143  final static byte[] FAM_COL = 
Bytes.toBytes("fam");
-144  final static byte[] PATH_COL = 
Bytes.toBytes("path");
-145  final static byte[] STATE_COL = 
Bytes.toBytes("state");
-146  // the two states a bulk loaded file 
can be
-147  final static byte[] BL_PREPARE = 
Bytes.toBytes("R");
-148  final static byte[] BL_COMMIT = 
Bytes.toBytes("D");
-149
-150  private final static String WALS_PREFIX 
= "wals:";
-151  private final static String 
SET_KEY_PREFIX = "backupset:";
-152
-153  // separator between BULK_LOAD_PREFIX 
and ordinals
-154 protected final static String 
BLK_LD_DELIM = ":";
-155  private final static byte[] EMPTY_VALUE 
= new byte[] {};
-156
-157  // Safe delimiter in a string
-158  private final static String NULL = "\u0000";
-159
-160  public BackupSystemTable(Connection 
conn) throws IOException {
-161this.connection = conn;
-162tableName = 
BackupSystemTable.getTableName(conn.getConfiguration());
-163checkSystemTable();
-164  }
+136  private final static byte[] 
ACTIVE_SESSION_ROW = "activesession:".getBytes();
+137  private final static byte[] 
ACTIVE_SESSION_COL = "c".getBytes();
+138
+139  private final static byte[] 
ACTIVE_SESSION_YES = "yes".getBytes();
+140  private final static byte[] 
ACTIVE_SESSION_NO = "no".getBytes();
+141
+142  private final static String 
INCR_BACKUP_SET = "incrbackupset:";
+143  private final static String 
TABLE_RS_LOG_MAP_PREFIX = "trslm:";
+144  private final static String 
RS_LOG_TS_PREFIX = "rslogts:";
+145
+146  private final static String 
BULK_LOAD_PREFIX = "bulk:";
+147  private final static byte[] 
BULK_LOAD_PREFIX_BYTES = BULK_LOAD_PREFIX.getBytes();
+148  final static byte[] TBL_COL = 
Bytes.toBytes("tbl");
+149  final static byte[] FAM_COL = 
Bytes.toBytes("fam");
+150  final static byte[] PATH_COL = 
Bytes.toBytes("path");
+151  final static byte[] STATE_COL = 
Bytes.toBytes("state");
+152  // the two states a bulk loaded file 
can be
+153  final static byte[] BL_PREPARE = 
Bytes.toBytes("R");
+154  final static byte[] BL_COMMIT = 
Bytes.toBytes("D");
+155
+156  private final static String WALS_PREFIX 
= "wals:";
+157  private final static String 
SET_KEY_PREFIX = "backupset:";
+158
+159  // separator between BULK_LOAD_PREFIX 
and ordinals
+160 protected final static String 
BLK_LD_DELIM = ":";
+161  private final static byte[] EMPTY_VALUE 
= new byte[] {};
+162
+163  // Safe delimiter in a string
+164  private final static String NULL = "\u0000";
 165
-166  private void checkSystemTable() throws 
IOException {
-167try (Admin admin = 
connection.getAdmin();) {
-168
-169  verifyNamespaceExists(admin);
-170
-171  if (!admin.tableExists(tableName)) 
{
-172HTableDescriptor backupHTD =
-173
BackupSystemTable.getSystemTableDescriptor(connection.getConfiguration());
-174admin.createTable(backupHTD);
-175  }
-176  waitForSystemTable(admin);
-177}
-178  }
-179
-180  private void 
verifyNamespaceExists(Admin admin) throws IOException {
-181  String namespaceName  = 
tableName.getNamespaceAsString();
-182  NamespaceDescriptor ns = 
NamespaceDescriptor.create(namespaceName).build();
-183  NamespaceDescriptor[] list = 
admin.listNamespaceDescriptors();
-184  boolean exists = false;
-185  for( NamespaceDescriptor nsd: list) 
{
-186if 
(nsd.getName().equals(ns.getName())) {
-187  exists = true;
-188  break;
-189}
-190  }
-191  if (!exists) {
-192admin.createNamespace(ns);
-193  }
-194  }
-195
-196  private void waitForSystemTable(Admin 
admin) throws IOException {
-197long TIMEOUT = 6;
-198long startTime = 
EnvironmentEdgeManager.currentTime();
-199while (!admin.tableExists(tableName) 
|| !admin.isTableAvailable(tableName)) {
-200  try {
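The truncated method above is a poll-with-deadline loop; a generic, self-contained sketch of that pattern (not the actual method body, whose sleep interval and error text are elided here) looks like:

    import java.io.IOException;
    import java.util.function.BooleanSupplier;

    class WaitUntil {
      static void waitFor(BooleanSupplier ready, long timeoutMs) throws IOException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!ready.getAsBoolean()) {        // e.g. table exists and is available
          if (System.currentTimeMillis() > deadline) {
            throw new IOException("timed out waiting for condition");
          }
          try {
            Thread.sleep(100);                 // illustrative back-off
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IOException(e);
          }
        }
      }
    }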

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/709b8fcc/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
index f2c44db..6cf2fc8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
@@ -2581,7 +2581,7 @@
 2573try {
 2574  // Restore snapshot
 2575  get(
-2576
internalRestoreSnapshotAsync(snapshotName, tableName, false),
+2576
internalRestoreSnapshotAsync(snapshotName, tableName),
 2577syncWaitTimeout,
 2578TimeUnit.MILLISECONDS);
 2579} catch (IOException e) {
@@ -2590,7 +2590,7 @@
 2582  if (takeFailSafeSnapshot) {
 2583try {
 2584  get(
-2585
internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName, false),
+2585
internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName),
 2586syncWaitTimeout,
 2587TimeUnit.MILLISECONDS);
 2588  String msg = "Restore 
snapshot=" + snapshotName +
@@ -2633,7 +2633,7 @@
 2625  throw new 
TableNotDisabledException(tableName);
 2626}
 2627
-2628return 
internalRestoreSnapshotAsync(snapshotName, tableName, false);
+2628return 
internalRestoreSnapshotAsync(snapshotName, tableName);
 2629  }
 2630
 2631  @Override
@@ -2643,1621 +2643,1614 @@
 2635  }
 2636
 2637  @Override
-2638  public void cloneSnapshot(String 
snapshotName, TableName tableName, boolean restoreAcl)
+2638  public void cloneSnapshot(final String 
snapshotName, final TableName tableName)
 2639  throws IOException, 
TableExistsException, RestoreSnapshotException {
 2640if (tableExists(tableName)) {
 2641  throw new 
TableExistsException(tableName);
 2642}
 2643get(
-2644  
internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl),
+2644  
internalRestoreSnapshotAsync(snapshotName, tableName),
 2645  Integer.MAX_VALUE,
 2646  TimeUnit.MILLISECONDS);
 2647  }
 2648
 2649  @Override
-2650  public void cloneSnapshot(final String 
snapshotName, final TableName tableName)
-2651  throws IOException, 
TableExistsException, RestoreSnapshotException {
-2652cloneSnapshot(snapshotName, 
tableName, false);
-2653  }
-2654
-2655  @Override
-2656  public Future<Void> cloneSnapshotAsync(final String snapshotName, final TableName tableName)
-2657  throws IOException, 
TableExistsException {
-2658if (tableExists(tableName)) {
-2659  throw new 
TableExistsException(tableName);
-2660}
-2661return 
internalRestoreSnapshotAsync(snapshotName, tableName, false);
-2662  }
-2663
-2664  @Override
-2665  public byte[] execProcedureWithRet(String signature, String instance, Map<String, String> props)
-2666  throws IOException {
-2667ProcedureDescription desc = 
ProtobufUtil.buildProcedureDescription(signature, instance, props);
-2668final ExecProcedureRequest request 
=
-2669
ExecProcedureRequest.newBuilder().setProcedure(desc).build();
-2670// run the procedure on the master
-2671ExecProcedureResponse response = 
executeCallable(
-2672  new 
MasterCallableExecProcedureResponse(getConnection(), 
getRpcControllerFactory()) {
-2673@Override
-2674protected ExecProcedureResponse 
rpcCall() throws Exception {
-2675  return 
master.execProcedureWithRet(getRpcController(), request);
-2676}
-2677  });
-2678
-2679return response.hasReturnData() ? 
response.getReturnData().toByteArray() : null;
-2680  }
-2681
-2682  @Override
-2683  public void execProcedure(String signature, String instance, Map<String, String> props)
-2684  throws IOException {
-2685ProcedureDescription desc = 
ProtobufUtil.buildProcedureDescription(signature, instance, props);
-2686final ExecProcedureRequest request 
=
-2687
ExecProcedureRequest.newBuilder().setProcedure(desc).build();
-2688// run the procedure on the master
-2689ExecProcedureResponse response = 
executeCallable(new MasterCallableExecProcedureResponse(
-2690getConnection(), 
getRpcControllerFactory()) {
-2691  @Override
-2692  protected ExecProcedureResponse 
rpcCall() throws Exception {
-2693return 
master.execProcedure(getRpcController(), request);
-2694  }
-2695});
-2696
-2697long start = 
EnvironmentEdgeManager.currentTime();
-2698long max = 
response.getExpectedTimeout();
-2699long maxPauseTime = max / 
this.numRetries;
-2700int tries = 0;
-2701LOG.debug("Waiting a max of " + max 
+ " ms for procedure '" +
-2702signature + " : " + instance + 
"'' to complete. 

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetBulkTime.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetBulkTime.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetBulkTime.html
index b01aa5a..8090868 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetBulkTime.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetBulkTime.html
@@ -597,215 +597,221 @@
 589return reader;
 590  }
 591
-592  public StoreFileScanner 
getStreamScanner(boolean canUseDropBehind, boolean cacheBlocks,
-593  boolean pread, boolean 
isCompaction, long readPt, long scannerOrder,
-594  boolean 
canOptimizeForNonNullColumn) throws IOException {
-595return 
createStreamReader(canUseDropBehind).getStoreFileScanner(
-596  cacheBlocks, pread, isCompaction, 
readPt, scannerOrder, canOptimizeForNonNullColumn);
-597  }
-598
-599  /**
-600   * @return Current reader.  Must call 
initReader first else returns null.
-601   * @see #initReader()
-602   */
-603  public StoreFileReader getReader() {
-604return this.reader;
-605  }
-606
-607  /**
-608   * @param evictOnClose whether to evict 
blocks belonging to this file
-609   * @throws IOException
-610   */
-611  public synchronized void 
closeReader(boolean evictOnClose)
-612  throws IOException {
-613if (this.reader != null) {
-614  this.reader.close(evictOnClose);
-615  this.reader = null;
-616}
-617  }
-618
-619  /**
-620   * Marks the status of the file as 
compactedAway.
-621   */
-622  public void markCompactedAway() {
-623this.compactedAway = true;
-624  }
-625
-626  /**
-627   * Delete this file
-628   * @throws IOException
-629   */
-630  public void deleteReader() throws 
IOException {
-631boolean evictOnClose =
-632cacheConf != null? 
cacheConf.shouldEvictOnClose(): true;
-633closeReader(evictOnClose);
-634this.fs.delete(getPath(), true);
-635  }
-636
-637  @Override
-638  public String toString() {
-639return this.fileInfo.toString();
-640  }
-641
-642  /**
-643   * @return a length description of this 
StoreFile, suitable for debug output
-644   */
-645  public String toStringDetailed() {
-646StringBuilder sb = new 
StringBuilder();
-647
sb.append(this.getPath().toString());
-648sb.append(", 
isReference=").append(isReference());
-649sb.append(", 
isBulkLoadResult=").append(isBulkLoadResult());
-650if (isBulkLoadResult()) {
-651  sb.append(", 
bulkLoadTS=").append(getBulkLoadTimestamp());
-652} else {
-653  sb.append(", 
seqid=").append(getMaxSequenceId());
-654}
-655sb.append(", 
majorCompaction=").append(isMajorCompaction());
-656
-657return sb.toString();
-658  }
-659
-660  /**
-661   * Gets whether to skip resetting the 
sequence id for cells.
-662   * @param skipResetSeqId The byte array 
of boolean.
-663   * @return Whether to skip resetting 
the sequence id.
-664   */
-665  private boolean isSkipResetSeqId(byte[] 
skipResetSeqId) {
-666    if (skipResetSeqId != null && skipResetSeqId.length == 1) {
-667  return 
Bytes.toBoolean(skipResetSeqId);
-668}
-669return false;
-670  }
-671
-672  /**
-673   * @param fs
-674   * @param dir Directory to create file 
in.
-675   * @return random filename inside 
passed codedir/code
-676   */
-677  public static Path getUniqueFile(final 
FileSystem fs, final Path dir)
-678  throws IOException {
-679if 
(!fs.getFileStatus(dir).isDirectory()) {
-680  throw new IOException("Expecting " 
+ dir.toString() +
-681" to be a directory");
-682}
-683return new Path(dir, 
UUID.randomUUID().toString().replaceAll("-", ""));
-684  }
-685
-686  public Long getMinimumTimestamp() {
-687return getReader().timeRange == null? 
null: getReader().timeRange.getMin();
-688  }
-689
-690  public Long getMaximumTimestamp() {
-691return getReader().timeRange == null? 
null: getReader().timeRange.getMax();
-692  }
-693
-694
-695  /**
-696   * Gets the approximate mid-point of 
this file that is optimal for use in splitting it.
-697   * @param comparator Comparator used to 
compare KVs.
-698   * @return The split point row, or null 
if splitting is not possible, or reader is null.
-699   */
-700  byte[] getFileSplitPoint(CellComparator 
comparator) throws IOException {
-701if (this.reader == null) {
-702  LOG.warn("Storefile " + this + " 
Reader is null; cannot get split point");
-703  return null;
-704}
-705// Get first, last, and mid keys.  
Midkey is the key that starts block
-706// in middle of hfile.  Has column 
and timestamp.  Need to return just
-707// the row we want to split on as 
midkey.
-708Cell midkey = this.reader.midkey();
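Per the comment above, only the row portion of the mid key matters; a hedged sketch of that reduction step:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;

    class SplitPointSketch {
      // A region splits on a row boundary, so the split point is the
      // mid key's row, not the full key with column and timestamp.
      static byte[] splitRow(Cell midkey) {
        return CellUtil.cloneRow(midkey);
      }
    }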

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dd7176bf/devapidocs/org/apache/hadoop/hbase/util/class-use/NonceKey.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/class-use/NonceKey.html 
b/devapidocs/org/apache/hadoop/hbase/util/class-use/NonceKey.html
index 8a5684c..1565e00 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/class-use/NonceKey.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/class-use/NonceKey.html
@@ -304,26 +304,29 @@
 
 
 (package private) long
-SnapshotManager.cloneSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription snapshot,
+SnapshotManager.cloneSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription snapshot,
  HTableDescriptor hTableDescriptor,
- NonceKey nonceKey)
+ NonceKey nonceKey,
+ boolean restoreAcl)
 Clone the specified snapshot into a new table.
 
 
 
 private long
-SnapshotManager.cloneSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription reqSnapshot,
+SnapshotManager.cloneSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription reqSnapshot,
  TableName tableName,
  org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription snapshot,
  HTableDescriptor snapshotTableDesc,
- NonceKey nonceKey)
+ NonceKey nonceKey,
+ boolean restoreAcl)
 Clone the specified snapshot.
 
 
 
 long
-SnapshotManager.restoreOrCloneSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription reqSnapshot,
-  NonceKey nonceKey)
+SnapshotManager.restoreOrCloneSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription reqSnapshot,
+  NonceKey nonceKey,
+  boolean restoreAcl)
 Restore or Clone the specified snapshot
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dd7176bf/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
index d023d86..a0c6611 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
@@ -519,14 +519,14 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.util.PoolMap.PoolType
 org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.PureJavaComparer
 (implements org.apache.hadoop.hbase.util.Bytes.ComparerT)
-org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.UnsafeComparer
 (implements org.apache.hadoop.hbase.util.Bytes.ComparerT)
-org.apache.hadoop.hbase.util.PrettyPrinter.Unit
-org.apache.hadoop.hbase.util.Order
+org.apache.hadoop.hbase.util.ChecksumType
 org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType
 org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE
-org.apache.hadoop.hbase.util.ChecksumType
+org.apache.hadoop.hbase.util.Order
+org.apache.hadoop.hbase.util.PrettyPrinter.Unit
+org.apache.hadoop.hbase.util.PoolMap.PoolType
+org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.UnsafeComparer
 (implements org.apache.hadoop.hbase.util.Bytes.ComparerT)
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dd7176bf/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index 881c7d9..cb8b5ed 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 008@InterfaceAudience.Private
 009public class Version {
 010  public static final String version = 
"2.0.0-SNAPSHOT";
-011  public static final String revision = 
"51d4c68b7cce43af1190f9195bfb08963375bc27";
+011  public static final String revision = 
"c5cc81d8e31ba76833adf25b6c357205745c23ad";
 012  public static final String user = 
"jenkins";
-013  public static final String date = "Tue 
May  9 14:40:19 UTC 2017";
+013  public static final String date = "Wed 
May 10 14:39:19 UTC 2017";
 014  public static final String url = 

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ef4c5a9/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.NettyConnection.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.NettyConnection.html b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.NettyConnection.html
index 27e0dee..109b5f3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.NettyConnection.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.NettyConnection.html
@@ -67,157 +67,157 @@
 059import org.apache.hadoop.hbase.CellScanner;
 060import org.apache.hadoop.hbase.HConstants;
 061import org.apache.hadoop.hbase.Server;
-062import org.apache.hadoop.hbase.classification.InterfaceStability;
-063import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
-064import org.apache.hadoop.hbase.nio.ByteBuff;
-065import org.apache.hadoop.hbase.nio.SingleByteBuff;
-066import org.apache.hadoop.hbase.security.AccessDeniedException;
-067import org.apache.hadoop.hbase.security.AuthMethod;
-068import org.apache.hadoop.hbase.security.HBasePolicyProvider;
-069import org.apache.hadoop.hbase.security.SaslStatus;
-070import org.apache.hadoop.hbase.security.SaslUtil;
-071import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
-072import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
-073import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
-074import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-075import org.apache.hadoop.hbase.util.Bytes;
-076import org.apache.hadoop.hbase.util.JVM;
-077import org.apache.hadoop.hbase.util.Pair;
-078import org.apache.hadoop.io.IntWritable;
-079import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
-080import org.apache.htrace.TraceInfo;
-081
-082/**
-083 * An RPC server with Netty4 implementation.
-084 *
-085 */
-086public class NettyRpcServer extends RpcServer {
-087
-088  public static final Log LOG = LogFactory.getLog(NettyRpcServer.class);
-089
-090  protected final InetSocketAddress bindAddress;
-091
-092  private final CountDownLatch closed = new CountDownLatch(1);
-093  private final Channel serverChannel;
-094  private final ChannelGroup allChannels = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE);
-095
-096  public NettyRpcServer(final Server server, final String name,
-097      final List<BlockingServiceAndInterface> services,
-098      final InetSocketAddress bindAddress, Configuration conf,
-099      RpcScheduler scheduler) throws IOException {
-100    super(server, name, services, bindAddress, conf, scheduler);
-101    this.bindAddress = bindAddress;
-102    boolean useEpoll = useEpoll(conf);
-103    int workerCount = conf.getInt("hbase.netty.rpc.server.worker.count",
-104        Runtime.getRuntime().availableProcessors() / 4);
-105    EventLoopGroup bossGroup = null;
-106    EventLoopGroup workerGroup = null;
-107    if (useEpoll) {
-108      bossGroup = new EpollEventLoopGroup(1);
-109      workerGroup = new EpollEventLoopGroup(workerCount);
-110    } else {
-111      bossGroup = new NioEventLoopGroup(1);
-112      workerGroup = new NioEventLoopGroup(workerCount);
-113    }
-114    ServerBootstrap bootstrap = new ServerBootstrap();
-115    bootstrap.group(bossGroup, workerGroup);
-116    if (useEpoll) {
-117      bootstrap.channel(EpollServerSocketChannel.class);
-118    } else {
-119      bootstrap.channel(NioServerSocketChannel.class);
-120    }
-121    bootstrap.childOption(ChannelOption.TCP_NODELAY, tcpNoDelay);
-122    bootstrap.childOption(ChannelOption.SO_KEEPALIVE, tcpKeepAlive);
-123    bootstrap.childOption(ChannelOption.ALLOCATOR,
-124        PooledByteBufAllocator.DEFAULT);
-125    bootstrap.childHandler(new Initializer(maxRequestSize));
-126
-127    try {
-128      serverChannel = bootstrap.bind(this.bindAddress).sync().channel();
-129      LOG.info("NettyRpcServer bind to address=" + serverChannel.localAddress()
-130          + ", hbase.netty.rpc.server.worker.count=" + workerCount
-131          + ", useEpoll=" + useEpoll);
-132      allChannels.add(serverChannel);
-133    } catch (InterruptedException e) {
-134      throw new InterruptedIOException(e.getMessage());
-135    }
-136    initReconfigurable(conf);
-137    this.scheduler.init(new RpcSchedulerContext(this));
-138  }
-139
-140  private static boolean useEpoll(Configuration conf) {
-141    // Config to enable native transport.
-142    boolean epollEnabled = conf.getBoolean("hbase.rpc.server.nativetransport",
-143        true);
-144    // Use the faster native epoll transport mechanism on linux if enabled
-145    return epollEnabled && JVM.isLinux() && JVM.isAmd64();
-146  }
-147
-148  @Override
-149  public synchronized void start() {
-150    if (started) {
-151      return;
-152    }
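The constructor above reads two tunables: hbase.rpc.server.nativetransport (combined with JVM.isLinux() and JVM.isAmd64() in useEpoll) and hbase.netty.rpc.server.worker.count (defaulting to availableProcessors() / 4). A hedged sketch of setting them follows; the property names come straight from the source above, while the standalone helper class is only illustrative.

import org.apache.hadoop.conf.Configuration;

// Sketch: preparing a Configuration for the knobs NettyRpcServer reads.
public class NettyRpcServerTuning {
  public static Configuration tuned() {
    Configuration conf = new Configuration();
    // Force the NIO transport even on Linux/amd64, where epoll is otherwise chosen.
    conf.setBoolean("hbase.rpc.server.nativetransport", false);
    // Override the default worker count of availableProcessors() / 4.
    conf.setInt("hbase.netty.rpc.server.worker.count", 8);
    return conf;
  }
}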

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/31df4674/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
index dba11f3..84aca73 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -33,723 +33,778 @@
 025
 026import org.apache.hadoop.hbase.HColumnDescriptor;
 027import org.apache.hadoop.hbase.HRegionInfo;
-028import org.apache.hadoop.hbase.ServerName;
-029import org.apache.hadoop.hbase.NamespaceDescriptor;
-030import org.apache.hadoop.hbase.TableName;
-031import org.apache.hadoop.hbase.classification.InterfaceAudience;
-032import org.apache.hadoop.hbase.quotas.QuotaFilter;
-033import org.apache.hadoop.hbase.quotas.QuotaSettings;
-034import org.apache.hadoop.hbase.client.replication.TableCFs;
-035import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-036import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-037import org.apache.hadoop.hbase.util.Pair;
-038
-039/**
-040 * The asynchronous administrative API for HBase.
-041 * <p>
-042 * This feature is still under development, so marked as IA.Private. Will change to public when
-043 * done. Use it with caution.
-044 */
-045@InterfaceAudience.Private
-046public interface AsyncAdmin {
-047
-048  /**
-049   * @return Async Connection used by this object.
-050   */
-051  AsyncConnectionImpl getConnection();
-052
-053  /**
-054   * @param tableName Table to check.
-055   * @return True if table exists already. The return value will be wrapped by a
-056   *         {@link CompletableFuture}.
-057   */
-058  CompletableFuture<Boolean> tableExists(final TableName tableName);
-059
-060  /**
-061   * List all the userspace tables.
-062   * @return - returns an array of TableDescriptors wrapped by a {@link CompletableFuture}.
-063   * @see #listTables(Pattern, boolean)
-064   */
-065  CompletableFuture<TableDescriptor[]> listTables();
-066
-067  /**
-068   * List all the tables matching the given pattern.
-069   * @param regex The regular expression to match against
-070   * @param includeSysTables False to match only against userspace tables
-071   * @return - returns an array of TableDescriptors wrapped by a {@link CompletableFuture}.
-072   * @see #listTables(Pattern, boolean)
-073   */
-074  CompletableFuture<TableDescriptor[]> listTables(String regex, boolean includeSysTables);
-075
-076  /**
-077   * List all the tables matching the given pattern.
-078   * @param pattern The compiled regular expression to match against
-079   * @param includeSysTables False to match only against userspace tables
-080   * @return - returns an array of TableDescriptors wrapped by a {@link CompletableFuture}.
-081   */
-082  CompletableFuture<TableDescriptor[]> listTables(Pattern pattern, boolean includeSysTables);
-083
-084  /**
-085   * List all of the names of userspace tables.
-086   * @return TableName[] an array of table names wrapped by a {@link CompletableFuture}.
-087   * @see #listTableNames(Pattern, boolean)
-088   */
-089  CompletableFuture<TableName[]> listTableNames();
-090
-091  /**
-092   * List all of the names of userspace tables.
-093   * @param regex The regular expression to match against
-094   * @param includeSysTables False to match only against userspace tables
-095   * @return TableName[] an array of table names wrapped by a {@link CompletableFuture}.
-096   * @see #listTableNames(Pattern, boolean)
-097   */
-098  CompletableFuture<TableName[]> listTableNames(final String regex, final boolean includeSysTables);
-099
-100  /**
-101   * List all of the names of userspace tables.
-102   * @param pattern The regular expression to match against
-103   * @param includeSysTables False to match only against userspace tables
-104   * @return TableName[] an array of table names wrapped by a {@link CompletableFuture}.
-105   */
-106  CompletableFuture<TableName[]> listTableNames(final Pattern pattern,
-107      final boolean includeSysTables);
-108
-109  /**
-110   * Method for getting the tableDescriptor
-111   * @param tableName as a {@link TableName}
-112   * @return the read-only tableDescriptor wrapped by a {@link CompletableFuture}.
-113   */
-114  CompletableFuture<TableDescriptor> getTableDescriptor(final TableName tableName);
-115
-116  /**
-117   * Creates a new table.
-118   * @param desc table descriptor for table
-119   */
-120  CompletableFuture<Void> createTable(TableDescriptor desc);
-121
-122  /**
-123   * Creates a new table with the specified number of regions. The start key specified will become
-124   * the end key of the first region of the table, and the end key specified will become the start
-125   * key of the last region of the table 
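Every method above returns a CompletableFuture, so results are consumed by composing callbacks rather than blocking. A hedged sketch under that contract; how the AsyncAdmin instance is obtained is outside this file and assumed here.

// Sketch: composing the future-returning calls declared above.
void printUserTableNames(AsyncAdmin admin) {
  admin.listTableNames().thenAccept(names -> {
    for (TableName name : names) {
      System.out.println(name.getNameAsString());
    }
  }).exceptionally(err -> {
    err.printStackTrace(); // failures arrive through the future, not as thrown exceptions
    return null;
  });
}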

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6f2e75f2/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
index 6c52543..f3f7a46 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
@@ -31,1797 +31,2040 @@
 023import java.util.ArrayList;
 024import java.util.Arrays;
 025import java.util.Collection;
-026import java.util.HashMap;
-027import java.util.LinkedList;
-028import java.util.List;
-029import java.util.Map;
-030import java.util.Optional;
-031import java.util.concurrent.CompletableFuture;
-032import java.util.concurrent.TimeUnit;
-033import java.util.concurrent.atomic.AtomicReference;
-034import java.util.function.BiConsumer;
-035import java.util.regex.Pattern;
-036import java.util.stream.Collectors;
-037
-038import com.google.common.annotations.VisibleForTesting;
-039
-040import io.netty.util.Timeout;
-041import io.netty.util.TimerTask;
-042import org.apache.commons.logging.Log;
-043import org.apache.commons.logging.LogFactory;
-044import org.apache.hadoop.hbase.HColumnDescriptor;
-045import org.apache.hadoop.hbase.HRegionInfo;
-046import org.apache.hadoop.hbase.HRegionLocation;
-047import org.apache.hadoop.hbase.MetaTableAccessor;
-048import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-049import org.apache.hadoop.hbase.NotServingRegionException;
-050import org.apache.hadoop.hbase.RegionLocations;
-051import org.apache.hadoop.hbase.ServerName;
-052import org.apache.hadoop.hbase.NamespaceDescriptor;
-053import org.apache.hadoop.hbase.HConstants;
-054import org.apache.hadoop.hbase.TableExistsException;
-055import org.apache.hadoop.hbase.TableName;
-056import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-057import org.apache.hadoop.hbase.TableNotFoundException;
-058import org.apache.hadoop.hbase.UnknownRegionException;
-059import org.apache.hadoop.hbase.classification.InterfaceAudience;
-060import org.apache.hadoop.hbase.classification.InterfaceStability;
-061import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-062import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-063import org.apache.hadoop.hbase.client.Scan.ReadType;
-064import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-065import org.apache.hadoop.hbase.client.replication.TableCFs;
-066import org.apache.hadoop.hbase.exceptions.DeserializationException;
-067import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-068import org.apache.hadoop.hbase.quotas.QuotaFilter;
-069import org.apache.hadoop.hbase.quotas.QuotaSettings;
-070import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-071import org.apache.hadoop.hbase.replication.ReplicationException;
-072import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-073import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-074import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-075import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-076import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-077import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-078import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-079import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-080import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-081import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-082import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-091import 

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/efd0601e/devapidocs/org/apache/hadoop/hbase/Stoppable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/Stoppable.html b/devapidocs/org/apache/hadoop/hbase/Stoppable.html
index 08e0401..b514550 100644
--- a/devapidocs/org/apache/hadoop/hbase/Stoppable.html
+++ b/devapidocs/org/apache/hadoop/hbase/Stoppable.html
@@ -105,7 +105,7 @@ var activeTableTab = "activeTableTab";
 
 
 All Known Implementing Classes:
-AuthenticationTokenSecretManager.LeaderElector, BackupHFileCleaner, BackupLogCleaner, BaseFileCleanerDelegate, BaseHFileCleanerDelegate, BaseLoadBalancer, BaseLogCleanerDelegate, DumpReplicationQueues.WarnOnlyStoppable, FavoredNodeLoadBalancer, HFileLinkCleaner, HMaster, HMasterCommandLine.LocalHMaster, HRegionServer, HRegionServer.MovedRegionsCleaner, LogRollMasterProcedureManager, LongTermArchivingHFileCleaner, MasterFlushTableProcedureManager, MasterProcedureManager, NoLimitThroughputController, PressureAwareCompactionThroughputController, PressureAwareFlushThroughputController, PressureAwareThroughputController, QuotaCache, ReplicationHFileCleaner, ReplicationLogCleaner, ReplicationSyncUp.DummyServer, RSGroupBasedLoadBalancer, ShutdownHook.DoNothingStoppable, SimpleLoadBalancer, SnapshotFileCache, SnapshotHFileCleaner, SnapshotManager, StochasticLoadBalancer, TimeToLiveHFileCleaner, TimeToLiveLogCleaner
+AuthenticationTokenSecretManager.LeaderElector, BackupHFileCleaner, BackupLogCleaner, BaseFileCleanerDelegate, BaseHFileCleanerDelegate, BaseLoadBalancer, BaseLogCleanerDelegate, DumpReplicationQueues.WarnOnlyStoppable, FavoredNodeLoadBalancer, FavoredStochasticBalancer, HFileLinkCleaner, HMaster, HMasterCommandLine.LocalHMaster, HRegionServer, HRegionServer.MovedRegionsCleaner, LogRollMasterProcedureManager, LongTermArchivingHFileCleaner, MasterFlushTableProcedureManager, MasterProcedureManager, NoLimitThroughputController, PressureAwareCompactionThroughputController, PressureAwareFlushThroughputController, PressureAwareThroughputController, QuotaCache, ReplicationHFileCleaner, ReplicationLogCleaner, ReplicationSyncUp.DummyServer, RSGroupBasedLoadBalancer, ShutdownHook.DoNothingStoppable, SimpleLoadBalancer, SnapshotFileCache, SnapshotHFileCleaner, SnapshotManager, StochasticLoadBalancer, TimeToLiveHFileCleaner, TimeToLiveLogCleaner
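Everything in this list shares Stoppable's small contract. As a reminder of what that contract looks like, a hedged sketch of a trivial implementor in the spirit of ShutdownHook.DoNothingStoppable; the two method signatures, stop(String) and isStopped(), are from the HBase interface, while the class itself is invented for illustration.

// Sketch: a minimal Stoppable implementor that only records the stop request.
public class FlagStoppable implements org.apache.hadoop.hbase.Stoppable {
  private volatile boolean stopped;

  @Override
  public void stop(String why) {
    stopped = true; // real implementors also release their resources here
  }

  @Override
  public boolean isStopped() {
    return stopped;
  }
}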
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/efd0601e/devapidocs/org/apache/hadoop/hbase/backup/BackupRestoreConstants.BackupCommand.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/backup/BackupRestoreConstants.BackupCommand.html b/devapidocs/org/apache/hadoop/hbase/backup/BackupRestoreConstants.BackupCommand.html
index ed087c7..421c21d 100644
--- a/devapidocs/org/apache/hadoop/hbase/backup/BackupRestoreConstants.BackupCommand.html
+++ b/devapidocs/org/apache/hadoop/hbase/backup/BackupRestoreConstants.BackupCommand.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static enum BackupRestoreConstants.BackupCommand
+public static enum BackupRestoreConstants.BackupCommand
 extends Enum<BackupRestoreConstants.BackupCommand>
 
 
@@ -258,7 +258,7 @@ the order they are declared.
 
 
 CREATE
-public static final BackupRestoreConstants.BackupCommand CREATE
+public static final BackupRestoreConstants.BackupCommand CREATE
 
 
@@ -267,7 +267,7 @@ the order they are declared.
 
 
 CANCEL
-public static final BackupRestoreConstants.BackupCommand CANCEL
+public static final BackupRestoreConstants.BackupCommand CANCEL
 
 
@@ -276,7 +276,7 @@ the order they are declared.
 
 
 DELETE
-public static final BackupRestoreConstants.BackupCommand DELETE
+public static final BackupRestoreConstants.BackupCommand DELETE
 
 
@@ -285,7 +285,7 @@ the order they are declared.
 
 
 DESCRIBE
-public static final BackupRestoreConstants.BackupCommand DESCRIBE
+public static final BackupRestoreConstants.BackupCommand DESCRIBE
 
 
@@ -294,7 +294,7 @@ the order they are declared.
 
 
 HISTORY
-public static final BackupRestoreConstants.BackupCommand HISTORY
+public static final BackupRestoreConstants.BackupCommand HISTORY
 
 
@@ -303,7 +303,7 @@ the order they are declared.
 
 
 STATUS
-public static final BackupRestoreConstants.BackupCommand STATUS
+public static final BackupRestoreConstants.BackupCommand STATUS
 
 
@@ -312,7 +312,7 @@ the order they are declared.
 
 
 CONVERT
-public static final BackupRestoreConstants.BackupCommand CONVERT
+public static final BackupRestoreConstants.BackupCommand CONVERT
 
 
@@ -321,7 +321,7 @@ the order they are declared.
 
 
 MERGE
-public static final BackupRestoreConstants.BackupCommand MERGE
+public static final BackupRestoreConstants.BackupCommand MERGE
 
 
@@ -330,7 
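The constants above read like the subcommands of the backup tool. A hedged sketch of dispatching on them; only the enum and its constant names come from the page above, and the one-line glosses are plain-English readings of those names, not documented semantics.

// Sketch: switching over BackupRestoreConstants.BackupCommand (glosses invented).
String describe(BackupRestoreConstants.BackupCommand cmd) {
  switch (cmd) {
    case CREATE:   return "start a new backup";
    case CANCEL:   return "cancel a running backup";
    case DELETE:   return "delete a backup image";
    case DESCRIBE: return "show details of one backup";
    case HISTORY:  return "list past backups";
    case STATUS:   return "show progress of a backup";
    case CONVERT:  return "convert a backup image";
    case MERGE:    return "merge incremental backup images";
    default:       return "unknown command";
  }
}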

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/10601a30/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
index be839b7..72853dd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
@@ -45,1639 +45,1784 @@
 037
 038import com.google.common.annotations.VisibleForTesting;
 039
-040import org.apache.commons.logging.Log;
-041import org.apache.commons.logging.LogFactory;
-042import org.apache.hadoop.hbase.HColumnDescriptor;
-043import org.apache.hadoop.hbase.HRegionInfo;
-044import org.apache.hadoop.hbase.HRegionLocation;
-045import org.apache.hadoop.hbase.HTableDescriptor;
-046import org.apache.hadoop.hbase.MetaTableAccessor;
-047import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-048import org.apache.hadoop.hbase.NotServingRegionException;
-049import org.apache.hadoop.hbase.RegionLocations;
-050import org.apache.hadoop.hbase.ServerName;
-051import org.apache.hadoop.hbase.NamespaceDescriptor;
-052import org.apache.hadoop.hbase.HConstants;
-053import org.apache.hadoop.hbase.TableName;
-054import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-055import org.apache.hadoop.hbase.TableNotFoundException;
-056import org.apache.hadoop.hbase.UnknownRegionException;
-057import org.apache.hadoop.hbase.classification.InterfaceAudience;
-058import org.apache.hadoop.hbase.classification.InterfaceStability;
-059import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-060import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-061import org.apache.hadoop.hbase.client.Scan.ReadType;
-062import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-063import org.apache.hadoop.hbase.client.replication.TableCFs;
-064import org.apache.hadoop.hbase.exceptions.DeserializationException;
-065import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-066import org.apache.hadoop.hbase.quotas.QuotaFilter;
-067import org.apache.hadoop.hbase.quotas.QuotaSettings;
-068import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-069import org.apache.hadoop.hbase.replication.ReplicationException;
-070import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-071import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-072import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-073import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-074import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-075import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-076import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-077import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-078import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-079import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-080import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-081import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-082import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-095import 

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/662ea7dc/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
index ac4a9b3..be839b7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
@@ -30,212 +30,212 @@
 022import java.io.IOException;
 023import java.util.ArrayList;
 024import java.util.Arrays;
-025import java.util.LinkedList;
-026import java.util.List;
-027import java.util.Optional;
-028import java.util.concurrent.CompletableFuture;
-029import java.util.concurrent.TimeUnit;
-030import java.util.concurrent.atomic.AtomicReference;
-031import java.util.function.BiConsumer;
-032import java.util.regex.Pattern;
-033
-034import com.google.common.annotations.VisibleForTesting;
-035import org.apache.commons.logging.Log;
-036import org.apache.commons.logging.LogFactory;
-037import org.apache.hadoop.hbase.HColumnDescriptor;
-038import org.apache.hadoop.hbase.HRegionInfo;
-039import org.apache.hadoop.hbase.HRegionLocation;
-040import org.apache.hadoop.hbase.HTableDescriptor;
-041import org.apache.hadoop.hbase.MetaTableAccessor;
-042import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-043import org.apache.hadoop.hbase.NotServingRegionException;
-044import org.apache.hadoop.hbase.RegionLocations;
-045import org.apache.hadoop.hbase.ServerName;
-046import org.apache.hadoop.hbase.NamespaceDescriptor;
-047import org.apache.hadoop.hbase.HConstants;
-048import org.apache.hadoop.hbase.TableName;
-049import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-050import org.apache.hadoop.hbase.TableNotFoundException;
-051import org.apache.hadoop.hbase.UnknownRegionException;
-052import org.apache.hadoop.hbase.classification.InterfaceAudience;
-053import org.apache.hadoop.hbase.classification.InterfaceStability;
-054import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-055import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-056import org.apache.hadoop.hbase.client.Scan.ReadType;
-057import org.apache.hadoop.hbase.exceptions.DeserializationException;
-058import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-059import org.apache.hadoop.hbase.quotas.QuotaFilter;
-060import org.apache.hadoop.hbase.quotas.QuotaSettings;
-061import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-062import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-063import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-064import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-065import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-066import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-067import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-068import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-069import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-070import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-071import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-072import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-073import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-074import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-075import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-076import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-077import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-078import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-079import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-080import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-081import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-082import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-085import 

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b4bae59/devapidocs/org/apache/hadoop/hbase/regionserver/ChunkCreator.MemStoreChunkPool.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/ChunkCreator.MemStoreChunkPool.html b/devapidocs/org/apache/hadoop/hbase/regionserver/ChunkCreator.MemStoreChunkPool.html
new file mode 100644
index 000..32185d0
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/ChunkCreator.MemStoreChunkPool.html
@@ -0,0 +1,508 @@
+ChunkCreator.MemStoreChunkPool (Apache HBase 2.0.0-SNAPSHOT API)
+
+org.apache.hadoop.hbase.regionserver
+Class ChunkCreator.MemStoreChunkPool
+
+java.lang.Object
+  org.apache.hadoop.hbase.regionserver.ChunkCreator.MemStoreChunkPool
+
+All Implemented Interfaces:
+HeapMemoryManager.HeapMemoryTuneObserver
+
+Enclosing class:
+ChunkCreator
+
+private class ChunkCreator.MemStoreChunkPool
+extends java.lang.Object
+implements HeapMemoryManager.HeapMemoryTuneObserver
+
+A pool of Chunk instances.
+
+ MemStoreChunkPool caches a number of retired chunks for reusing, it could
+ decrease allocating bytes when writing, thereby optimizing the garbage
+ collection on JVM.
+
+Nested Class Summary
+
+private class ChunkCreator.MemStoreChunkPool.StatisticsThread
+
+Field Summary
+
+private AtomicLong chunkCount
+private int maxCount
+private float poolSizePercentage
+private BlockingQueue<Chunk> reclaimedChunks
+private AtomicLong reusedChunkCount
+private ScheduledExecutorService scheduleThreadPool
+  Statistics thread schedule pool
+private static int statThreadPeriod
+  Statistics thread
+
+Constructor Summary
+
+MemStoreChunkPool(int maxCount, int initialCount, float poolSizePercentage)
+
+Method Summary
+
+(package private) Chunk getChunk()
+  Poll a chunk from the pool, reset it if not null, else create a new chunk to return if we have
+  not yet created max allowed chunks count.
+
+private int getMaxCount()
+
+void onHeapMemoryTune(long newMemstoreSize, long newBlockCacheSize)
+  This method would be called by HeapMemoryManager when a heap memory tune action took place.
+
+private void putbackChunks(Set<Integer> chunks)
+  Add the chunks to the pool; when the pool reaches its max size, it will skip the remaining
+  chunks.
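To make getChunk()/putbackChunks() concrete, a hedged sketch of the same pooling pattern over a bounded BlockingQueue. The field names echo the summary above, but byte[] stands in for Chunk, the constructor takes a chunk size instead of the pool-size percentage, and the bodies are illustrative only, not HBase's implementation.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicLong;

// Illustrative pool in the shape of the summary above (not the HBase source).
class ChunkPoolSketch {
  private final int maxCount;                          // cap on chunks ever created
  private final int chunkSize;
  private final BlockingQueue<byte[]> reclaimedChunks; // retired chunks awaiting reuse
  private final AtomicLong chunkCount = new AtomicLong();
  private final AtomicLong reusedChunkCount = new AtomicLong();

  ChunkPoolSketch(int maxCount, int initialCount, int chunkSize) {
    this.maxCount = maxCount;
    this.chunkSize = chunkSize;
    // Bounded queue, so offer() fails once the pool is full (assumes initialCount <= maxCount).
    this.reclaimedChunks = new LinkedBlockingQueue<>(maxCount);
    for (int i = 0; i < initialCount; i++) {
      reclaimedChunks.add(new byte[chunkSize]);
      chunkCount.incrementAndGet();
    }
  }

  /** Poll a pooled chunk; otherwise allocate a new one while under maxCount. */
  byte[] getChunk() {
    byte[] chunk = reclaimedChunks.poll();
    if (chunk != null) {
      reusedChunkCount.incrementAndGet(); // each reuse saves an allocation, easing GC pressure
      return chunk;
    }
    if (chunkCount.incrementAndGet() <= maxCount) {
      return new byte[chunkSize];
    }
    chunkCount.decrementAndGet();
    return null; // caller falls back to allocating outside the pool
  }

  /** Return chunks to the pool; once the pool is at max size, skip the rest. */
  void putbackChunks(Iterable<byte[]> chunks) {
    for (byte[] c : chunks) {
      if (!reclaimedChunks.offer(c)) {
        break; // pool full: drop the remaining chunks
      }
    }
  }
}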

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2fcc2ae0/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.PeriodicDoMetrics.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.PeriodicDoMetrics.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.PeriodicDoMetrics.html
index a58f559..98b388b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.PeriodicDoMetrics.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.PeriodicDoMetrics.html
@@ -756,2562 +756,2560 @@
 748
 749    this.masterActiveTime = System.currentTimeMillis();
 750    // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
-751    // Initialize the chunkCreator
-752    initializeMemStoreChunkCreator();
-753    this.fileSystemManager = new MasterFileSystem(this);
-754    this.walManager = new MasterWalManager(this);
-755
-756    // enable table descriptors cache
-757    this.tableDescriptors.setCacheOn();
-758    // set the META's descriptor to the correct replication
-759    this.tableDescriptors.get(TableName.META_TABLE_NAME).setRegionReplication(
-760        conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
-761    // warm-up HTDs cache on master initialization
-762    if (preLoadTableDescriptors) {
-763      status.setStatus("Pre-loading table descriptors");
-764      this.tableDescriptors.getAll();
-765    }
-766
-767    // publish cluster ID
-768    status.setStatus("Publishing Cluster ID in ZooKeeper");
-769    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
-770    this.initLatch.countDown();
+751    this.fileSystemManager = new MasterFileSystem(this);
+752    this.walManager = new MasterWalManager(this);
+753
+754    // enable table descriptors cache
+755    this.tableDescriptors.setCacheOn();
+756    // set the META's descriptor to the correct replication
+757    this.tableDescriptors.get(TableName.META_TABLE_NAME).setRegionReplication(
+758        conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
+759    // warm-up HTDs cache on master initialization
+760    if (preLoadTableDescriptors) {
+761      status.setStatus("Pre-loading table descriptors");
+762      this.tableDescriptors.getAll();
+763    }
+764
+765    // publish cluster ID
+766    status.setStatus("Publishing Cluster ID in ZooKeeper");
+767    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
+768    this.initLatch.countDown();
+769
+770    this.serverManager = createServerManager(this);
 771
-772    this.serverManager = createServerManager(this);
+772    this.tableStateManager = new TableStateManager(this);
 773
-774    this.tableStateManager = new TableStateManager(this);
-775
-776    status.setStatus("Initializing ZK system trackers");
-777    initializeZKBasedSystemTrackers();
-778
-779    // This is for backwards compatibility
-780    // See HBASE-11393
-781    status.setStatus("Update TableCFs node in ZNode");
-782    TableCFsUpdater tableCFsUpdater = new TableCFsUpdater(zooKeeper,
-783        conf, this.clusterConnection);
-784    tableCFsUpdater.update();
-785
-786    // initialize master side coprocessors before we start handling requests
-787    status.setStatus("Initializing master coprocessors");
-788    this.cpHost = new MasterCoprocessorHost(this, this.conf);
-789
-790    // start up all service threads.
-791    status.setStatus("Initializing master service threads");
-792    startServiceThreads();
-793
-794    // Wake up this server to check in
-795    sleeper.skipSleepCycle();
-796
-797    // Wait for region servers to report in
-798    status.setStatus("Wait for region servers to report in");
-799    waitForRegionServers(status);
-800
-801    // get a list for previously failed RS which need log splitting work
-802    // we recover hbase:meta region servers inside master initialization and
-803    // handle other failed servers in SSH in order to start up master node ASAP
-804    MasterMetaBootstrap metaBootstrap = createMetaBootstrap(this, status);
-805    metaBootstrap.splitMetaLogsBeforeAssignment();
+774    status.setStatus("Initializing ZK system trackers");
+775    initializeZKBasedSystemTrackers();
+776
+777    // This is for backwards compatibility
+778    // See HBASE-11393
+779    status.setStatus("Update TableCFs node in ZNode");
+780    TableCFsUpdater tableCFsUpdater = new TableCFsUpdater(zooKeeper,
+781        conf, this.clusterConnection);
+782    tableCFsUpdater.update();
+783
+784    // initialize master side coprocessors before we start handling requests
+785    status.setStatus("Initializing master coprocessors");
+786    this.cpHost = new MasterCoprocessorHost(this, this.conf);
+787
+788    // start up all service threads.
+789    status.setStatus("Initializing master 

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e4348f53/devapidocs/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.HeapMemoryTuneObserver.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.HeapMemoryTuneObserver.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.HeapMemoryTuneObserver.html
index ce24369..542de65 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.HeapMemoryTuneObserver.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.HeapMemoryTuneObserver.html
@@ -101,7 +101,7 @@ var activeTableTab = "activeTableTab";
 
 
 All Known Implementing Classes:
-MemStoreChunkPool
+ChunkCreator.MemStoreChunkPool
 
 
 Enclosing class:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e4348f53/devapidocs/org/apache/hadoop/hbase/regionserver/MemStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStore.html b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStore.html
index 3edab8c..3735d85 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStore.html
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames
@@ -475,7 +475,7 @@ public interface 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e4348f53/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.StatisticsThread.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.StatisticsThread.html b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.StatisticsThread.html
deleted file mode 100644
index 8bdc36c..000
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.StatisticsThread.html
+++ /dev/null
@@ -1,341 +0,0 @@
-MemStoreChunkPool.StatisticsThread (Apache HBase 2.0.0-SNAPSHOT API)
-
-org.apache.hadoop.hbase.regionserver
-Class MemStoreChunkPool.StatisticsThread
-
-java.lang.Object
-  java.lang.Thread
-    org.apache.hadoop.hbase.regionserver.MemStoreChunkPool.StatisticsThread
-
-All Implemented Interfaces:
-java.lang.Runnable
-
-Enclosing class:
-MemStoreChunkPool
-
-private class MemStoreChunkPool.StatisticsThread
-extends java.lang.Thread
-
-Nested Class Summary
-
-Nested classes/interfaces inherited from class java.lang.Thread:
-Thread.State, Thread.UncaughtExceptionHandler
-
-Field Summary
-
-Fields inherited from class java.lang.Thread

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e57d1b63/apidocs/org/apache/hadoop/hbase/class-use/CellUtil.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/CellUtil.html b/apidocs/org/apache/hadoop/hbase/class-use/CellUtil.html
deleted file mode 100644
index d7254a3..000
--- a/apidocs/org/apache/hadoop/hbase/class-use/CellUtil.html
+++ /dev/null
@@ -1,125 +0,0 @@
-Uses of Class org.apache.hadoop.hbase.CellUtil (Apache HBase 2.0.0-SNAPSHOT API)
-
-Uses of Class org.apache.hadoop.hbase.CellUtil
-
-No usage of org.apache.hadoop.hbase.CellUtil
-
-Copyright © 2007-2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e57d1b63/apidocs/org/apache/hadoop/hbase/class-use/ChoreService.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/ChoreService.html b/apidocs/org/apache/hadoop/hbase/class-use/ChoreService.html
deleted file mode 100644
index 08afe59..000
--- a/apidocs/org/apache/hadoop/hbase/class-use/ChoreService.html
+++ /dev/null
@@ -1,125 +0,0 @@
-Uses of Class org.apache.hadoop.hbase.ChoreService (Apache HBase 2.0.0-SNAPSHOT API)
-
-Uses of Class org.apache.hadoop.hbase.ChoreService
-
-No usage of org.apache.hadoop.hbase.ChoreService
-
-Copyright © 2007-2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e57d1b63/apidocs/org/apache/hadoop/hbase/class-use/ClockOutOfSyncException.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/ClockOutOfSyncException.html b/apidocs/org/apache/hadoop/hbase/class-use/ClockOutOfSyncException.html
deleted file mode 100644
index 38a6299..000
--- a/apidocs/org/apache/hadoop/hbase/class-use/ClockOutOfSyncException.html
+++ /dev/null
@@ -1,125 +0,0 @@
-Uses of Class org.apache.hadoop.hbase.ClockOutOfSyncException (Apache HBase 2.0.0-SNAPSHOT API)