[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html
index 37a2235..386a41b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html
@@ -105,7 +105,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static interface RSRpcServices.LogDelegate
+static interface RSRpcServices.LogDelegate
 
 
 
@@ -150,7 +150,7 @@ var activeTableTab = "activeTableTab";
 
 
 logBatchWarning
-void logBatchWarning(int sum,
+void logBatchWarning(int sum,
  int rowSizeWarnThreshold)
 
 
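The interface above has a single hook, logBatchWarning(int sum, int rowSizeWarnThreshold). A minimal sketch of what an implementation might look like, assuming the commons-logging Log used elsewhere in HBase of this era; the class name and message wording are illustrative:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

class DefaultLogDelegate {
  private static final Log LOG = LogFactory.getLog(DefaultLogDelegate.class);

  // Same shape as RSRpcServices.LogDelegate.logBatchWarning above.
  public void logBatchWarning(int sum, int rowSizeWarnThreshold) {
    LOG.warn("Large batch operation detected (greater than " + rowSizeWarnThreshold
        + " rows): " + sum + " rows in one multi request");
  }
}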

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
index b6c32b6..8234a37 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static final class RSRpcServices.RegionScannerCloseCallBack
+private static final class RSRpcServices.RegionScannerCloseCallBack
 extends Object
 implements RpcCallback
 An Rpc callback for closing a RegionScanner.
@@ -209,7 +209,7 @@ implements 
 
 scanner
-private final RegionScanner scanner
+private final RegionScanner scanner
 
 
 
@@ -226,7 +226,7 @@ implements 
 
 RegionScannerCloseCallBack
-public RegionScannerCloseCallBack(RegionScanner scanner)
+public RegionScannerCloseCallBack(RegionScanner scanner)
 
 
 
@@ -243,7 +243,7 @@ implements 
 
 run
-public void run()
+public void run()
  throws IOException
 Description copied from 
interface:RpcCallback
 Called at the end of an Rpc Call RpcCallContext

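The contract shown above is simple: run() is invoked once at the end of the RPC call and must release the scanner. A self-contained sketch of that pattern, where RegionScannerLike is a stand-in for the real RegionScanner interface:

import java.io.IOException;

interface RegionScannerLike {
  void close() throws IOException;
}

final class ScannerCloseCallback {
  private final RegionScannerLike scanner;

  ScannerCloseCallback(RegionScannerLike scanner) {
    this.scanner = scanner;
  }

  // Mirrors RpcCallback.run() throws IOException from the javadoc above.
  public void run() throws IOException {
    scanner.close();
  }
}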
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
index dc81296..a084b74 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static final class RSRpcServices.RegionScannerHolder
+private static final class RSRpcServices.RegionScannerHolder
 extends Object
 Holder class which holds the RegionScanner, nextCallSeq and 
RpcCallbacks together.
 
@@ -239,7 +239,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 nextCallSeq
-private final AtomicLong nextCallSeq
+private final AtomicLong nextCallSeq
 
 
 
@@ -248,7 +248,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 scannerName
-private final String scannerName
+private final String scannerName
 
 
 
@@ -257,7 +257,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 s
-private final RegionScanner s
+private final RegionScanner s
 
 
 
@@ -266,7 +266,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 r
-private final Region r
+private final Region r
 
 
 
@@ -275,7 +275,7 @@ extends 

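The holder's AtomicLong nextCallSeq is what lets the region server detect retried or out-of-order scan RPCs: each scan call carries the sequence number it expects, and only a matching call advances the counter. A hedged sketch of that mechanism; the compare-and-set formulation is illustrative, not the exact RSRpcServices code:

import java.util.concurrent.atomic.AtomicLong;

final class ScannerHolderSketch {
  private final AtomicLong nextCallSeq = new AtomicLong(0);
  private final String scannerName;

  ScannerHolderSketch(String scannerName) {
    this.scannerName = scannerName;
  }

  // Accept the call only if it presents the expected sequence number, then
  // advance; a mismatch signals a retried or out-of-order scan request.
  boolean acceptCall(long callSeq) {
    return nextCallSeq.compareAndSet(callSeq, callSeq + 1);
  }
}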
[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
index 33adf9c..35769e1 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
@@ -1960,111 +1960,111 @@ service.
 
 
 private TableName
-SnapshotDescription.table
+RegionCoprocessorRpcChannel.table
 
 
 private TableName
-RegionCoprocessorRpcChannel.table
+SnapshotDescription.table
 
 
 private TableName
-RawAsyncTableImpl.tableName
+HRegionLocator.tableName
 
 
 private TableName
-RegionServerCallable.tableName
+ScannerCallableWithReplicas.tableName
 
 
 protected TableName
-RegionAdminServiceCallable.tableName
+ClientScanner.tableName
 
 
 private TableName
-BufferedMutatorImpl.tableName
+AsyncClientScanner.tableName
 
 
 private TableName
-AsyncProcessTask.tableName
+AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.tableName
 
 
 private TableName
-AsyncProcessTask.Builder.tableName
+AsyncRpcRetryingCallerFactory.BatchCallerBuilder.tableName
 
 
 private TableName
-AsyncRequestFutureImpl.tableName
+RawAsyncTableImpl.tableName
 
 
-protected TableName
-TableBuilderBase.tableName
+private TableName
+RegionCoprocessorRpcChannelImpl.tableName
 
 
 private TableName
-AsyncBatchRpcRetryingCaller.tableName
+AsyncTableRegionLocatorImpl.tableName
 
 
-private TableName
-HTable.tableName
+protected TableName
+RegionAdminServiceCallable.tableName
 
 
 private TableName
-TableState.tableName
+HTable.tableName
 
 
-protected TableName
-RpcRetryingCallerWithReadReplicas.tableName
+private TableName
+BufferedMutatorImpl.tableName
 
 
-protected TableName
-AsyncTableBuilderBase.tableName
+private TableName
+AsyncBatchRpcRetryingCaller.tableName
 
 
 private TableName
-AsyncSingleRequestRpcRetryingCaller.tableName
+BufferedMutatorParams.tableName
 
 
 private TableName
-ScannerCallableWithReplicas.tableName
+HBaseAdmin.TableFuture.tableName
 
 
-protected TableName
-RawAsyncHBaseAdmin.TableProcedureBiConsumer.tableName
+private TableName
+AsyncRequestFutureImpl.tableName
 
 
 private TableName
-AsyncTableRegionLocatorImpl.tableName
+AsyncProcessTask.tableName
 
 
 private TableName
-HBaseAdmin.TableFuture.tableName
+AsyncProcessTask.Builder.tableName
 
 
-private TableName
-RegionCoprocessorRpcChannelImpl.tableName
+protected TableName
+RawAsyncHBaseAdmin.TableProcedureBiConsumer.tableName
 
 
 private TableName
-ClientScanner.tableName
+RegionServerCallable.tableName
 
 
 private TableName
-BufferedMutatorParams.tableName
+AsyncSingleRequestRpcRetryingCaller.tableName
 
 
-private TableName
-AsyncClientScanner.tableName
+protected TableName
+TableBuilderBase.tableName
 
 
-private TableName
-AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.tableName
+protected TableName
+RpcRetryingCallerWithReadReplicas.tableName
 
 
-private TableName
-AsyncRpcRetryingCallerFactory.BatchCallerBuilder.tableName
+protected TableName
+AsyncTableBuilderBase.tableName
 
 
 private TableName
-HRegionLocator.tableName
+TableState.tableName
 
 
 
@@ -2102,37 +2102,27 @@ service.
 
 
 TableName
-RawAsyncTableImpl.getName()
-
-
-TableName
-RegionLocator.getName()
+Table.getName()
 Gets the fully qualified table name instance of this 
table.
 
 
-
-TableName
-BufferedMutatorImpl.getName()
-
 
 TableName
-BufferedMutator.getName()
-Gets the fully qualified table name instance of the table 
that this BufferedMutator writes to.
-
+HRegionLocator.getName()
 
 
 TableName
-HTable.getName()
+AsyncTableRegionLocator.getName()
+Gets the fully qualified table name instance of the table 
whose region we want to locate.
+
 
 
 TableName
-Table.getName()
-Gets the fully qualified table name instance of this 
table.
-
+AsyncTableImpl.getName()
 
 
 TableName
-AsyncTableImpl.getName()
+RawAsyncTableImpl.getName()
 
 
 TableName
@@ -2140,35 +2130,37 @@ service.
 
 
 TableName
-AsyncTableRegionLocator.getName()
-Gets the fully qualified table name instance of the table 
whose region we want to locate.
+BufferedMutator.getName()
+Gets the fully qualified table name instance of the table 
that this BufferedMutator writes to.
 
 
 
 TableName
-AsyncTableBase.getName()
+RegionLocator.getName()
 Gets the fully qualified table name instance of this 
table.
 
 
 
 TableName
-HRegionLocator.getName()
+HTable.getName()
 
 
 TableName
-HTableWrapper.getName()
+BufferedMutatorImpl.getName()
 
 
-protected TableName
-ClientScanner.getTable()
+TableName
+AsyncTableBase.getName()
+Gets the fully qualified table name instance of this 
table.
+
 
 
 TableName
-RegionServerCallable.getTableName()
+HTableWrapper.getName()
 
 
-TableName
-SnapshotDescription.getTableName()
+protected TableName
+ClientScanner.getTable()
 
 
 TableName
@@ -2178,19 +2170,17 

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
index 35d5549..7f42873 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
@@ -115,2816 +115,2814 @@
 107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
 109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-150import 

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
index f355960..13d9b4a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
@@ -360,478 +360,480 @@
 352
 353  @Override
 354  public void requestFlush(Region r, 
boolean forceFlushAllStores) {
-355synchronized (regionsInQueue) {
-356  if (!regionsInQueue.containsKey(r)) 
{
-357// This entry has no delay so it 
will be added at the top of the flush
-358// queue.  It'll come out near 
immediately.
-359FlushRegionEntry fqe = new 
FlushRegionEntry(r, forceFlushAllStores);
-360this.regionsInQueue.put(r, 
fqe);
-361this.flushQueue.add(fqe);
-362  }
-363}
-364  }
-365
-366  @Override
-367  public void requestDelayedFlush(Region 
r, long delay, boolean forceFlushAllStores) {
-368synchronized (regionsInQueue) {
-369  if (!regionsInQueue.containsKey(r)) 
{
-370// This entry has some delay
-371FlushRegionEntry fqe = new 
FlushRegionEntry(r, forceFlushAllStores);
-372fqe.requeue(delay);
-373this.regionsInQueue.put(r, 
fqe);
-374this.flushQueue.add(fqe);
-375  }
-376}
-377  }
-378
-379  public int getFlushQueueSize() {
-380return flushQueue.size();
-381  }
-382
-383  /**
-384   * Only interrupt once it's done with a 
run through the work loop.
-385   */
-386  void interruptIfNecessary() {
-387lock.writeLock().lock();
-388try {
-389  for (FlushHandler flushHander : 
flushHandlers) {
-390if (flushHander != null) 
flushHander.interrupt();
-391  }
-392} finally {
-393  lock.writeLock().unlock();
-394}
-395  }
-396
-397  synchronized void 
start(UncaughtExceptionHandler eh) {
-398ThreadFactory flusherThreadFactory = 
Threads.newDaemonThreadFactory(
-399
server.getServerName().toShortString() + "-MemStoreFlusher", eh);
-400for (int i = 0; i < 
flushHandlers.length; i++) {
-401  flushHandlers[i] = new 
FlushHandler("MemStoreFlusher." + i);
-402  
flusherThreadFactory.newThread(flushHandlers[i]);
-403  flushHandlers[i].start();
-404}
-405  }
-406
-407  boolean isAlive() {
-408for (FlushHandler flushHander : 
flushHandlers) {
-409  if (flushHander != null && 
flushHander.isAlive()) {
-410return true;
-411  }
-412}
-413return false;
-414  }
-415
-416  void join() {
-417for (FlushHandler flushHander : 
flushHandlers) {
-418  if (flushHander != null) {
-419
Threads.shutdown(flushHander.getThread());
-420  }
-421}
-422  }
-423
-424  /**
-425   * A flushRegion that checks store file 
count.  If too many, puts the flush
-426   * on delay queue to retry later.
-427   * @param fqe
-428   * @return true if the region was 
successfully flushed, false otherwise. If
-429   * false, there will be accompanying 
log messages explaining why the region was
-430   * not flushed.
-431   */
-432  private boolean flushRegion(final 
FlushRegionEntry fqe) {
-433Region region = fqe.region;
-434if 
(!region.getRegionInfo().isMetaRegion() && 
isTooManyStoreFiles(region)) {
-436  if 
(fqe.isMaximumWait(this.blockingWaitTime)) {
-437LOG.info("Waited " + 
(EnvironmentEdgeManager.currentTime() - fqe.createTime) +
-438  "ms on a compaction to clean up 
'too many store files'; waited " +
-439  "long enough... proceeding with 
flush of " +
-440  
region.getRegionInfo().getRegionNameAsString());
-441  } else {
-442// If this is first time we've 
been put off, then emit a log message.
-443if (fqe.getRequeueCount() <= 
0) {
-444  // Note: We don't impose 
blockingStoreFiles constraint on meta regions
-445  LOG.warn("Region " + 
region.getRegionInfo().getRegionNameAsString() + " has too many " +
-446"store files; delaying flush 
up to " + this.blockingWaitTime + "ms");
-447  if 
(!this.server.compactSplitThread.requestSplit(region)) {
-448try {
-449  
this.server.compactSplitThread.requestSystemCompaction(
-450  region, 
Thread.currentThread().getName());
-451} catch (IOException e) {
-452  e = e instanceof 
RemoteException ?
-453  
((RemoteException)e).unwrapRemoteException() : e;
-454  LOG.error("Cache flush 
failed for region " +
-455
Bytes.toStringBinary(region.getRegionInfo().getRegionName()), e);
-456}
-457  }
-458}
-459
-460// Put back on the queue.  Have 
it come back out of 

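The requestFlush/requestDelayedFlush pair above follows one pattern: a map guarded by synchronized deduplicates pending regions while a delay queue orders entries by wake-up time, so a region is queued at most once and a delayed entry simply surfaces later. A distilled, self-contained sketch of that pattern; FlushEntry and FlushScheduler are illustrative names, not the HBase classes:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

final class FlushEntry implements Delayed {
  final String region;         // stand-in for the Region handle
  private final long wakeUpAt; // absolute time this entry becomes ready

  FlushEntry(String region, long delayMs) {
    this.region = region;
    this.wakeUpAt = System.currentTimeMillis() + delayMs;
  }

  @Override
  public long getDelay(TimeUnit unit) {
    return unit.convert(wakeUpAt - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
  }

  @Override
  public int compareTo(Delayed other) {
    return Long.compare(getDelay(TimeUnit.MILLISECONDS),
        other.getDelay(TimeUnit.MILLISECONDS));
  }
}

final class FlushScheduler {
  private final Map<String, FlushEntry> regionsInQueue = new HashMap<>();
  private final DelayQueue<FlushEntry> flushQueue = new DelayQueue<>();

  // No delay: the entry comes out near immediately, like requestFlush above.
  void requestFlush(String region) {
    requestDelayedFlush(region, 0);
  }

  // At most one pending entry per region, like the containsKey check above.
  void requestDelayedFlush(String region, long delayMs) {
    synchronized (regionsInQueue) {
      if (!regionsInQueue.containsKey(region)) {
        FlushEntry fqe = new FlushEntry(region, delayMs);
        regionsInQueue.put(region, fqe);
        flushQueue.add(fqe);
      }
    }
  }
}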
[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index 6ade55e..4f55b6b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 008@InterfaceAudience.Private
 009public class Version {
 010  public static final String version = 
"3.0.0-SNAPSHOT";
-011  public static final String revision = 
"8f006582e30b106f0d9841c520250dfd6db4a689";
+011  public static final String revision = 
"3d81f7b9e7b3c15fbe7b987901e521ba01e3e3cf";
 012  public static final String user = 
"jenkins";
-013  public static final String date = "Mon 
Jul 24 18:40:10 UTC 2017";
+013  public static final String date = "Wed 
Jul 26 14:39:51 UTC 2017";
 014  public static final String url = 
"git://asf920.gq1.ygridcore.net/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015  public static final String srcChecksum 
= "eb04d35f9663a90df7d1f87f9f4ec0a6";
+015  public static final String srcChecksum 
= "0276f0a1f9fcef85f05be6dbc92a0ec0";
 016}
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.html
index 91fe6e6..02e3eb8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.html
@@ -223,7 +223,14 @@
 215   * @return Column family descriptor 
with only the customized attributes.
 216   */
 217  String toStringCustomizedValues();
-218}
+218
+219  /**
+220   * By default, HBase only considers 
timestamp in versions. So a previous Delete with higher ts
+221   * will mask a later Put with lower ts. 
Set this to true to enable new semantics of versions.
+222   * We will also consider mvcc in 
versions. See HBASE-15968 for details.
+223   */
+224  boolean isNewVersionBehavior();
+225}
 
 
 
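For the new isNewVersionBehavior() flag above, opting in happens when the column family is defined. A hedged sketch, assuming the setNewVersionBehavior(boolean) setter that later HBase 2.x ColumnFamilyDescriptorBuilder releases expose; verify against your client version:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class NewVersionBehaviorExample {
  public static ColumnFamilyDescriptor build() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        // Consider mvcc as well as timestamp when resolving versions, so an
        // earlier Delete with a higher ts no longer masks a later Put (HBASE-15968).
        .setNewVersionBehavior(true)
        .build();
  }
}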



[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.LockState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.LockState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.LockState.html
index f7fa6c8..b83924f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.LockState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.LockState.html
@@ -191,7 +191,7 @@
 183   * A lock can be anything, and it is up 
to the implementor. The Procedure
 184   * Framework will call this method just 
before it invokes {@link #execute(Object)}.
 185   * It calls {@link 
#releaseLock(Object)} after the call to execute.
-186   * 
+186   *
 187   * If you need to hold the 
lock for the life of the Procedure -- i.e. you do not
 188   * want any other Procedure interfering 
while this Procedure is running, see
 189   * {@link #holdLock(Object)}.
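The acquireLock/releaseLock contract in that javadoc (acquire just before execute(), release just after, holdLock to keep the lock for the procedure's whole life) can be illustrated with a self-contained sketch; LockState, the abstract skeleton, and the ReentrantLock are local stand-ins for the real procedure2 types, whose exact signatures vary across versions:

import java.util.concurrent.locks.ReentrantLock;

enum LockState { LOCK_ACQUIRED, LOCK_EVENT_WAIT }

abstract class ProcedureSketch {
  // The framework calls acquireLock() just before execute(),
  // and releaseLock() after it, unless holdLock() returns true.
  abstract LockState acquireLock();
  abstract void releaseLock();
  boolean holdLock() { return false; }
  abstract void execute();

  final void runOnce() {
    if (acquireLock() != LockState.LOCK_ACQUIRED) {
      return; // the framework would requeue us until the lock frees up
    }
    try {
      execute();
    } finally {
      if (!holdLock()) {
        releaseLock();
      }
    }
  }
}

class TableProcedure extends ProcedureSketch {
  private final ReentrantLock tableLock;

  TableProcedure(ReentrantLock tableLock) {
    this.tableLock = tableLock;
  }

  @Override LockState acquireLock() {
    return tableLock.tryLock() ? LockState.LOCK_ACQUIRED : LockState.LOCK_EVENT_WAIT;
  }

  @Override void releaseLock() {
    tableLock.unlock();
  }

  @Override void execute() {
    // one step of the procedure's work, done while the lock is held
  }
}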
@@ -474,461 +474,465 @@
 466return rootProcId;
 467  }
 468
-469  public NonceKey getNonceKey() {
-470return nonceKey;
+469  public String getProcName() {
+470return toStringClass();
 471  }
 472
-473  public long getSubmittedTime() {
-474return submittedTime;
+473  public NonceKey getNonceKey() {
+474return nonceKey;
 475  }
 476
-477  public String getOwner() {
-478return owner;
+477  public long getSubmittedTime() {
+478return submittedTime;
 479  }
 480
-481  public boolean hasOwner() {
-482return owner != null;
+481  public String getOwner() {
+482return owner;
 483  }
 484
-485  /**
-486   * Called by the ProcedureExecutor to 
assign the ID to the newly created procedure.
-487   */
-488  @VisibleForTesting
-489  @InterfaceAudience.Private
-490  protected void setProcId(final long 
procId) {
-491this.procId = procId;
-492this.submittedTime = 
EnvironmentEdgeManager.currentTime();
-493setState(ProcedureState.RUNNABLE);
-494  }
-495
-496  /**
-497   * Called by the ProcedureExecutor to 
assign the parent to the newly created procedure.
-498   */
-499  @InterfaceAudience.Private
-500  protected void setParentProcId(final 
long parentProcId) {
-501this.parentProcId = parentProcId;
-502  }
-503
-504  @InterfaceAudience.Private
-505  protected void setRootProcId(final long 
rootProcId) {
-506this.rootProcId = rootProcId;
-507  }
-508
-509  /**
-510   * Called by the ProcedureExecutor to 
set the value to the newly created procedure.
-511   */
-512  @VisibleForTesting
-513  @InterfaceAudience.Private
-514  protected void setNonceKey(final 
NonceKey nonceKey) {
-515this.nonceKey = nonceKey;
-516  }
-517
-518  @VisibleForTesting
-519  @InterfaceAudience.Private
-520  public void setOwner(final String 
owner) {
-521this.owner = 
StringUtils.isEmpty(owner) ? null : owner;
-522  }
-523
-524  public void setOwner(final User owner) 
{
-525assert owner != null : "expected 
owner to be not null";
-526setOwner(owner.getShortName());
-527  }
-528
-529  /**
-530   * Called on store load to initialize 
the Procedure internals after
-531   * the creation/deserialization.
-532   */
-533  @InterfaceAudience.Private
-534  protected void setSubmittedTime(final 
long submittedTime) {
-535this.submittedTime = submittedTime;
-536  }
-537
-538  // 
==
-539  //  runtime state - timeout related
-540  // 
==
-541  /**
-542   * @param timeout timeout interval in 
msec
-543   */
-544  protected void setTimeout(final int 
timeout) {
-545this.timeout = timeout;
-546  }
-547
-548  public boolean hasTimeout() {
-549return timeout != NO_TIMEOUT;
+485  public boolean hasOwner() {
+486return owner != null;
+487  }
+488
+489  /**
+490   * Called by the ProcedureExecutor to 
assign the ID to the newly created procedure.
+491   */
+492  @VisibleForTesting
+493  @InterfaceAudience.Private
+494  protected void setProcId(final long 
procId) {
+495this.procId = procId;
+496this.submittedTime = 
EnvironmentEdgeManager.currentTime();
+497setState(ProcedureState.RUNNABLE);
+498  }
+499
+500  /**
+501   * Called by the ProcedureExecutor to 
assign the parent to the newly created procedure.
+502   */
+503  @InterfaceAudience.Private
+504  protected void setParentProcId(final 
long parentProcId) {
+505this.parentProcId = parentProcId;
+506  }
+507
+508  @InterfaceAudience.Private
+509  protected void setRootProcId(final long 
rootProcId) {
+510this.rootProcId = rootProcId;
+511  }
+512
+513  /**
+514   * Called by the ProcedureExecutor to 
set the value to the newly created procedure.
+515   */
+516  @VisibleForTesting
+517  @InterfaceAudience.Private
+518  protected void setNonceKey(final 
NonceKey nonceKey) {
+519this.nonceKey = 

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
index e690c2d..ec75aa9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
@@ -818,1092 +818,1070 @@
 810});
 811  }
 812
-813  public void preDispatchMerge(final 
HRegionInfo regionInfoA, final HRegionInfo regionInfoB)
-814  throws IOException {
+813  public void preMergeRegions(final 
HRegionInfo[] regionsToMerge)
+814  throws IOException {
 815execOperation(coprocessors.isEmpty() 
? null : new CoprocessorOperation() {
 816  @Override
 817  public void call(MasterObserver 
oserver, ObserverContextMasterCoprocessorEnvironment ctx)
 818  throws IOException {
-819oserver.preDispatchMerge(ctx, 
regionInfoA, regionInfoB);
+819oserver.preMergeRegions(ctx, 
regionsToMerge);
 820  }
 821});
 822  }
 823
-824  public void postDispatchMerge(final 
HRegionInfo regionInfoA, final HRegionInfo regionInfoB)
-825  throws IOException {
+824  public void postMergeRegions(final 
HRegionInfo[] regionsToMerge)
+825  throws IOException {
 826execOperation(coprocessors.isEmpty() 
? null : new CoprocessorOperation() {
 827  @Override
 828  public void call(MasterObserver 
oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
 829  throws IOException {
-830oserver.postDispatchMerge(ctx, 
regionInfoA, regionInfoB);
+830oserver.postMergeRegions(ctx, 
regionsToMerge);
 831  }
 832});
 833  }
 834
-835  public void preMergeRegions(final 
HRegionInfo[] regionsToMerge)
-836  throws IOException {
-837execOperation(coprocessors.isEmpty() 
? null : new CoprocessorOperation() {
-838  @Override
-839  public void call(MasterObserver 
oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
-840  throws IOException {
-841oserver.preMergeRegions(ctx, 
regionsToMerge);
-842  }
-843});
-844  }
-845
-846  public void postMergeRegions(final 
HRegionInfo[] regionsToMerge)
-847  throws IOException {
-848execOperation(coprocessors.isEmpty() 
? null : new CoprocessorOperation() {
-849  @Override
-850  public void call(MasterObserver 
oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
-851  throws IOException {
-852oserver.postMergeRegions(ctx, 
regionsToMerge);
-853  }
-854});
-855  }
-856
-857  public boolean preBalance() throws 
IOException {
-858return 
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
-859  @Override
-860  public void call(MasterObserver 
oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
-861  throws IOException {
-862oserver.preBalance(ctx);
-863  }
-864});
-865  }
-866
-867  public void postBalance(final 
List<RegionPlan> plans) throws IOException {
+835  public boolean preBalance() throws 
IOException {
+836return 
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+837  @Override
+838  public void call(MasterObserver 
oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+839  throws IOException {
+840oserver.preBalance(ctx);
+841  }
+842});
+843  }
+844
+845  public void postBalance(final 
List<RegionPlan> plans) throws IOException {
+846execOperation(coprocessors.isEmpty() 
? null : new CoprocessorOperation() {
+847  @Override
+848  public void call(MasterObserver 
oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+849  throws IOException {
+850oserver.postBalance(ctx, 
plans);
+851  }
+852});
+853  }
+854
+855  public boolean 
preSetSplitOrMergeEnabled(final boolean newValue,
+856  final MasterSwitchType switchType) 
throws IOException {
+857return 
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+858  @Override
+859  public void call(MasterObserver 
oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+860  throws IOException {
+861
oserver.preSetSplitOrMergeEnabled(ctx, newValue, switchType);
+862  }
+863});
+864  }
+865
+866  public void 
postSetSplitOrMergeEnabled(final boolean newValue,
+867  final MasterSwitchType switchType) 
throws IOException {
 868execOperation(coprocessors.isEmpty() 
? null : new CoprocessorOperation() {
 869  @Override
 870  public void call(MasterObserver 
oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
 871  throws IOException {
-872oserver.postBalance(ctx, 
plans);
+872
oserver.postSetSplitOrMergeEnabled(ctx, 

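Both the removed preDispatchMerge/postDispatchMerge pair and the preMergeRegions/postMergeRegions pair that replaces it are dispatched through the same execOperation pattern above, so a coprocessor only needs to override the matching MasterObserver hooks. A hedged sketch against the 2017-era API shown in this diff; BaseMasterObserver as the convenience base class is an assumption of that era:

import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

public class MergeAuditObserver extends BaseMasterObserver {
  private static final Log LOG = LogFactory.getLog(MergeAuditObserver.class);

  @Override
  public void preMergeRegions(ObserverContext<MasterCoprocessorEnvironment> ctx,
      HRegionInfo[] regionsToMerge) throws IOException {
    LOG.info("About to merge " + regionsToMerge.length + " regions");
  }

  @Override
  public void postMergeRegions(ObserverContext<MasterCoprocessorEnvironment> ctx,
      HRegionInfo[] regionsToMerge) throws IOException {
    LOG.info("Merge of " + regionsToMerge.length + " regions completed");
  }
}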
[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/org/apache/hadoop/hbase/client/Increment.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Increment.html 
b/devapidocs/org/apache/hadoop/hbase/client/Increment.html
index 0c6b2bd..01308c9 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Increment.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Increment.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -338,22 +338,26 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 Increment
-setReturnResults(boolean returnResults)
+setPriority(int priority)
 
 
 Increment
+setReturnResults(boolean returnResults)
+
+
+Increment
 setTimeRange(long minStamp,
 long maxStamp)
 Sets the TimeRange to be used on the Get for this 
increment.
 
 
-
+
 Increment
 setTTL(long ttl)
 Set the TTL desired for the result of the mutation, in 
milliseconds.
 
 
-
+
 String
 toString()
 Produces a string representation of this Operation.
@@ -372,7 +376,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 Methods inherited from classorg.apache.hadoop.hbase.client.OperationWithAttributes
-getAttribute,
 getAttributeSize,
 getAttributesMap,
 getId
+getAttribute,
 getAttributeSize,
 getAttributesMap,
 getId,
 getPriority
 
 
 
@@ -490,7 +494,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 add
-public Increment add(Cell cell)
+public Increment add(Cell cell)
   throws IOException
 Add the specified KeyValue to this operation.
 
@@ -509,7 +513,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 addColumn
-public Increment addColumn(byte[] family,
+public Increment addColumn(byte[] family,
byte[] qualifier,
long amount)
 Increment the column from the specific family with the 
specified qualifier
@@ -532,7 +536,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 getTimeRange
-public TimeRange getTimeRange()
+public TimeRange getTimeRange()
 Gets the TimeRange used for this increment.
 
 Returns:
@@ -546,7 +550,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 setTimeRange
-public Increment setTimeRange(long minStamp,
+public Increment setTimeRange(long minStamp,
   long maxStamp)
throws IOException
 Sets the TimeRange to be used on the Get for this increment.
@@ -574,7 +578,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 setReturnResults
-public Increment setReturnResults(boolean returnResults)
+public Increment setReturnResults(boolean returnResults)
 
 Overrides:
 setReturnResultsin
 classMutation
@@ -591,7 +595,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 isReturnResults
-public boolean isReturnResults()
+public boolean isReturnResults()
 
 Overrides:
 isReturnResultsin
 classMutation
@@ -606,7 +610,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 numFamilies
-public int numFamilies()
+public int numFamilies()
 Method for retrieving the number of families to increment 
from
 
 Overrides:
@@ -622,7 +626,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 hasFamilies
-public boolean hasFamilies()
+public boolean hasFamilies()
 Method for checking if any families have been inserted into 
this Increment
 
 Returns:
@@ -636,7 +640,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 getFamilyMapOfLongs
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Mapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapbyte[],http://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 
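A short usage sketch for the Increment methods documented above (addColumn, setTimeRange, setReturnResults); the row, family, and qualifier names are illustrative:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementExample {
  public static Increment buildIncrement() throws IOException {
    Increment inc = new Increment(Bytes.toBytes("row-1"));
    // Bump the counter at cf:hits by 1.
    inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);
    // Only consider existing cells in this time range for the increment.
    inc.setTimeRange(0L, Long.MAX_VALUE);
    // Skip shipping the new value back if the client does not need it.
    inc.setReturnResults(false);
    return inc;
  }
}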

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.VisibilityReplication.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.VisibilityReplication.html
 
b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.VisibilityReplication.html
index 5dd7acd..c387500 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.VisibilityReplication.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.VisibilityReplication.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class VisibilityController.VisibilityReplication
+public static class VisibilityController.VisibilityReplication
 extends Object
 implements RegionServerObserver
 A RegionServerObserver impl that provides the custom
@@ -255,7 +255,7 @@ implements 
 
 conf
-private org.apache.hadoop.conf.Configuration conf
+private org.apache.hadoop.conf.Configuration conf
 
 
 
@@ -264,7 +264,7 @@ implements 
 
 visibilityLabelService
-private VisibilityLabelService visibilityLabelService
+private VisibilityLabelService visibilityLabelService
 
 
 
@@ -281,7 +281,7 @@ implements 
 
 VisibilityReplication
-public VisibilityReplication()
+public VisibilityReplication()
 
 
 
@@ -298,7 +298,7 @@ implements 
 
 start
-public void start(CoprocessorEnvironment env)
+public void start(CoprocessorEnvironment env)
 throws IOException
 
 Specified by:
@@ -314,7 +314,7 @@ implements 
 
 stop
-public void stop(CoprocessorEnvironment env)
+public void stop(CoprocessorEnvironment env)
   throws IOException
 
 Specified by:
@@ -330,7 +330,7 @@ implements 
 
 postCreateReplicationEndPoint
-public ReplicationEndpoint postCreateReplicationEndPoint(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
+public ReplicationEndpoint postCreateReplicationEndPoint(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
  ReplicationEndpoint endpoint)
 Description copied from 
interface:RegionServerObserver
 This will be called after the replication endpoint is 
instantiated.

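The hook documented above hands the observer the freshly instantiated endpoint and lets it return a (possibly different) one. A hedged sketch using the 2017-era signatures from this diff; the pass-through behavior is illustrative:

import org.apache.hadoop.hbase.coprocessor.BaseRegionServerObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
import org.apache.hadoop.hbase.replication.ReplicationEndpoint;

public class EndpointObserver extends BaseRegionServerObserver {
  @Override
  public ReplicationEndpoint postCreateReplicationEndPoint(
      ObserverContext<RegionServerCoprocessorEnvironment> ctx,
      ReplicationEndpoint endpoint) {
    // Called after the endpoint is instantiated; return it unchanged,
    // or substitute a decorated endpoint here (as VisibilityReplication does).
    return endpoint;
  }
}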


[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
index 504e470..38667c0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -2866,5375 +2866,5371 @@
 2858checkResources();
 2859
startRegionOperation(Operation.DELETE);
 2860try {
-2861  delete.getRow();
-2862  // All edits for the given row 
(across all column families) must happen atomically.
-2863  doBatchMutate(delete);
-2864} finally {
-2865  
closeRegionOperation(Operation.DELETE);
-2866}
-2867  }
-2868
-2869  /**
-2870   * Row needed by below method.
-2871   */
-2872  private static final byte [] 
FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly");
-2873
-2874  /**
-2875   * This is used only by unit tests. 
Not required to be a public API.
-2876   * @param familyMap map of family to 
edits for the given family.
-2877   * @throws IOException
-2878   */
-2879  void delete(NavigableMap<byte[], 
List<Cell>> familyMap,
-2880  Durability durability) throws 
IOException {
-2881Delete delete = new 
Delete(FOR_UNIT_TESTS_ONLY);
-2882
delete.setFamilyCellMap(familyMap);
-2883delete.setDurability(durability);
-2884doBatchMutate(delete);
-2885  }
-2886
-2887  @Override
-2888  public void 
prepareDeleteTimestamps(Mutation mutation, Map<byte[], List<Cell>> 
familyMap,
-2889  byte[] byteNow) throws IOException 
{
-2890for (Map.Entry<byte[], 
List<Cell>> e : familyMap.entrySet()) {
-2891
-2892  byte[] family = e.getKey();
-2893  List<Cell> cells = 
e.getValue();
-2894  assert cells instanceof 
RandomAccess;
-2895
-2896  Map<byte[], Integer> kvCount 
= new TreeMap<>(Bytes.BYTES_COMPARATOR);
-2897  int listSize = cells.size();
-2898  for (int i=0; i < listSize; 
i++) {
-2899Cell cell = cells.get(i);
-2900//  Check if time is LATEST, 
change to time of most recent addition if so
-2901//  This is expensive.
-2902if (cell.getTimestamp() == 
HConstants.LATEST_TIMESTAMP && CellUtil.isDeleteType(cell)) {
-2903  byte[] qual = 
CellUtil.cloneQualifier(cell);
-2904  if (qual == null) qual = 
HConstants.EMPTY_BYTE_ARRAY;
-2905
-2906  Integer count = 
kvCount.get(qual);
-2907  if (count == null) {
-2908kvCount.put(qual, 1);
-2909  } else {
-2910kvCount.put(qual, count + 
1);
-2911  }
-2912  count = kvCount.get(qual);
-2913
-2914  Get get = new 
Get(CellUtil.cloneRow(cell));
-2915  get.setMaxVersions(count);
-2916  get.addColumn(family, qual);
-2917  if (coprocessorHost != null) 
{
-2918if 
(!coprocessorHost.prePrepareTimeStampForDeleteVersion(mutation, cell,
-2919byteNow, get)) {
-2920  
updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-2921}
-2922  } else {
-2923
updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-2924  }
-2925} else {
-2926  
CellUtil.updateLatestStamp(cell, byteNow, 0);
-2927}
-2928  }
-2929}
-2930  }
-2931
-2932  void 
updateDeleteLatestVersionTimeStamp(Cell cell, Get get, int count, byte[] 
byteNow)
-2933  throws IOException {
-2934List<Cell> result = get(get, 
false);
-2935
-2936if (result.size() < count) {
-2937  // Nothing to delete
-2938  CellUtil.updateLatestStamp(cell, 
byteNow, 0);
-2939  return;
-2940}
-2941if (result.size() > count) {
-2942  throw new 
RuntimeException("Unexpected size: " + result.size());
-2943}
-2944Cell getCell = result.get(count - 
1);
-2945CellUtil.setTimestamp(cell, 
getCell.getTimestamp());
-2946  }
-2947
-2948  @Override
-2949  public void put(Put put) throws 
IOException {
-2950checkReadOnly();
-2951
-2952// Do a rough check that we have 
resources to accept a write.  The check is
-2953// 'rough' in that between the 
resource check and the call to obtain a
-2954// read lock, resources may run out. 
 For now, the thought is that this
-2955// will be extremely rare; we'll 
deal with it when it happens.
-2956checkResources();
-2957
startRegionOperation(Operation.PUT);
-2958try {
-2959  // All edits for the given row 
(across all column families) must happen atomically.
-2960  doBatchMutate(put);
-2961} finally {
-2962  
closeRegionOperation(Operation.PUT);
-2963}
-2964  }
-2965
-2966  /**
-2967   * Struct-like class that tracks the 
progress of a batch operation,
-2968   * accumulating status codes and 
tracking the index at which processing
-2969   * is proceeding.

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
index feb42ea..4bd98f4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
@@ -185,4189 +185,4266 @@
 177import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
 178import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
 179import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
-180import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
-181import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
-182import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
-183import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
-184import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-185import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
-186import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
-187import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
-188import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-189import 
org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-190import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-191import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-192import 
org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-193import 
org.apache.hadoop.hbase.util.Addressing;
-194import 
org.apache.hadoop.hbase.util.Bytes;
-195import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-196import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-197import 
org.apache.hadoop.hbase.util.Pair;
-198import 
org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-199import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-200import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-201import 
org.apache.hadoop.ipc.RemoteException;
-202import 
org.apache.hadoop.util.StringUtils;
-203import 
org.apache.zookeeper.KeeperException;
-204
-205import 
com.google.common.annotations.VisibleForTesting;
-206import com.google.protobuf.Descriptors;
-207import com.google.protobuf.Message;
-208import 
com.google.protobuf.RpcController;
-209import java.util.stream.Collectors;
-210
-211/**
-212 * HBaseAdmin is no longer a client API. 
It is marked InterfaceAudience.Private indicating that
-213 * this is an HBase-internal class as 
defined in
-214 * 
https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
-215 * There are no guarantees for backwards 
source / binary compatibility and methods or class can
-216 * change or go away without 
deprecation.
-217 * Use {@link Connection#getAdmin()} to 
obtain an instance of {@link Admin} instead of constructing
-218 * an HBaseAdmin directly.
-219 *
-220 * Connection should be an 
unmanaged connection obtained via
-221 * {@link 
ConnectionFactory#createConnection(Configuration)}
-222 *
-223 * @see ConnectionFactory
-224 * @see Connection
-225 * @see Admin
-226 */
-227@InterfaceAudience.Private
-228@InterfaceStability.Evolving
-229public class HBaseAdmin implements Admin 
{
-230  private static final Log LOG = 
LogFactory.getLog(HBaseAdmin.class);
-231
-232  private static final String 
ZK_IDENTIFIER_PREFIX =  "hbase-admin-on-";
+180import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
+181import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
+182import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
+183import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
+184import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
+185import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
+186import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+187import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
+188import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
+189import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
+190import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;

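The class javadoc above spells out the supported pattern: do not construct HBaseAdmin directly; obtain an Admin from an unmanaged Connection. A short sketch of that pattern using the public client API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AdminExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ConnectionFactory.createConnection gives the unmanaged connection the
    // javadoc asks for; both resources are closed by try-with-resources.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.listTableNames();
    }
  }
}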
[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/hbase-spark/license.html
--
diff --git a/hbase-spark/license.html b/hbase-spark/license.html
index 2ce08f1..ea4308f 100644
--- a/hbase-spark/license.html
+++ b/hbase-spark/license.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-07-11
+Last Published: 2017-07-12
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Spark
@@ -117,210 +117,7 @@
 Project Licenses
 
 Apache License, Version 
2.0
-
-
- Apache License
-   Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-  License shall mean the terms and conditions for use, 
reproduction,
-  and distribution as defined by Sections 1 through 9 of this document.
-
-  Licensor shall mean the copyright owner or entity authorized 
by
-  the copyright owner that is granting the License.
-
-  Legal Entity shall mean the union of the acting entity and 
all
-  other entities that control, are controlled by, or are under common
-  control with that entity. For the purposes of this definition,
-  control means (i) the power, direct or indirect, to cause the
-  direction or management of such entity, whether by contract or
-  otherwise, or (ii) ownership of fifty percent (50%) or more of the
-  outstanding shares, or (iii) beneficial ownership of such entity.
-
-  You (or Your) shall mean an individual or Legal 
Entity
-  exercising permissions granted by this License.
-
-  Source form shall mean the preferred form for making 
modifications,
-  including but not limited to software source code, documentation
-  source, and configuration files.
-
-  Object form shall mean any form resulting from mechanical
-  transformation or translation of a Source form, including but
-  not limited to compiled object code, generated documentation,
-  and conversions to other media types.
-
-  Work shall mean the work of authorship, whether in Source or
-  Object form, made available under the License, as indicated by a
-  copyright notice that is included in or attached to the work
-  (an example is provided in the Appendix below).
-
-  Derivative Works shall mean any work, whether in Source or 
Object
-  form, that is based on (or derived from) the Work and for which the
-  editorial revisions, annotations, elaborations, or other modifications
-  represent, as a whole, an original work of authorship. For the purposes
-  of this License, Derivative Works shall not include works that remain
-  separable from, or merely link (or bind by name) to the interfaces of,
-  the Work and Derivative Works thereof.
-
-  Contribution shall mean any work of authorship, including
-  the original version of the Work and any modifications or additions
-  to that Work or Derivative Works thereof, that is intentionally
-  submitted to Licensor for inclusion in the Work by the copyright owner
-  or by an individual or Legal Entity authorized to submit on behalf of
-  the copyright owner. For the purposes of this definition, 
submitted
-  means any form of electronic, verbal, or written communication sent
-  to the Licensor or its representatives, including but not limited to
-  communication on electronic mailing lists, source code control systems,
-  and issue tracking systems that are managed by, or on behalf of, the
-  Licensor for the purpose of discussing and improving the Work, but
-  excluding communication that is conspicuously marked or otherwise
-  designated in writing by the copyright owner as Not a 
Contribution.
-
-  Contributor shall mean Licensor and any individual or Legal 
Entity
-  on behalf of whom a Contribution has been received by Licensor and
-  subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-  this License, each Contributor hereby grants to You a perpetual,
-  worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-  copyright license to reproduce, prepare Derivative Works of,
-  publicly display, publicly perform, sublicense, and distribute the
-  Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-  this License, each Contributor 

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
index 75db22d..99a09f9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
@@ -37,2710 +37,2816 @@
 029import java.util.List;
 030import java.util.Map;
 031import java.util.Optional;
-032import java.util.concurrent.CompletableFuture;
-033import java.util.concurrent.TimeUnit;
-034import java.util.concurrent.atomic.AtomicReference;
-035import java.util.function.BiConsumer;
-036import java.util.regex.Pattern;
-037import java.util.stream.Collectors;
-038
-039import com.google.common.annotations.VisibleForTesting;
-040
-041import io.netty.util.Timeout;
-042import io.netty.util.TimerTask;
-043
-044import java.util.stream.Stream;
-045
-046import org.apache.commons.io.IOUtils;
-047import org.apache.commons.logging.Log;
-048import org.apache.commons.logging.LogFactory;
-049import org.apache.hadoop.hbase.ClusterStatus;
-050import org.apache.hadoop.hbase.HRegionInfo;
-051import org.apache.hadoop.hbase.HRegionLocation;
-052import org.apache.hadoop.hbase.MetaTableAccessor;
-053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import org.apache.hadoop.hbase.NotServingRegionException;
-055import org.apache.hadoop.hbase.ProcedureInfo;
-056import org.apache.hadoop.hbase.RegionLoad;
-057import org.apache.hadoop.hbase.RegionLocations;
-058import org.apache.hadoop.hbase.ServerName;
-059import org.apache.hadoop.hbase.NamespaceDescriptor;
-060import org.apache.hadoop.hbase.HConstants;
-061import org.apache.hadoop.hbase.TableExistsException;
-062import org.apache.hadoop.hbase.TableName;
-063import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-064import org.apache.hadoop.hbase.TableNotDisabledException;
-065import org.apache.hadoop.hbase.TableNotEnabledException;
-066import org.apache.hadoop.hbase.TableNotFoundException;
-067import org.apache.hadoop.hbase.UnknownRegionException;
-068import org.apache.hadoop.hbase.classification.InterfaceAudience;
-069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import org.apache.hadoop.hbase.client.Scan.ReadType;
-072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import org.apache.hadoop.hbase.client.replication.TableCFs;
-074import org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import org.apache.hadoop.hbase.replication.ReplicationException;
-080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-098import 

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
index 71844ce..75db22d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
@@ -105,2564 +105,2642 @@
 097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
 099import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-105import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-106import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-107import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-108import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-109import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-110import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-111import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-112import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-113import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-114import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-115import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-139import 

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html
index f5bc73a..feb42ea 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html
@@ -4044,345 +4044,330 @@
 4036
 4037  @Override
 4038  public void drainRegionServers(List<ServerName> servers) throws IOException {
-4039    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4040    for (ServerName server : servers) {
-4041      // Parse to ServerName to do simple validation.
-4042      ServerName.parseServerName(server.toString());
-4043      pbServers.add(ProtobufUtil.toServerName(server));
-4044    }
-4045
-4046    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4047      @Override
-4048      public Void rpcCall() throws ServiceException {
-4049        DrainRegionServersRequest req =
-4050            DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build();
-4051        master.drainRegionServers(getRpcController(), req);
-4052        return null;
-4053      }
-4054    });
-4055  }
-4056
-4057  @Override
-4058  public List<ServerName> listDrainingRegionServers() throws IOException {
-4059    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
-4060      getRpcControllerFactory()) {
-4061      @Override
-4062      public List<ServerName> rpcCall() throws ServiceException {
-4063        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
-4064        List<ServerName> servers = new ArrayList<>();
-4065        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
-4066            .getServerNameList()) {
-4067          servers.add(ProtobufUtil.toServerName(server));
-4068        }
-4069        return servers;
-4070      }
-4071    });
-4072  }
-4073
-4074  @Override
-4075  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
-4076    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4077    for (ServerName server : servers) {
-4078      pbServers.add(ProtobufUtil.toServerName(server));
-4079    }
-4080
-4081    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4082      @Override
-4083      public Void rpcCall() throws ServiceException {
-4084        RemoveDrainFromRegionServersRequest req = RemoveDrainFromRegionServersRequest.newBuilder()
-4085            .addAllServerName(pbServers).build();
-4086        master.removeDrainFromRegionServers(getRpcController(), req);
-4087        return null;
+4039    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4040      @Override
+4041      public Void rpcCall() throws ServiceException {
+4042        master.drainRegionServers(getRpcController(),
+4043          RequestConverter.buildDrainRegionServersRequest(servers));
+4044        return null;
+4045      }
+4046    });
+4047  }
+4048
+4049  @Override
+4050  public List<ServerName> listDrainingRegionServers() throws IOException {
+4051    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
+4052      getRpcControllerFactory()) {
+4053      @Override
+4054      public List<ServerName> rpcCall() throws ServiceException {
+4055        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
+4056        List<ServerName> servers = new ArrayList<>();
+4057        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
+4058            .getServerNameList()) {
+4059          servers.add(ProtobufUtil.toServerName(server));
+4060        }
+4061        return servers;
+4062      }
+4063    });
+4064  }
+4065
+4066  @Override
+4067  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
+4068    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4069      @Override
+4070      public Void rpcCall() throws ServiceException {
+4071        master.removeDrainFromRegionServers(getRpcController(), RequestConverter.buildRemoveDrainFromRegionServersRequest(servers));
+4072        return null;
+4073      }
+4074    });
+4075  }
+4076
+4077  @Override
+4078  public List<TableCFs> listReplicatedTableCFs() throws IOException {
+4079    List<TableCFs> replicatedTableCFs = new ArrayList<>();
+4080    HTableDescriptor[] tables = listTables();
+4081    for (HTableDescriptor table : tables) {
+4082      
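
The hunk above replaces per-method protobuf assembly (validate, convert each ServerName, build the request inline) with shared helpers on RequestConverter, so the blocking and async admin implementations build identical requests. A minimal sketch of such a helper, using only the calls already visible in this diff (ProtobufUtil.toServerName and the generated DrainRegionServersRequest builder); this is an illustration of the pattern, not the exact upstream code:

    import java.util.List;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;

    public final class DrainRequestSketch {
      private DrainRequestSketch() {}

      // Convert the client-side ServerName objects and build the request in one
      // shared place, instead of repeating the loop in every Admin implementation.
      public static DrainRegionServersRequest buildDrainRegionServersRequest(List<ServerName> servers) {
        DrainRegionServersRequest.Builder builder = DrainRegionServersRequest.newBuilder();
        for (ServerName server : servers) {
          builder.addServerName(ProtobufUtil.toServerName(server));
        }
        return builder.build();
      }
    }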

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/devapidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html b/devapidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
index cd53b56..5f8eff1 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
@@ -593,10 +593,6 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 private HTableDescriptor
-HBaseAdmin.CreateTableFuture.desc
-
-
-private HTableDescriptor
 TableSnapshotScanner.htd
 
 
@@ -611,7 +607,12 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 HTableDescriptor[]
 Admin.deleteTables(Pattern pattern)
-Delete tables matching the passed in pattern and wait on completion.
+Deprecated. since 2.0 version and will be removed in 3.0 version.
+ This is just a trivial helper method without any magic.
+ Consider using Admin.listTableDescriptors(java.util.regex.Pattern)
+ and Admin.deleteTable(org.apache.hadoop.hbase.TableName)
+
 
 
 
@@ -623,7 +624,12 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 HTableDescriptor[]
 Admin.deleteTables(String regex)
-Deletes tables matching the passed in pattern and wait on completion.
+Deprecated. since 2.0 version and will be removed in 3.0 version.
+ This is just a trivial helper method without any magic.
+ Consider using Admin.listTableDescriptors(java.lang.String)
+ and Admin.deleteTable(org.apache.hadoop.hbase.TableName)
+
 
 
 
@@ -633,7 +639,12 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 HTableDescriptor[]
 Admin.disableTables(Pattern pattern)
-Disable tables matching the passed in pattern and wait on completion.
+Deprecated. since 2.0 version and will be removed in 3.0 version.
+ This is just a trivial helper method without any magic.
+ Consider using Admin.listTableDescriptors(java.util.regex.Pattern)
+ and Admin.disableTable(org.apache.hadoop.hbase.TableName)
+
 
 
 
@@ -643,7 +654,12 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 HTableDescriptor[]
 Admin.disableTables(String regex)
-Disable tables matching the passed in pattern and wait on completion.
+Deprecated. since 2.0 version and will be removed in 3.0 version.
+ This is just a trivial helper method without any magic.
+ Consider using Admin.listTableDescriptors(java.lang.String)
+ and Admin.disableTable(org.apache.hadoop.hbase.TableName)
+
 
 
 
@@ -653,7 +669,12 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 HTableDescriptor[]
 Admin.enableTables(Pattern pattern)
-Enable tables matching the passed in pattern and wait on completion.
+Deprecated. since 2.0 version and will be removed in 3.0 version.
+ This is just a trivial helper method without any magic.
+ Consider using Admin.listTableDescriptors(java.util.regex.Pattern)
+ and Admin.enableTable(org.apache.hadoop.hbase.TableName)
+
 
 
 
@@ -663,7 +684,12 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 HTableDescriptor[]
 Admin.enableTables(String regex)
-Enable tables matching the passed in pattern and wait on completion.
+Deprecated. since 2.0 version and will be removed in 3.0 version.
+ This is just a trivial helper method without any magic.
+ Consider using Admin.listTableDescriptors(java.lang.String)
+ and Admin.enableTable(org.apache.hadoop.hbase.TableName)
+
 
 
 
@@ -671,48 +697,52 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 HBaseAdmin.enableTables(String regex)
 
 
+(package private) static HTableDescriptor
+HBaseAdmin.getHTableDescriptor(TableName tableName,
+  
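
As the deprecation notes above spell out, the bulk helpers are trivial loops; the suggested replacement is to list matching descriptors and act on each table. A sketch of that migration against the HBase 2.0 Admin API, assuming an open Connection (method and variable names here are illustrative):

    import java.io.IOException;
    import java.util.regex.Pattern;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public final class DisableMatchingTables {
      // Replacement for the deprecated admin.disableTables(Pattern): enumerate
      // the matching tables explicitly, then disable each one.
      public static void disableMatching(Connection conn, Pattern pattern) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          for (TableDescriptor td : admin.listTableDescriptors(pattern)) {
            TableName name = td.getTableName();
            if (admin.isTableEnabled(name)) {
              admin.disableTable(name);
            }
          }
        }
      }
    }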

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
index 43db01d..79dc4e0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
@@ -235,7 +235,7 @@
 227  public BucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes,
 228      int writerThreadNum, int writerQLen, String persistencePath, int ioErrorsTolerationDuration)
 229      throws FileNotFoundException, IOException {
-230    this.ioEngine = getIOEngineFromName(ioEngineName, capacity);
+230    this.ioEngine = getIOEngineFromName(ioEngineName, capacity, persistencePath);
 231    this.writerThreads = new WriterThread[writerThreadNum];
 232    long blockNumCapacity = capacity / blockSize;
 233    if (blockNumCapacity >= Integer.MAX_VALUE) {
@@ -317,1229 +317,1230 @@
 309   * Get the IOEngine from the IO engine name
 310   * @param ioEngineName
 311   * @param capacity
-312   * @return the IOEngine
-313   * @throws IOException
-314   */
-315  private IOEngine getIOEngineFromName(String ioEngineName, long capacity)
-316      throws IOException {
-317    if (ioEngineName.startsWith("file:") || ioEngineName.startsWith("files:")) {
-318      // In order to make the usage simple, we only need the prefix 'files:' in
-319      // document whether one or multiple file(s), but also support 'file:' for
-320      // the compatibility
-321      String[] filePaths = ioEngineName.substring(ioEngineName.indexOf(":") + 1)
-322          .split(FileIOEngine.FILE_DELIMITER);
-323      return new FileIOEngine(capacity, filePaths);
-324    } else if (ioEngineName.startsWith("offheap")) {
-325      return new ByteBufferIOEngine(capacity, true);
-326    } else if (ioEngineName.startsWith("heap")) {
-327      return new ByteBufferIOEngine(capacity, false);
-328    } else if (ioEngineName.startsWith("mmap:")) {
-329      return new FileMmapEngine(ioEngineName.substring(5), capacity);
-330    } else {
-331      throw new IllegalArgumentException(
-332          "Don't understand io engine name for cache - prefix with file:, heap or offheap");
-333    }
-334  }
-335
-336  /**
-337   * Cache the block with the specified name and buffer.
-338   * @param cacheKey block's cache key
-339   * @param buf block buffer
-340   */
-341  @Override
-342  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
-343    cacheBlock(cacheKey, buf, false, false);
-344  }
-345
-346  /**
-347   * Cache the block with the specified name and buffer.
-348   * @param cacheKey block's cache key
-349   * @param cachedItem block buffer
-350   * @param inMemory if block is in-memory
-351   * @param cacheDataInL1
-352   */
-353  @Override
-354  public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory,
-355      final boolean cacheDataInL1) {
-356    cacheBlockWithWait(cacheKey, cachedItem, inMemory, wait_when_cache);
-357  }
-358
-359  /**
-360   * Cache the block to ramCache
-361   * @param cacheKey block's cache key
-362   * @param cachedItem block buffer
-363   * @param inMemory if block is in-memory
-364   * @param wait if true, blocking wait when queue is full
-365   */
-366  public void cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory,
-367      boolean wait) {
-368    if (LOG.isTraceEnabled()) LOG.trace("Caching key=" + cacheKey + ", item=" + cachedItem);
-369    if (!cacheEnabled) {
-370      return;
-371    }
-372
-373    if (backingMap.containsKey(cacheKey)) {
-374      return;
-375    }
-376
-377    /*
-378     * Stuff the entry into the RAM cache so it can get drained to the persistent store
-379     */
-380    RAMQueueEntry re =
-381        new RAMQueueEntry(cacheKey, cachedItem, accessCount.incrementAndGet(), inMemory);
-382    if (ramCache.putIfAbsent(cacheKey, re) != null) {
-383      return;
-384    }
-385    int queueNum = (cacheKey.hashCode() & 0x7FFFFFFF) % writerQueues.size();
-386    BlockingQueue<RAMQueueEntry> bq = writerQueues.get(queueNum);
-387    boolean successfulAddition = false;
-388    if (wait) {
-389      try {
-390        successfulAddition = bq.offer(re, DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
-391      } catch (InterruptedException e) {
-392        Thread.currentThread().interrupt();
-393      }
-394    } else {
-395      successfulAddition = bq.offer(re);
-396    }
-397    if (!successfulAddition) {
-398      ramCache.remove(cacheKey);
-399      cacheStats.failInsert();
-400    } else {
-401      this.blockNumber.incrementAndGet();
-402      
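
The removed getIOEngineFromName above (re-added further down this diff with a persistencePath parameter) picks the cache backend purely from the prefix of the configured engine name. A self-contained toy mirroring that dispatch, so the prefix grammar is easy to see; the real method constructs FileIOEngine, ByteBufferIOEngine or FileMmapEngine rather than returning strings:

    public final class IoEngineNameDemo {
      // The engine kind is encoded in the name's prefix; anything after ':' is
      // engine-specific (file paths for file:/files:/mmap:).
      static String describe(String ioEngineName) {
        if (ioEngineName.startsWith("file:") || ioEngineName.startsWith("files:")) {
          return "file-backed cache: " + ioEngineName.substring(ioEngineName.indexOf(':') + 1);
        } else if (ioEngineName.startsWith("offheap")) {
          return "off-heap ByteBuffer cache";
        } else if (ioEngineName.startsWith("heap")) {
          return "on-heap ByteBuffer cache";
        } else if (ioEngineName.startsWith("mmap:")) {
          return "memory-mapped file cache: " + ioEngineName.substring(5);
        }
        throw new IllegalArgumentException("Don't understand io engine name: " + ioEngineName);
      }

      public static void main(String[] args) {
        System.out.println(describe("files:/mnt/ssd1/bucket.cache,/mnt/ssd2/bucket.cache"));
        System.out.println(describe("offheap"));
      }
    }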

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
new file mode 100644
index 000..90bc7c8
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
@@ -0,0 +1,395 @@
+RawAsyncHBaseAdmin.TableProcedureBiConsumer (Apache HBase 3.0.0-SNAPSHOT API)
+[standard Javadoc page scripts and navigation links omitted]
+org.apache.hadoop.hbase.client
+Class RawAsyncHBaseAdmin.TableProcedureBiConsumer
+
+java.lang.Object
+  org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.ProcedureBiConsumer
+    org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.TableProcedureBiConsumer
+
+All Implemented Interfaces:
+java.util.function.BiConsumer<Void, Throwable>
+
+Direct Known Subclasses:
+RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer, RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer,
+RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer, RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer,
+RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer, RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer,
+RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer, RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer,
+RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer
+
+Enclosing class:
+RawAsyncHBaseAdmin
+
+private abstract class RawAsyncHBaseAdmin.TableProcedureBiConsumer
+extends RawAsyncHBaseAdmin.ProcedureBiConsumer
+
+Field Summary
+protected TableName  tableName
+
+Fields inherited from class org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.ProcedureBiConsumer:
+admin
+
+Constructor Summary
+TableProcedureBiConsumer(AsyncAdmin admin, TableName tableName)
+
+Method Summary
+(package private) String           getDescription()
+(package private) abstract String  getOperationType()
+(package private) void             onError(Throwable error)
+(package private) void             onFinished()
+
+Methods inherited from class org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.ProcedureBiConsumer:
+accept
+
+Methods inherited from class java.lang.Object:
+clone, 
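
TableProcedureBiConsumer and the subclasses listed above are completion callbacks: a BiConsumer<Void, Throwable> hung off the CompletableFuture of a master procedure, routing to onFinished() or onError(). A standalone analogue of that contract (names and log output here are illustrative, not the RawAsyncHBaseAdmin internals):

    import java.util.function.BiConsumer;

    abstract class ProcedureCompletionLogger implements BiConsumer<Void, Throwable> {
      // Subclasses name the operation, mirroring getOperationType() above.
      abstract String getOperationType();

      void onFinished() {
        System.out.println(getOperationType() + " completed");
      }

      void onError(Throwable error) {
        System.err.println(getOperationType() + " failed: " + error);
      }

      @Override
      public void accept(Void v, Throwable error) {
        // CompletableFuture.whenComplete passes a non-null Throwable on failure.
        if (error != null) {
          onError(error);
        } else {
          onFinished();
        }
      }
    }

A procedure future would then be wired up with something like future.whenComplete(logger), which is how accept() ends up invoked exactly once per operation.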

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
index dc12c09..82506d2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
@@ -54,2261 +54,2259 @@
 046import org.apache.commons.io.IOUtils;
 047import org.apache.commons.logging.Log;
 048import org.apache.commons.logging.LogFactory;
-049import org.apache.directory.api.util.OptionalComponentsMonitor;
-050import org.apache.hadoop.hbase.HRegionInfo;
-051import org.apache.hadoop.hbase.HRegionLocation;
-052import org.apache.hadoop.hbase.MetaTableAccessor;
-053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import org.apache.hadoop.hbase.NotServingRegionException;
-055import org.apache.hadoop.hbase.ProcedureInfo;
-056import org.apache.hadoop.hbase.RegionLocations;
-057import org.apache.hadoop.hbase.ServerName;
-058import org.apache.hadoop.hbase.NamespaceDescriptor;
-059import org.apache.hadoop.hbase.HConstants;
-060import org.apache.hadoop.hbase.TableExistsException;
-061import org.apache.hadoop.hbase.TableName;
-062import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-063import org.apache.hadoop.hbase.TableNotDisabledException;
-064import org.apache.hadoop.hbase.TableNotEnabledException;
-065import org.apache.hadoop.hbase.TableNotFoundException;
-066import org.apache.hadoop.hbase.UnknownRegionException;
-067import org.apache.hadoop.hbase.classification.InterfaceAudience;
-068import org.apache.hadoop.hbase.classification.InterfaceStability;
-069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import org.apache.hadoop.hbase.client.Scan.ReadType;
-072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import org.apache.hadoop.hbase.client.replication.TableCFs;
-074import org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import org.apache.hadoop.hbase.replication.ReplicationException;
-080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-104import 

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
index e65748d..91a0ffa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
@@ -372,1874 +372,1873 @@
 364   * is stored in the name, so the returned object should only be used for the fields
 365   * in the regionName.
 366   */
-367  protected static HRegionInfo parseRegionInfoFromRegionName(byte[] regionName)
-368    throws IOException {
-369    byte[][] fields = HRegionInfo.parseRegionName(regionName);
-370    long regionId = Long.parseLong(Bytes.toString(fields[2]));
-371    int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0;
-372    return new HRegionInfo(
-373      TableName.valueOf(fields[0]), fields[1], fields[1], false, regionId, replicaId);
-374  }
-375
-376  /**
-377   * Gets the result in hbase:meta for the specified region.
-378   * @param connection connection we're using
-379   * @param regionName region we're looking for
-380   * @return result of the specified region
-381   * @throws IOException
-382   */
-383  public static Result getRegionResult(Connection connection,
-384      byte[] regionName) throws IOException {
-385    Get get = new Get(regionName);
-386    get.addFamily(HConstants.CATALOG_FAMILY);
-387    return get(getMetaHTable(connection), get);
-388  }
-389
-390  /**
-391   * Get regions from the merge qualifier of the specified merged region
-392   * @return null if it doesn't contain merge qualifier, else two merge regions
-393   * @throws IOException
-394   */
-395  @Nullable
-396  public static Pair<HRegionInfo, HRegionInfo> getRegionsFromMergeQualifier(
-397      Connection connection, byte[] regionName) throws IOException {
-398    Result result = getRegionResult(connection, regionName);
-399    HRegionInfo mergeA = getHRegionInfo(result, HConstants.MERGEA_QUALIFIER);
-400    HRegionInfo mergeB = getHRegionInfo(result, HConstants.MERGEB_QUALIFIER);
-401    if (mergeA == null && mergeB == null) {
-402      return null;
-403    }
-404    return new Pair<>(mergeA, mergeB);
-405 }
-406
-407  /**
-408   * Checks if the specified table exists.  Looks at the hbase:meta table hosted on
-409   * the specified server.
-410   * @param connection connection we're using
-411   * @param tableName table to check
-412   * @return true if the table exists in meta, false if not
-413   * @throws IOException
-414   */
-415  public static boolean tableExists(Connection connection,
-416      final TableName tableName)
-417  throws IOException {
-418    // Catalog tables always exist.
-419    return tableName.equals(TableName.META_TABLE_NAME)
-420        || getTableState(connection, tableName) != null;
-421  }
-422
-423  /**
-424   * Lists all of the regions currently in META.
-425   *
-426   * @param connection to connect with
-427   * @param excludeOfflinedSplitParents False if we are to include offlined/splitparents regions,
-428   *                                    true and we'll leave out offlined regions from returned list
-429   * @return List of all user-space regions.
-430   * @throws IOException
-431   */
-432  @VisibleForTesting
-433  public static List<HRegionInfo> getAllRegions(Connection connection,
-434      boolean excludeOfflinedSplitParents)
-435  throws IOException {
-436    List<Pair<HRegionInfo, ServerName>> result;
-437
-438    result = getTableRegionsAndLocations(connection, null,
-439        excludeOfflinedSplitParents);
-440
-441    return getListOfHRegionInfos(result);
-442
-443  }
-444
-445  /**
-446   * Gets all of the regions of the specified table. Do not use this method
-447   * to get meta table regions, use methods in MetaTableLocator instead.
-448   * @param connection connection we're using
-449   * @param tableName table we're looking for
-450   * @return Ordered list of {@link HRegionInfo}.
-451   * @throws IOException
-452   */
-453  public static List<HRegionInfo> getTableRegions(Connection connection, TableName tableName)
-454  throws IOException {
-455    return getTableRegions(connection, tableName, false);
-456  }
-457
-458  /**
-459   * Gets all of the regions of the specified table. Do not use this method
-460   * to get meta table regions, use methods in MetaTableLocator instead.
-461   * @param connection connection we're using
-462   * @param tableName table we're looking for
-463   * @param excludeOfflinedSplitParents If true, do not include offlined split
-464   * parents in the return.
-465   * @return Ordered list of {@link HRegionInfo}.
-466   * @throws IOException
-467   */
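
parseRegionInfoFromRegionName above leans on the field layout that HRegionInfo.parseRegionName returns: table name, start key, region id, and an optional fourth field carrying the replica id in hex. A self-contained sketch of just that replica-id convention, with plain strings standing in for HBase's byte[] fields (illustrative, not the real parser):

    public final class RegionNameFieldsDemo {
      // fields[0] = table, fields[1] = start key, fields[2] = region id,
      // fields[3] (optional) = replica id encoded in hex.
      static int replicaIdOf(String[] fields) {
        return fields.length > 3 ? Integer.parseInt(fields[3], 16) : 0;
      }

      public static void main(String[] args) {
        String[] primary = {"usertable", "row-0", "1495032800000"};
        String[] replica = {"usertable", "row-0", "1495032800000", "0001"};
        System.out.println(replicaIdOf(primary)); // 0
        System.out.println(replicaIdOf(replica)); // 1
      }
    }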

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemstoreSizeCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemstoreSizeCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemstoreSizeCostFunction.html
index 6de986f..c895448 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemstoreSizeCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemstoreSizeCostFunction.html
@@ -26,1592 +26,1693 @@
 018package org.apache.hadoop.hbase.master.balancer;
 019
 020import java.util.ArrayDeque;
-021import java.util.Arrays;
-022import java.util.Collection;
-023import java.util.Deque;
-024import java.util.HashMap;
-025import java.util.LinkedList;
-026import java.util.List;
-027import java.util.Map;
-028import java.util.Map.Entry;
-029import java.util.Random;
-030
-031import org.apache.commons.logging.Log;
-032import org.apache.commons.logging.LogFactory;
-033import org.apache.hadoop.conf.Configuration;
-034import org.apache.hadoop.hbase.ClusterStatus;
-035import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-036import org.apache.hadoop.hbase.HConstants;
-037import org.apache.hadoop.hbase.HRegionInfo;
-038import org.apache.hadoop.hbase.RegionLoad;
-039import org.apache.hadoop.hbase.ServerLoad;
-040import org.apache.hadoop.hbase.ServerName;
-041import org.apache.hadoop.hbase.TableName;
-042import org.apache.hadoop.hbase.classification.InterfaceAudience;
-043import org.apache.hadoop.hbase.master.MasterServices;
-044import org.apache.hadoop.hbase.master.RegionPlan;
-045import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
-046import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
-047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
-048import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
-049import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-050import org.apache.hadoop.hbase.util.Bytes;
-051import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-052
-053import com.google.common.collect.Lists;
-054
-055/**
-056 * <p>This is a best effort load balancer. Given a Cost function F(C) => x It will
-057 * randomly try and mutate the cluster to Cprime. If F(Cprime) < F(C) then the
-058 * new cluster state becomes the plan. It includes costs functions to compute the cost of:</p>
-059 * <ul>
-060 * <li>Region Load</li>
-061 * <li>Table Load</li>
-062 * <li>Data Locality</li>
-063 * <li>Memstore Sizes</li>
-064 * <li>Storefile Sizes</li>
-065 * </ul>
-066 *
-067 *
-068 * <p>Every cost function returns a number between 0 and 1 inclusive; where 0 is the lowest cost
-069 * best solution, and 1 is the highest possible cost and the worst solution.  The computed costs are
-070 * scaled by their respective multipliers:</p>
+021import java.util.ArrayList;
+022import java.util.Arrays;
+023import java.util.Collection;
+024import java.util.Collections;
+025import java.util.Deque;
+026import java.util.HashMap;
+027import java.util.LinkedList;
+028import java.util.List;
+029import java.util.Map;
+030import java.util.Map.Entry;
+031import java.util.Random;
+032
+033import org.apache.commons.logging.Log;
+034import org.apache.commons.logging.LogFactory;
+035import org.apache.hadoop.conf.Configuration;
+036import org.apache.hadoop.hbase.ClusterStatus;
+037import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+038import org.apache.hadoop.hbase.HConstants;
+039import org.apache.hadoop.hbase.HRegionInfo;
+040import org.apache.hadoop.hbase.RegionLoad;
+041import org.apache.hadoop.hbase.ServerLoad;
+042import org.apache.hadoop.hbase.ServerName;
+043import org.apache.hadoop.hbase.TableName;
+044import org.apache.hadoop.hbase.classification.InterfaceAudience;
+045import org.apache.hadoop.hbase.master.MasterServices;
+046import org.apache.hadoop.hbase.master.RegionPlan;
+047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
+048import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
+049import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
+050import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
+051import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
+052import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
+053import org.apache.hadoop.hbase.util.Bytes;
+054import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+055
+056import com.google.common.base.Optional;
+057import 
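
The class javadoc above says each cost function yields a value in [0,1] (0 best, 1 worst) and is scaled by its multiplier, so the balancer is effectively minimizing a weighted sum. A small arithmetic sketch of that combination; the weights are made up for illustration and are not HBase's defaults:

    public final class WeightedCostDemo {
      // Sum of normalized costs, each scaled by its configured multiplier.
      static double totalCost(double[] costs, double[] multipliers) {
        double sum = 0;
        for (int i = 0; i < costs.length; i++) {
          sum += costs[i] * multipliers[i];
        }
        return sum;
      }

      public static void main(String[] args) {
        double[] costs = {0.2, 0.7, 0.1};    // e.g. region load, locality, memstore size
        double[] multipliers = {500, 25, 5}; // hypothetical weights
        System.out.println(totalCost(costs, multipliers)); // 118.0
      }
    }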

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.TableQuotasVisitor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.TableQuotasVisitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.TableQuotasVisitor.html
index 509b93c..3c6f9b8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.TableQuotasVisitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.TableQuotasVisitor.html
@@ -53,606 +53,717 @@
 045import org.apache.hadoop.hbase.client.ResultScanner;
 046import org.apache.hadoop.hbase.client.Scan;
 047import org.apache.hadoop.hbase.client.Table;
-048import org.apache.hadoop.hbase.filter.CompareFilter;
-049import org.apache.hadoop.hbase.filter.Filter;
-050import org.apache.hadoop.hbase.filter.FilterList;
-051import org.apache.hadoop.hbase.filter.QualifierFilter;
-052import org.apache.hadoop.hbase.filter.RegexStringComparator;
-053import org.apache.hadoop.hbase.filter.RowFilter;
-054import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-055import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-056import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
-057import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-058import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-059import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-060import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
-061import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
-062import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
-063import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
-064import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.TableQuotaSnapshot;
-065import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
-066import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-067import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
-068import org.apache.hadoop.hbase.util.Bytes;
-069import org.apache.hadoop.hbase.util.Strings;
-070
-071/**
-072 * Helper class to interact with the quota table.
-073 * <pre>
-074 *     ROW-KEY       FAM/QUAL        DATA
-075 *   n.<namespace>   q:s             <global-quotas>
-076 *   t.<namespace>   u:p             <namespace-quota policy>
-077 *   t.<table>       q:s             <global-quotas>
-078 *   t.<table>       u:p             <table-quota policy>
-079 *   u.<user>        q:s             <global-quotas>
-080 *   u.<user>        q:s.<table>     <table-quotas>
-081 *   u.<user>        q:s.<ns>:       <namespace-quotas>
-082 * </pre>
-083 */
-084@InterfaceAudience.Private
-085@InterfaceStability.Evolving
-086public class QuotaTableUtil {
-087  private static final Log LOG = LogFactory.getLog(QuotaTableUtil.class);
-088
-089  /** System table for quotas */
-090  public static final TableName QUOTA_TABLE_NAME =
-091      TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "quota");
-092
-093  protected static final byte[] QUOTA_FAMILY_INFO = Bytes.toBytes("q");
-094  protected static final byte[] QUOTA_FAMILY_USAGE = Bytes.toBytes("u");
-095  protected static final byte[] QUOTA_QUALIFIER_SETTINGS = Bytes.toBytes("s");
-096  protected static final byte[] QUOTA_QUALIFIER_SETTINGS_PREFIX = Bytes.toBytes("s.");
-097  protected static final byte[] QUOTA_QUALIFIER_POLICY = Bytes.toBytes("p");
-098  protected static final String QUOTA_POLICY_COLUMN =
-099      Bytes.toString(QUOTA_FAMILY_USAGE) + ":" + Bytes.toString(QUOTA_QUALIFIER_POLICY);
-100  protected static final byte[] QUOTA_USER_ROW_KEY_PREFIX = Bytes.toBytes("u.");
-101  protected static final byte[] QUOTA_TABLE_ROW_KEY_PREFIX = Bytes.toBytes("t.");
-102  protected static final byte[] QUOTA_NAMESPACE_ROW_KEY_PREFIX = Bytes.toBytes("n.");
-103
-104  /* =========================================================================
-105   *  Quota "settings" helpers
-106   */
-107  public static Quotas getTableQuota(final Connection connection, final TableName table)
-108      throws IOException {
-109    return getQuotas(connection, getTableRowKey(table));
-110  }
-111
-112  public static Quotas getNamespaceQuota(final Connection connection, final String namespace)
-113      throws IOException {
-114    return getQuotas(connection, getNamespaceRowKey(namespace));
-115  }
-116
-117  public static Quotas getUserQuota(final Connection connection, final String user)
-118      throws IOException {
-119    return 
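
The javadoc scheme above distinguishes quota subjects by a one-letter row-key prefix ('n.', 't.', 'u.'), with user-scoped table and namespace quotas pushed into the qualifier. A string-based sketch of that key layout (QuotaTableUtil itself works with the byte[] prefixes defined above; these helpers are illustrative only):

    public final class QuotaRowKeyDemo {
      static String namespaceRowKey(String ns) { return "n." + ns; }
      static String tableRowKey(String table)  { return "t." + table; }
      static String userRowKey(String user)    { return "u." + user; }

      // Per the scheme, a user's quota for one table lives in the same user row,
      // under a qualifier of the form "s.<table>" in family "q".
      static String userTableQualifier(String table) { return "s." + table; }

      public static void main(String[] args) {
        System.out.println(userRowKey("bob"));               // u.bob
        System.out.println(tableRowKey("usertable"));        // t.usertable
        System.out.println(namespaceRowKey("prod"));         // n.prod
        System.out.println(userTableQualifier("usertable")); // s.usertable
      }
    }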

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html b/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
new file mode 100644
index 000..2888fc4
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
@@ -0,0 +1,2240 @@
+ColumnFamilyDescriptorBuilder (Apache HBase 3.0.0-SNAPSHOT API)
+[standard Javadoc page scripts and navigation links omitted]
+org.apache.hadoop.hbase.client
+Class ColumnFamilyDescriptorBuilder
+
+java.lang.Object
+  org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder
+
+@InterfaceAudience.Public
+public class ColumnFamilyDescriptorBuilder
+extends Object
+
+Nested Class Summary
+static class  ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor
+              A ModifyableFamilyDescriptor contains information about a column family such as the
+              number of versions, compression settings, etc.
+
+Field Summary
+static String         BLOCKCACHE                   Key for the BLOCKCACHE attribute.
+private static Bytes  BLOCKCACHE_BYTES
+static String         BLOCKSIZE                    Size of storefile/hfile 'blocks'.
+private static Bytes  BLOCKSIZE_BYTES
+static String         BLOOMFILTER
+private static Bytes  BLOOMFILTER_BYTES
+static String         CACHE_BLOOMS_ON_WRITE
+private static Bytes  CACHE_BLOOMS_ON_WRITE_BYTES
+static String         CACHE_DATA_IN_L1             Key for cache data into L1 if cache is set up with more than one tier.
+private static Bytes  CACHE_DATA_IN_L1_BYTES
+static String         CACHE_DATA_ON_WRITE
+private static Bytes  CACHE_DATA_ON_WRITE_BYTES
+static String         CACHE_INDEX_ON_WRITE
+private static Bytes  CACHE_INDEX_ON_WRITE_BYTES
+private static byte   COLUMN_DESCRIPTOR_VERSION
+static String         COMPRESS_TAGS
+private static Bytes  COMPRESS_TAGS_BYTES
+static String         COMPRESSION
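
ColumnFamilyDescriptorBuilder, documented above, is the immutable-builder replacement for mutating HColumnDescriptor directly; the attribute keys in the field summary (BLOCKSIZE, BLOCKCACHE, ...) back the corresponding setters. A sketch of typical usage against the 2.0+ client API:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class ColumnFamilyBuilderDemo {
      public static void main(String[] args) {
        // Build an immutable descriptor for family "cf"; each setter corresponds
        // to one of the attribute keys listed in the field summary.
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setBlocksize(64 * 1024)    // BLOCKSIZE
            .setBlockCacheEnabled(true) // BLOCKCACHE
            .setMaxVersions(3)
            .build();
        System.out.println(cf.getNameAsString());
      }
    }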

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/devapidocs/org/apache/hadoop/hbase/HealthChecker.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/HealthChecker.html b/devapidocs/org/apache/hadoop/hbase/HealthChecker.html
index e2cd40b..be8e5e2 100644
--- a/devapidocs/org/apache/hadoop/hbase/HealthChecker.html
+++ b/devapidocs/org/apache/hadoop/hbase/HealthChecker.html
@@ -4,7 +4,7 @@
 
 
 
-HealthChecker (Apache HBase 2.0.0-SNAPSHOT API)
+HealthChecker (Apache HBase 3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
-HealthReport (Apache HBase 2.0.0-SNAPSHOT API)
+HealthReport (Apache HBase 3.0.0-SNAPSHOT API)