[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
index 1a130a3..7e8fff8 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
@@ -1761,7 +1761,7 @@


 writeRegionInfoOnFilesystem
-private void writeRegionInfoOnFilesystem(boolean useTempDir)
+private void writeRegionInfoOnFilesystem(boolean useTempDir)
                                   throws IOException
 Write out an info file under the region directory. Useful recovering mangled regions.

@@ -1778,7 +1778,7 @@


 writeRegionInfoOnFilesystem
-private void writeRegionInfoOnFilesystem(byte[] regionInfoContent,
+private void writeRegionInfoOnFilesystem(byte[] regionInfoContent,
                                          boolean useTempDir)
                                   throws IOException
 Write out an info file under the region directory. Useful recovering mangled regions.
@@ -1797,7 +1797,7 @@


 createRegionOnFileSystem
-public static HRegionFileSystem createRegionOnFileSystem(org.apache.hadoop.conf.Configuration conf,
+public static HRegionFileSystem createRegionOnFileSystem(org.apache.hadoop.conf.Configuration conf,
                                                          org.apache.hadoop.fs.FileSystem fs,
                                                          org.apache.hadoop.fs.Path tableDir,
                                                          HRegionInfo regionInfo)
@@ -1820,7 +1820,7 @@


 openRegionFromFileSystem
-public static HRegionFileSystem openRegionFromFileSystem(org.apache.hadoop.conf.Configuration conf,
+public static HRegionFileSystem openRegionFromFileSystem(org.apache.hadoop.conf.Configuration conf,
                                                          org.apache.hadoop.fs.FileSystem fs,
                                                          org.apache.hadoop.fs.Path tableDir,
                                                          HRegionInfo regionInfo,
@@ -1845,7 +1845,7 @@


 deleteRegionFromFileSystem
-public static void deleteRegionFromFileSystem(org.apache.hadoop.conf.Configuration conf,
+public static void deleteRegionFromFileSystem(org.apache.hadoop.conf.Configuration conf,
                                               org.apache.hadoop.fs.FileSystem fs,
                                               org.apache.hadoop.fs.Path tableDir,
                                               HRegionInfo regionInfo)
@@ -1868,7 +1868,7 @@


 createDir
-boolean createDir(org.apache.hadoop.fs.Path dir)
+boolean createDir(org.apache.hadoop.fs.Path dir)
           throws IOException
 Creates a directory. Assumes the user has already checked for this directory existence.

@@ -1888,7 +1888,7 @@


 rename
-boolean rename(org.apache.hadoop.fs.Path srcpath,
+boolean rename(org.apache.hadoop.fs.Path srcpath,
                org.apache.hadoop.fs.Path dstPath)
        throws IOException
 Renames a directory. Assumes the user has already checked for this directory existence.
@@ -1909,7 +1909,7 @@


 deleteDir
-boolean deleteDir(org.apache.hadoop.fs.Path dir)
+boolean deleteDir(org.apache.hadoop.fs.Path dir)
           throws IOException
 Deletes a directory. Assumes the user has already checked for this directory existence.

@@ -1928,7 +1928,7 @@


 sleepBeforeRetry
-private void sleepBeforeRetry(String msg,

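The createDir, rename and deleteDir helpers documented above share one retry idiom: attempt the FileSystem call a bounded number of times and call sleepBeforeRetry between attempts. The following Java sketch illustrates that idiom only; the RetryingFsOps class and its maxAttempts and baseSleepMs parameters are hypothetical stand-ins, not HRegionFileSystem's actual fields or configuration keys.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the retry idiom used by createDir/rename/deleteDir; the
// attempt count and sleep time are illustrative, not HBase's defaults.
final class RetryingFsOps {
  private final FileSystem fs;
  private final int maxAttempts;   // hypothetical bound on retries
  private final long baseSleepMs;  // hypothetical base sleep between retries

  RetryingFsOps(FileSystem fs, int maxAttempts, long baseSleepMs) {
    this.fs = fs;
    this.maxAttempts = maxAttempts;
    this.baseSleepMs = baseSleepMs;
  }

  // Creates a directory, retrying on failure; like createDir above, it
  // assumes the caller has already checked whether the directory exists.
  boolean createDir(Path dir) throws IOException {
    IOException lastIOE = null;
    for (int i = 0; i < maxAttempts; i++) {
      try {
        return fs.mkdirs(dir);
      } catch (IOException ioe) {
        lastIOE = ioe;
        if (fs.exists(dir)) {
          return true; // directory appeared, e.g. created concurrently
        }
        sleepBeforeRetry("create dir", i + 1);
      }
    }
    throw new IOException("Exception in createDir", lastIOE);
  }

  private void sleepBeforeRetry(String msg, int attempt) {
    try {
      Thread.sleep(baseSleepMs * attempt);
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
    }
  }
}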
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html b/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
index c7a919c..29e186b 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
@@ -170,7 +170,7 @@


 private static HRegionLocation
-MetaTableAccessor.getRegionLocation(Result r,
+AsyncMetaTableAccessor.getRegionLocation(Result r,
                                    HRegionInfo regionInfo,
                                    int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result
@@ -179,7 +179,7 @@


 private static HRegionLocation
-AsyncMetaTableAccessor.getRegionLocation(Result r,
+MetaTableAccessor.getRegionLocation(Result r,
                                    HRegionInfo regionInfo,
                                    int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result
@@ -312,6 +312,14 @@
 HTableMultiplexer.FlushWorker.addr


+HRegionLocation
+AsyncClientScanner.OpenScannerResponse.loc
+
+
+private HRegionLocation
+AsyncRpcRetryingCallerFactory.ScanSingleRegionCallerBuilder.loc
+
+
 private HRegionLocation
 AsyncScanSingleRegionRpcRetryingCaller.loc

@@ -320,23 +328,15 @@
 AsyncBatchRpcRetryingCaller.RegionRequest.loc


-HRegionLocation
-AsyncClientScanner.OpenScannerResponse.loc
+protected HRegionLocation
+RegionAdminServiceCallable.location


-private HRegionLocation
-AsyncRpcRetryingCallerFactory.ScanSingleRegionCallerBuilder.loc
-
-
 protected HRegionLocation
 RegionServerCallable.location
 Some subclasses want to set their own location.


-
-protected HRegionLocation
-RegionAdminServiceCallable.location
-



@@ -379,11 +379,11 @@


 protected HRegionLocation
-RegionServerCallable.getLocation()
+MultiServerCallable.getLocation()


 protected HRegionLocation
-MultiServerCallable.getLocation()
+RegionServerCallable.getLocation()


 HRegionLocation
@@ -391,43 +391,43 @@


 HRegionLocation
-RegionLocator.getRegionLocation(byte[] row)
+HRegionLocator.getRegionLocation(byte[] row)
 Finds the region on which the given row is being served.



 HRegionLocation
-HRegionLocator.getRegionLocation(byte[] row)
+RegionLocator.getRegionLocation(byte[] row)
 Finds the region on which the given row is being served.



 HRegionLocation
-RegionLocator.getRegionLocation(byte[] row,
+HRegionLocator.getRegionLocation(byte[] row,
                                 boolean reload)
 Finds the region on which the given row is being served.



 HRegionLocation
-HRegionLocator.getRegionLocation(byte[] row,
+RegionLocator.getRegionLocation(byte[] row,
                                 boolean reload)
 Finds the region on which the given row is being served.



 HRegionLocation
-ClusterConnection.getRegionLocation(TableName tableName,
+ConnectionImplementation.getRegionLocation(TableName tableName,
                                     byte[] row,
-                                    boolean reload)
-Find region location hosting passed row
-
+                                    boolean reload)


 HRegionLocation
-ConnectionImplementation.getRegionLocation(TableName tableName,
+ClusterConnection.getRegionLocation(TableName tableName,
                                     byte[] row,
-                                    boolean reload)
+                                    boolean reload)
+Find region location hosting passed row
+


 private HRegionLocation
@@ -442,15 +442,20 @@


 HRegionLocation
+ConnectionImplementation.locateRegion(byte[] regionName)
+
+
+HRegionLocation
 ClusterConnection.locateRegion(byte[] regionName)
 Gets the location of the region of regionName.


-
+
 HRegionLocation
-ConnectionImplementation.locateRegion(byte[] regionName)
+ConnectionImplementation.locateRegion(TableName tableName,
+                                      byte[] row)

-
+
 HRegionLocation
 ClusterConnection.locateRegion(TableName tableName,
                                byte[] row)
@@ -458,11 +463,6 @@
 lives in.


-
-HRegionLocation
-ConnectionImplementation.locateRegion(TableName tableName,
-                                      byte[] row)
-

 private HRegionLocation
 AsyncNonMetaRegionLocator.locateRowBeforeInCache(AsyncNonMetaRegionLocator.TableCache tableCache,
@@ -477,17 +477,17 @@


 HRegionLocation
+ConnectionImplementation.relocateRegion(TableName tableName,
+                                        byte[] row)
+
+
+HRegionLocation
 ClusterConnection.relocateRegion(TableName tableName,
                                  byte[] row)
 Find the location of the region of tableName that row lives in, ignoring any value that might be in the cache.


-
-HRegionLocation
-ConnectionImplementation.relocateRegion(TableName tableName,
-                                        byte[] row)
-



@@ -499,13 +499,13 @@


 List<HRegionLocation>

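For orientation, the getRegionLocation methods reshuffled above are reachable through the public RegionLocator interface; ConnectionImplementation does the cached lookup and relocateRegion bypasses the cache. A small usage sketch against the public API; the table name and row key are invented for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public final class LocateRow {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("my_table"))) {
      // Cached lookup of the region serving this row.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row-0001"));
      System.out.println("region=" + loc.getRegionInfo().getRegionNameAsString()
          + " server=" + loc.getServerName());
      // reload=true skips the client-side cache, much as relocateRegion does internally.
      HRegionLocation fresh = locator.getRegionLocation(Bytes.toBytes("row-0001"), true);
      System.out.println("fresh server=" + fresh.getServerName());
    }
  }
}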
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
index 35d5549..7f42873 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
@@ -115,2816 +115,2814 @@
 107 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 108 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
 109 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-110 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-111 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-112 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-113 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-114 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-115 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-116 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-117 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-118 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-119 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-120 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-121 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-122 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-123 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-124 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-125 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-126 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-127 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-128 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-129 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-130 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-131 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-132 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-133 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-134 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-135 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-136 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-137 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-138 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-139 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-140 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-141 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-142 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-143 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-144 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-145 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-146 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-147 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-148 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-149 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-150 import

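The removed imports are the namespace protobuf messages that RawAsyncHBaseAdmin turns into master procedures. For reference, a sketch of the equivalent operations through the public Admin API (synchronous variant for brevity; the namespace name is arbitrary and a running cluster is assumed):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class NamespaceOps {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Each call is backed by a master procedure, the same machinery the
      // Create/DeleteNamespaceRequest protobufs above belong to.
      admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
      admin.deleteNamespace("demo_ns");
    }
  }
}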
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
index 5c95397..860416b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -293,7944 +293,7962 @@
 285  final AtomicLong compactionsFailed = new AtomicLong(0L);
 286  final AtomicLong compactionNumFilesCompacted = new AtomicLong(0L);
 287  final AtomicLong compactionNumBytesCompacted = new AtomicLong(0L);
-288
-289  private final WAL wal;
-290  private final HRegionFileSystem fs;
-291  protected final Configuration conf;
-292  private final Configuration baseConf;
-293  private final int rowLockWaitDuration;
-294  static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000;
-295
-296  // The internal wait duration to acquire a lock before read/update
-297  // from the region. It is not per row. The purpose of this wait time
-298  // is to avoid waiting a long time while the region is busy, so that
-299  // we can release the IPC handler soon enough to improve the
-300  // availability of the region server. It can be adjusted by
-301  // tuning configuration "hbase.busy.wait.duration".
-302  final long busyWaitDuration;
-303  static final long DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-304
-305  // If updating multiple rows in one call, wait longer,
-306  // i.e. waiting for busyWaitDuration * # of rows. However,
-307  // we can limit the max multiplier.
-308  final int maxBusyWaitMultiplier;
-309
-310  // Max busy wait duration. There is no point to wait longer than the RPC
-311  // purge timeout, when a RPC call will be terminated by the RPC engine.
-312  final long maxBusyWaitDuration;
-313
-314  // Max cell size. If nonzero, the maximum allowed size for any given cell
-315  // in bytes
-316  final long maxCellSize;
-317
-318  // negative number indicates infinite timeout
-319  static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-320  final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool();
-321
-322  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
+288  final AtomicLong compactionsQueued = new AtomicLong(0L);
+289  final AtomicLong flushesQueued = new AtomicLong(0L);
+290
+291  private final WAL wal;
+292  private final HRegionFileSystem fs;
+293  protected final Configuration conf;
+294  private final Configuration baseConf;
+295  private final int rowLockWaitDuration;
+296  static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000;
+297
+298  // The internal wait duration to acquire a lock before read/update
+299  // from the region. It is not per row. The purpose of this wait time
+300  // is to avoid waiting a long time while the region is busy, so that
+301  // we can release the IPC handler soon enough to improve the
+302  // availability of the region server. It can be adjusted by
+303  // tuning configuration "hbase.busy.wait.duration".
+304  final long busyWaitDuration;
+305  static final long DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+306
+307  // If updating multiple rows in one call, wait longer,
+308  // i.e. waiting for busyWaitDuration * # of rows. However,
+309  // we can limit the max multiplier.
+310  final int maxBusyWaitMultiplier;
+311
+312  // Max busy wait duration. There is no point to wait longer than the RPC
+313  // purge timeout, when a RPC call will be terminated by the RPC engine.
+314  final long maxBusyWaitDuration;
+315
+316  // Max cell size. If nonzero, the maximum allowed size for any given cell
+317  // in bytes
+318  final long maxCellSize;
+319
+320  // negative number indicates infinite timeout
+321  static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+322  final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool();
 323
-324  /**
-325   * The sequence ID that was enLongAddered when this region was opened.
-326   */
-327  private long openSeqNum = HConstants.NO_SEQNUM;
-328
-329  /**
-330   * The default setting for whether to enable on-demand CF loading for
-331   * scan requests to this region. Requests can override it.
-332   */
-333  private boolean isLoadingCfsOnDemandDefault = false;
-334
-335  private final AtomicInteger majorInProgress = new AtomicInteger(0);
-336  private final AtomicInteger minorInProgress = new AtomicInteger(0);
-337
-338  //
-339  // Context: During replay we want to ensure that we do not lose any data. So, we
-340  // have to be conservative in how we replay wals. For each store, we calculate
-341  // the maxSeqId up to which the store was flushed. And, skip the edits which
-342  // are equal to or

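The comments above document the region busy-wait knobs: "hbase.busy.wait.duration" bounds how long a single operation waits on a busy region, and the multiplier caps the scaling for multi-row batches. A sketch of setting them on a Configuration; the values are arbitrary, and the multiplier key name is read from the HRegion source rather than guaranteed by this page:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class BusyWaitTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "hbase.busy.wait.duration": how long one operation may wait on a busy
    // region before giving up, per the HRegion comments above (milliseconds).
    conf.setLong("hbase.busy.wait.duration", 30000L);
    // Cap for the busyWaitDuration * rowCount scaling on multi-row batches.
    // Key name taken from the HRegion source; treat it as an assumption here.
    conf.setInt("hbase.busy.wait.multiplier.max", 2);
    System.out.println(conf.get("hbase.busy.wait.duration"));
  }
}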
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
index 006b961..e17cadf 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";



-class WALSplitter.LogRecoveredEditsOutputSink
+class WALSplitter.LogRecoveredEditsOutputSink
 extends WALSplitter.OutputSink
 Class that manages the output streams from the log splitting process.

@@ -261,7 +261,7 @@


 LogRecoveredEditsOutputSink
-public LogRecoveredEditsOutputSink(WALSplitter.PipelineController controller,
+public LogRecoveredEditsOutputSink(WALSplitter.PipelineController controller,
                                    WALSplitter.EntryBuffers entryBuffers,
                                    int numWriters)

@@ -280,7 +280,7 @@


 finishWritingAndClose
-public List<org.apache.hadoop.fs.Path> finishWritingAndClose()
+public List<org.apache.hadoop.fs.Path> finishWritingAndClose()
                                 throws IOException

 Specified by:
@@ -298,7 +298,7 @@

 deleteOneWithFewerEntries
-private void deleteOneWithFewerEntries(WALSplitter.WriterAndPath wap,
+private void deleteOneWithFewerEntries(WALSplitter.WriterAndPath wap,
                                        org.apache.hadoop.fs.Path dst)
                                 throws IOException

@@ -313,7 +313,7 @@

 close
-private List<org.apache.hadoop.fs.Path> close()
+private List<org.apache.hadoop.fs.Path> close()
                                 throws IOException
 Close all of the output streams.

@@ -330,7 +330,7 @@

 closeLogWriters
-private List<IOException> closeLogWriters(List<IOException> thrown)
+private List<IOException> closeLogWriters(List<IOException> thrown)
                                 throws IOException

 Throws:
@@ -344,7 +344,7 @@

 getWriterAndPath
-private WALSplitter.WriterAndPath getWriterAndPath(WAL.Entry entry)
+private WALSplitter.WriterAndPath getWriterAndPath(WAL.Entry entry)
                                 throws IOException
 Get a writer and path for a log starting at the given entry. This function is threadsafe so
 long as multiple threads are always acting on different regions.
@@ -362,7 +362,7 @@

 createWAP
-private WALSplitter.WriterAndPath createWAP(byte[] region,
+private WALSplitter.WriterAndPath createWAP(byte[] region,
                                             WAL.Entry entry,

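getWriterAndPath is documented as threadsafe so long as concurrent threads act on different regions, which suggests a map of per-region writers. A simplified, hypothetical sketch of that structure; RegionWriter and the recovered.edits path handling stand in for WALSplitter.WriterAndPath and are not the real implementation:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Simplified stand-in for LogRecoveredEditsOutputSink's writer bookkeeping.
final class OutputSinkSketch {
  static final class RegionWriter {            // hypothetical
    final String path;
    long editsWritten;
    RegionWriter(String path) { this.path = path; }
    void append(byte[] edit) { editsWritten++; /* write to recovered.edits file */ }
    void close() throws IOException { /* flush and close the underlying stream */ }
  }

  private final Map<String, RegionWriter> writers = new ConcurrentHashMap<>();

  // Safe when concurrent callers use different regions, mirroring the
  // contract documented on getWriterAndPath above.
  RegionWriter getWriterAndPath(byte[] encodedRegionName) {
    String region = new String(encodedRegionName, StandardCharsets.UTF_8);
    return writers.computeIfAbsent(region,
        r -> new RegionWriter("recovered.edits/" + r));
  }

  // Like closeLogWriters: close everything that was opened.
  void finishWritingAndClose() throws IOException {
    for (RegionWriter w : writers.values()) {
      w.close();
    }
  }
}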
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index 49ef112..b3d1843 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -141,3316 +141,3314 @@
 133 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
 134 import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
 135 import org.apache.hadoop.hbase.procedure2.LockInfo;
-136 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-137 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-138 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-139 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
-140 import org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver;
-141 import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
-142 import org.apache.hadoop.hbase.quotas.QuotaUtil;
-143 import org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore;
-144 import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier;
-145 import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory;
-146 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
-147 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-148 import org.apache.hadoop.hbase.regionserver.HStore;
-149 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
-150 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
-151 import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
-152 import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
-153 import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
-154 import org.apache.hadoop.hbase.replication.ReplicationException;
-155 import org.apache.hadoop.hbase.replication.ReplicationFactory;
-156 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-157 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-158 import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
-159 import org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
-160 import org.apache.hadoop.hbase.replication.regionserver.Replication;
-161 import org.apache.hadoop.hbase.security.AccessDeniedException;
-162 import org.apache.hadoop.hbase.security.UserProvider;
-163 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-165 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-166 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-167 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;
-168 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-169 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-170 import org.apache.hadoop.hbase.util.Addressing;
-171 import org.apache.hadoop.hbase.util.Bytes;
-172 import org.apache.hadoop.hbase.util.CompressionTest;
-173 import org.apache.hadoop.hbase.util.EncryptionTest;
-174 import org.apache.hadoop.hbase.util.FSUtils;
-175 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
-176 import org.apache.hadoop.hbase.util.HasThread;
-177 import org.apache.hadoop.hbase.util.IdLock;
-178 import org.apache.hadoop.hbase.util.ModifyRegionUtils;
-179 import org.apache.hadoop.hbase.util.Pair;
-180 import org.apache.hadoop.hbase.util.Threads;
-181 import org.apache.hadoop.hbase.util.VersionInfo;
-182 import org.apache.hadoop.hbase.util.ZKDataMigrator;
-183 import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
-184 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
-185 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-186 import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;
-187 import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
-188 import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
-189 import org.apache.hadoop.hbase.zookeeper.SplitOrMergeTracker;
-190 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
-191 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-192 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-193 import org.apache.zookeeper.KeeperException;
-194 import org.eclipse.jetty.server.Server;
-195 import org.eclipse.jetty.server.ServerConnector;
-196 import org.eclipse.jetty.servlet.ServletHolder;
-197 import org.eclipse.jetty.webapp.WebAppContext;
-198
-199 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-200 import

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index 5479fb1..49ef112 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -123,3388 +123,3334 @@
 115 import org.apache.hadoop.hbase.master.procedure.DeleteColumnFamilyProcedure;
 116 import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
 117 import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
-118 import org.apache.hadoop.hbase.master.procedure.DispatchMergingRegionsProcedure;
-119 import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
-120 import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
-121 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-122 import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
-123 import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
-124 import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure;
-125 import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
-126 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
-127 import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
-128 import org.apache.hadoop.hbase.master.replication.ReplicationManager;
-129 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-130 import org.apache.hadoop.hbase.mob.MobConstants;
-131 import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
-132 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-133 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-134 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
-135 import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
-136 import org.apache.hadoop.hbase.procedure2.LockInfo;
-137 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-138 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-139 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-140 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
-141 import org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver;
-142 import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
-143 import org.apache.hadoop.hbase.quotas.QuotaUtil;
-144 import org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore;
-145 import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier;
-146 import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory;
-147 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
-148 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-149 import org.apache.hadoop.hbase.regionserver.HStore;
-150 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
-151 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
-152 import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
-153 import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
-154 import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
-155 import org.apache.hadoop.hbase.replication.ReplicationException;
-156 import org.apache.hadoop.hbase.replication.ReplicationFactory;
-157 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-158 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-159 import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
-160 import org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
-161 import org.apache.hadoop.hbase.replication.regionserver.Replication;
-162 import org.apache.hadoop.hbase.security.AccessDeniedException;
-163 import org.apache.hadoop.hbase.security.UserProvider;
-164 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-165 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-166 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-167 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-168 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;
-169 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-170 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-171 import org.apache.hadoop.hbase.util.Addressing;
-172 import org.apache.hadoop.hbase.util.Bytes;
-173 import org.apache.hadoop.hbase.util.CompressionTest;
-174 import org.apache.hadoop.hbase.util.EncryptionTest;
-175 import

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
index d4b122e..3611208 100644
--- a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
@@ -166,10 +166,10 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.backup.BackupInfo.BackupState
 org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand
 org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase
 org.apache.hadoop.hbase.backup.BackupType
+org.apache.hadoop.hbase.backup.BackupInfo.BackupState
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html b/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
index f93e75e..cf1ea95 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
@@ -471,8 +471,9 @@ service.
 
 
 HColumnDescriptor(HColumnDescriptor desc)
-Deprecated.
-Constructor.
+Deprecated.
+use ColumnFamilyDescriptorBuilder.copy(ColumnFamilyDescriptor)
+
 
 
 
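The new deprecation text points at the builder API. A sketch of copying a column family definition with ColumnFamilyDescriptorBuilder, per that note; the family name and setting are arbitrary:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CopyFamily {
  public static void main(String[] args) {
    ColumnFamilyDescriptor original = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))
        .setMaxVersions(3)
        .build();
    // Replaces the deprecated HColumnDescriptor(HColumnDescriptor) copy
    // constructor, as the deprecation note above suggests.
    ColumnFamilyDescriptor copy = ColumnFamilyDescriptorBuilder.copy(original);
    System.out.println(copy.getNameAsString() + " versions=" + copy.getMaxVersions());
  }
}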

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html b/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
index 6de1930..fd81ad6 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
@@ -1757,13 +1757,14 @@
AsyncServerRequestRpcRetryingCaller.Callable<T> callable)
 
 
-MultiServerCallable(ClusterConnection connection,
+MultiServerCallable(ClusterConnection connection,
                    TableName tableName,
                    ServerName location,
                    MultiAction multi,
                    org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController rpcController,
                    int rpcTimeout,
-                   RetryingTimeTracker tracker)
+                   RetryingTimeTracker tracker,
+                   int priority)
 
 
 SingleServerRequestRunnable(MultiAction multiAction,

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
index 5933927..336cc3b 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
@@ -4226,12 +4226,13 @@ service.
 BufferedMutatorParams(TableName tableName)
 
 
-CancellableRegionServerCallable(Connection connection,
+CancellableRegionServerCallable(Connection connection,
                                TableName tableName,
                                byte[] row,
                                org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController rpcController,
                                int rpcTimeout,
-                               RetryingTimeTracker tracker)
+                               RetryingTimeTracker tracker,
+                               int priority)
 
 
 ClientAsyncPrefetchScanner(org.apache.hadoop.conf.Configuration configuration,
@@ -4257,10 +4258,11 @@ service.
 
 
 
-ClientServiceCallable(Connection connection,
+ClientServiceCallable(Connection connection,
                      TableName tableName,
                      byte[] row,
-                     org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController rpcController)
+                     org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController rpcController,
+                     int priority)
 
 
 ClientSimpleScanner(org.apache.hadoop.conf.Configuration configuration,
@@ -4381,19 +4383,21 @@ service.
 org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse response)
 
 

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Store.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Store.html b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Store.html
index 79eba86..ddaf2f1 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Store.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Store.html
@@ -150,20 +150,20 @@


 default void
-RegionObserver.postCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c,
+RegionObserver.postCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c,
                                     Store store,
-                                    com.google.common.collect.ImmutableList<StoreFile> selected)
+                                    org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList<StoreFile> selected)
 Deprecated.
-use RegionObserver.postCompactSelection(ObserverContext, Store, ImmutableList, CompactionRequest) instead.
+use RegionObserver.postCompactSelection(ObserverContext, Store, ImmutableList, CompactionRequest) instead.




 default void
-RegionObserver.postCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c,
+RegionObserver.postCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c,
                                     Store store,
-                                    com.google.common.collect.ImmutableList<StoreFile> selected,
+                                    org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList<StoreFile> selected,
                                     CompactionRequest request)
 Called after the StoreFiles to compact have been selected from the available candidates.
@@ -697,8 +697,8 @@


 void
-RegionCoprocessorHost.postCompactSelection(Store store,
-                                           com.google.common.collect.ImmutableList<StoreFile> selected,
+RegionCoprocessorHost.postCompactSelection(Store store,
+                                           org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList<StoreFile> selected,
                                            CompactionRequest request,
                                            User user)
 Called after the StoreFiles to be compacted have been selected from the available

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
index 2307512..1338464 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
@@ -236,20 +236,20 @@


 default void
-RegionObserver.postCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c,
+RegionObserver.postCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c,
                                     Store store,
-                                    com.google.common.collect.ImmutableList<StoreFile> selected)
+                                    org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList<StoreFile> selected)
 Deprecated.
-use RegionObserver.postCompactSelection(ObserverContext, Store, ImmutableList, CompactionRequest) instead.
+use RegionObserver.postCompactSelection(ObserverContext, Store, ImmutableList, CompactionRequest) instead.




 default void
-RegionObserver.postCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c,
+RegionObserver.postCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c,
                                     Store store,
-                                    com.google.common.collect.ImmutableList<StoreFile> selected,
+                                    org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList<StoreFile> selected,
                                     CompactionRequest request)
 Called after the StoreFiles to compact have been selected from the available candidates.
@@ -514,11 +514,11 @@



-private com.google.common.collect.ImmutableList<StoreFile>
+private org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList<StoreFile>
 StripeStoreFileManager.State.allCompactedFilesCached


-com.google.common.collect.ImmutableList<StoreFile>
+org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList<StoreFile>
 StripeStoreFileManager.State.allFilesCached
 Cached list of all files in the structure, to return from some calls

@@ -557,7 +557,7 @@
 StripeStoreFileManager.CompactionOrFlushMergeCopy.l0Results


-com.google.common.collect.ImmutableList<StoreFile>
+org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList<StoreFile>
 StripeStoreFileManager.State.level0Files
 Level 0.

@@ -587,13 +587,13 @@
 DefaultStoreFileManager.storeFileComparator


-private com.google.common.collect.ImmutableList<StoreFile>
+private org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList<StoreFile>
 DefaultStoreFileManager.storefiles
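The surviving postCompactSelection overload takes the shaded Guava ImmutableList plus a CompactionRequest. A hedged sketch of a coprocessor overriding it with the signature shown in this diff (master branch, mid-2017); the logging body is illustrative only:

import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;

// RegionObserver methods are default in this era, so only the hook of
// interest needs overriding.
public class CompactionSelectionLogger implements RegionObserver {
  @Override
  public void postCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c,
                                   Store store,
                                   ImmutableList<StoreFile> selected,
                                   CompactionRequest request) {
    // Called after the StoreFiles to compact have been selected.
    System.out.println("Compacting " + selected.size() + " files of store "
        + store.getColumnFamilyName());
  }
}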
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 504e470..38667c0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -2866,5375 +2866,5371 @@
 2858    checkResources();
 2859    startRegionOperation(Operation.DELETE);
 2860    try {
-2861      delete.getRow();
-2862      // All edits for the given row (across all column families) must happen atomically.
-2863      doBatchMutate(delete);
-2864    } finally {
-2865      closeRegionOperation(Operation.DELETE);
-2866    }
-2867  }
-2868
-2869  /**
-2870   * Row needed by below method.
-2871   */
-2872  private static final byte [] FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly");
-2873
-2874  /**
-2875   * This is used only by unit tests. Not required to be a public API.
-2876   * @param familyMap map of family to edits for the given family.
-2877   * @throws IOException
-2878   */
-2879  void delete(NavigableMap<byte[], List<Cell>> familyMap,
-2880      Durability durability) throws IOException {
-2881    Delete delete = new Delete(FOR_UNIT_TESTS_ONLY);
-2882    delete.setFamilyCellMap(familyMap);
-2883    delete.setDurability(durability);
-2884    doBatchMutate(delete);
-2885  }
-2886
-2887  @Override
-2888  public void prepareDeleteTimestamps(Mutation mutation, Map<byte[], List<Cell>> familyMap,
-2889      byte[] byteNow) throws IOException {
-2890    for (Map.Entry<byte[], List<Cell>> e : familyMap.entrySet()) {
-2891
-2892      byte[] family = e.getKey();
-2893      List<Cell> cells = e.getValue();
-2894      assert cells instanceof RandomAccess;
-2895
-2896      Map<byte[], Integer> kvCount = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-2897      int listSize = cells.size();
-2898      for (int i=0; i < listSize; i++) {
-2899        Cell cell = cells.get(i);
-2900        //  Check if time is LATEST, change to time of most recent addition if so
-2901        //  This is expensive.
-2902        if (cell.getTimestamp() == HConstants.LATEST_TIMESTAMP && CellUtil.isDeleteType(cell)) {
-2903          byte[] qual = CellUtil.cloneQualifier(cell);
-2904          if (qual == null) qual = HConstants.EMPTY_BYTE_ARRAY;
-2905
-2906          Integer count = kvCount.get(qual);
-2907          if (count == null) {
-2908            kvCount.put(qual, 1);
-2909          } else {
-2910            kvCount.put(qual, count + 1);
-2911          }
-2912          count = kvCount.get(qual);
-2913
-2914          Get get = new Get(CellUtil.cloneRow(cell));
-2915          get.setMaxVersions(count);
-2916          get.addColumn(family, qual);
-2917          if (coprocessorHost != null) {
-2918            if (!coprocessorHost.prePrepareTimeStampForDeleteVersion(mutation, cell,
-2919                byteNow, get)) {
-2920              updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-2921            }
-2922          } else {
-2923            updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-2924          }
-2925        } else {
-2926          CellUtil.updateLatestStamp(cell, byteNow, 0);
-2927        }
-2928      }
-2929    }
-2930  }
-2931
-2932  void updateDeleteLatestVersionTimeStamp(Cell cell, Get get, int count, byte[] byteNow)
-2933      throws IOException {
-2934    List<Cell> result = get(get, false);
-2935
-2936    if (result.size() < count) {
-2937      // Nothing to delete
-2938      CellUtil.updateLatestStamp(cell, byteNow, 0);
-2939      return;
-2940    }
-2941    if (result.size() > count) {
-2942      throw new RuntimeException("Unexpected size: " + result.size());
-2943    }
-2944    Cell getCell = result.get(count - 1);
-2945    CellUtil.setTimestamp(cell, getCell.getTimestamp());
-2946  }
-2947
-2948  @Override
-2949  public void put(Put put) throws IOException {
-2950    checkReadOnly();
-2951
-2952    // Do a rough check that we have resources to accept a write.  The check is
-2953    // 'rough' in that between the resource check and the call to obtain a
-2954    // read lock, resources may run out.  For now, the thought is that this
-2955    // will be extremely rare; we'll deal with it when it happens.
-2956    checkResources();
-2957    startRegionOperation(Operation.PUT);
-2958    try {
-2959      // All edits for the given row (across all column families) must happen atomically.
-2960      doBatchMutate(put);
-2961    } finally {
-2962      closeRegionOperation(Operation.PUT);
-2963    }
-2964  }
-2965
-2966  /**
-2967   * Struct-like class that tracks the progress of a batch operation,
-2968   *

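The delete and put paths above both funnel into doBatchMutate with the comment that all edits for a row, across all column families, apply atomically. From the client side that guarantee looks like the following sketch; the table, family and qualifier names are invented:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class AtomicRowEdit {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("my_table"))) {
      byte[] row = Bytes.toBytes("row-0001");
      // One Put touching two families: HRegion applies all of its cells
      // atomically via the doBatchMutate path shown above.
      Put put = new Put(row);
      put.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("q"), Bytes.toBytes("v1"));
      put.addColumn(Bytes.toBytes("cf2"), Bytes.toBytes("q"), Bytes.toBytes("v2"));
      table.put(put);
      // Likewise for a Delete spanning both families.
      Delete delete = new Delete(row);
      delete.addFamily(Bytes.toBytes("cf1"));
      delete.addFamily(Bytes.toBytes("cf2"));
      table.delete(delete);
    }
  }
}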
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
index feb42ea..4bd98f4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
@@ -185,4189 +185,4266 @@
 177 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
 178 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
 179 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
-180 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
-181 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
-182 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
-183 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
-184 import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-185 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
-186 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
-187 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
-188 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-189 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-190 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-191 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-192 import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-193 import org.apache.hadoop.hbase.util.Addressing;
-194 import org.apache.hadoop.hbase.util.Bytes;
-195 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-196 import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-197 import org.apache.hadoop.hbase.util.Pair;
-198 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-199 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-200 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-201 import org.apache.hadoop.ipc.RemoteException;
-202 import org.apache.hadoop.util.StringUtils;
-203 import org.apache.zookeeper.KeeperException;
-204
-205 import com.google.common.annotations.VisibleForTesting;
-206 import com.google.protobuf.Descriptors;
-207 import com.google.protobuf.Message;
-208 import com.google.protobuf.RpcController;
-209 import java.util.stream.Collectors;
-210
-211 /**
-212  * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that
-213  * this is an HBase-internal class as defined in
-214  * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
-215  * There are no guarantees for backwards source / binary compatibility and methods or class can
-216  * change or go away without deprecation.
-217  * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead of constructing
-218  * an HBaseAdmin directly.
-219  *
-220  * <p>Connection should be an <i>unmanaged</i> connection obtained via
-221  * {@link ConnectionFactory#createConnection(Configuration)}
-222  *
-223  * @see ConnectionFactory
-224  * @see Connection
-225  * @see Admin
-226  */
-227 @InterfaceAudience.Private
-228 @InterfaceStability.Evolving
-229 public class HBaseAdmin implements Admin {
-230   private static final Log LOG = LogFactory.getLog(HBaseAdmin.class);
-231
-232   private static final String ZK_IDENTIFIER_PREFIX = "hbase-admin-on-";
+180 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
+181 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
+182 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
+183 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
+184 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
+185 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
+186 import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+187 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
+188 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
+189 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
+190 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
+191 import

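The class comment spells out the supported path: do not construct HBaseAdmin directly, obtain Admin from an unmanaged Connection. A minimal sketch of that pattern:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class AdminFromConnection {
  public static void main(String[] args) throws Exception {
    // Unmanaged connection, as the HBaseAdmin javadoc above requires.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Admin is the supported interface; HBaseAdmin itself is
      // InterfaceAudience.Private and may change without deprecation.
      System.out.println("cluster id: " + admin.getClusterStatus().getClusterId());
    }
  }
}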
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.StoreScannerCompactionRace.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.StoreScannerCompactionRace.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.StoreScannerCompactionRace.html
index b32645e..7968115 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.StoreScannerCompactionRace.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.StoreScannerCompactionRace.html
@@ -30,590 +30,590 @@
 022 import java.io.IOException;
 023 import java.io.InterruptedIOException;
 024 import java.util.ArrayList;
-025 import java.util.HashMap;
-026 import java.util.List;
-027 import java.util.Map;
-028 import java.util.NavigableSet;
-029 import java.util.concurrent.CountDownLatch;
-030 import java.util.concurrent.locks.ReentrantLock;
-031
-032 import org.apache.commons.logging.Log;
-033 import org.apache.commons.logging.LogFactory;
-034 import org.apache.hadoop.hbase.Cell;
-035 import org.apache.hadoop.hbase.CellComparator;
-036 import org.apache.hadoop.hbase.CellUtil;
-037 import org.apache.hadoop.hbase.DoNotRetryIOException;
-038 import org.apache.hadoop.hbase.HConstants;
-039 import org.apache.hadoop.hbase.KeyValue;
-040 import org.apache.hadoop.hbase.KeyValueUtil;
-041 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-042 import org.apache.hadoop.hbase.client.IsolationLevel;
-043 import org.apache.hadoop.hbase.client.Scan;
-044 import org.apache.hadoop.hbase.executor.ExecutorService;
-045 import org.apache.hadoop.hbase.filter.Filter;
-046 import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
-047 import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
-048 import org.apache.hadoop.hbase.regionserver.handler.ParallelSeekHandler;
-049 import org.apache.hadoop.hbase.regionserver.querymatcher.CompactionScanQueryMatcher;
-050 import org.apache.hadoop.hbase.regionserver.querymatcher.LegacyScanQueryMatcher;
-051 import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
-052 import org.apache.hadoop.hbase.regionserver.querymatcher.UserScanQueryMatcher;
-053 import org.apache.hadoop.hbase.util.CollectionUtils;
-054 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+025 import java.util.List;
+026 import java.util.NavigableSet;
+027 import java.util.concurrent.CountDownLatch;
+028 import java.util.concurrent.locks.ReentrantLock;
+029
+030 import org.apache.commons.logging.Log;
+031 import org.apache.commons.logging.LogFactory;
+032 import org.apache.hadoop.hbase.Cell;
+033 import org.apache.hadoop.hbase.CellComparator;
+034 import org.apache.hadoop.hbase.CellUtil;
+035 import org.apache.hadoop.hbase.DoNotRetryIOException;
+036 import org.apache.hadoop.hbase.HConstants;
+037 import org.apache.hadoop.hbase.KeyValue;
+038 import org.apache.hadoop.hbase.KeyValueUtil;
+039 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+040 import org.apache.hadoop.hbase.client.IsolationLevel;
+041 import org.apache.hadoop.hbase.client.Scan;
+042 import org.apache.hadoop.hbase.executor.ExecutorService;
+043 import org.apache.hadoop.hbase.filter.Filter;
+044 import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
+045 import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
+046 import org.apache.hadoop.hbase.regionserver.handler.ParallelSeekHandler;
+047 import org.apache.hadoop.hbase.regionserver.querymatcher.CompactionScanQueryMatcher;
+048 import org.apache.hadoop.hbase.regionserver.querymatcher.LegacyScanQueryMatcher;
+049 import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
+050 import org.apache.hadoop.hbase.regionserver.querymatcher.UserScanQueryMatcher;
+051 import org.apache.hadoop.hbase.util.CollectionUtils;
+052 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+053
+054 import com.google.common.annotations.VisibleForTesting;
 055
-056 import com.google.common.annotations.VisibleForTesting;
-057
-058 /**
-059  * Scanner scans both the memstore and the Store. Coalesce KeyValue stream into List<KeyValue>
-060  * for a single row.
-061  * <p>
-062  * The implementation is not thread safe. So there will be no race between next and close. The only
-063  * exception is updateReaders, it will be called in the memstore flush thread to indicate that there
-064  * is a flush.
-065  */
-066 @InterfaceAudience.Private
-067 public class StoreScanner extends NonReversedNonLazyKeyValueScanner
-068     implements KeyValueScanner, InternalScanner, ChangedReadersObserver {
-069   private static final Log LOG = LogFactory.getLog(StoreScanner.class);
-070   // In unit tests, the store could be null
-071   protected final Store store;
-072   private ScanQueryMatcher matcher;
-073   protected

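For orientation, the class comment above describes the InternalScanner contract that
StoreScanner implements: next(List<Cell>) fills the caller's list with the Cells of one
row (or batch) and returns whether more remain, and close() must never race with next().
A minimal usage sketch, assuming a scanner already opened on a store and a hypothetical
per-cell handler named process():

    List<Cell> cells = new ArrayList<>();
    boolean more;
    do {
      more = scanner.next(cells);   // fills 'cells' with the next row/batch
      for (Cell cell : cells) {
        process(cell);              // hypothetical consumer
      }
      cells.clear();                // the scanner appends; the caller owns the buffer
    } while (more);
    scanner.close();                // same thread as next(): the class is not thread safe
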
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
index 75db22d..99a09f9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
@@ -37,2710 +37,2816 @@
 029import java.util.List;
 030import java.util.Map;
 031import java.util.Optional;
-032import java.util.concurrent.CompletableFuture;
-033import java.util.concurrent.TimeUnit;
-034import java.util.concurrent.atomic.AtomicReference;
-035import java.util.function.BiConsumer;
-036import java.util.regex.Pattern;
-037import java.util.stream.Collectors;
-038
-039import com.google.common.annotations.VisibleForTesting;
-040
-041import io.netty.util.Timeout;
-042import io.netty.util.TimerTask;
-043
-044import java.util.stream.Stream;
-045
-046import org.apache.commons.io.IOUtils;
-047import org.apache.commons.logging.Log;
-048import org.apache.commons.logging.LogFactory;
-049import org.apache.hadoop.hbase.ClusterStatus;
-050import org.apache.hadoop.hbase.HRegionInfo;
-051import org.apache.hadoop.hbase.HRegionLocation;
-052import org.apache.hadoop.hbase.MetaTableAccessor;
-053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import org.apache.hadoop.hbase.NotServingRegionException;
-055import org.apache.hadoop.hbase.ProcedureInfo;
-056import org.apache.hadoop.hbase.RegionLoad;
-057import org.apache.hadoop.hbase.RegionLocations;
-058import org.apache.hadoop.hbase.ServerName;
-059import org.apache.hadoop.hbase.NamespaceDescriptor;
-060import org.apache.hadoop.hbase.HConstants;
-061import org.apache.hadoop.hbase.TableExistsException;
-062import org.apache.hadoop.hbase.TableName;
-063import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-064import org.apache.hadoop.hbase.TableNotDisabledException;
-065import org.apache.hadoop.hbase.TableNotEnabledException;
-066import org.apache.hadoop.hbase.TableNotFoundException;
-067import org.apache.hadoop.hbase.UnknownRegionException;
-068import org.apache.hadoop.hbase.classification.InterfaceAudience;
-069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import org.apache.hadoop.hbase.client.Scan.ReadType;
-072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import org.apache.hadoop.hbase.client.replication.TableCFs;
-074import org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import org.apache.hadoop.hbase.replication.ReplicationException;
-080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-098import 

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
index 71844ce..75db22d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
@@ -105,2564 +105,2642 @@
 097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
 099import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-105import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-106import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-107import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-108import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-109import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-110import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-111import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-112import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-113import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-114import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-115import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-139import 

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html
index f5bc73a..feb42ea 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationState.html
@@ -4044,345 +4044,330 @@
 4036
 4037  @Override
 4038  public void drainRegionServers(List<ServerName> servers) throws IOException {
-4039    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4040    for (ServerName server : servers) {
-4041      // Parse to ServerName to do simple validation.
-4042      ServerName.parseServerName(server.toString());
-4043      pbServers.add(ProtobufUtil.toServerName(server));
-4044    }
-4045
-4046    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4047      @Override
-4048      public Void rpcCall() throws ServiceException {
-4049        DrainRegionServersRequest req =
-4050            DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build();
-4051        master.drainRegionServers(getRpcController(), req);
-4052        return null;
-4053      }
-4054    });
-4055  }
-4056
-4057  @Override
-4058  public List<ServerName> listDrainingRegionServers() throws IOException {
-4059    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
-4060      getRpcControllerFactory()) {
-4061      @Override
-4062      public List<ServerName> rpcCall() throws ServiceException {
-4063        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
-4064        List<ServerName> servers = new ArrayList<>();
-4065        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
-4066            .getServerNameList()) {
-4067          servers.add(ProtobufUtil.toServerName(server));
-4068        }
-4069        return servers;
-4070      }
-4071    });
-4072  }
-4073
-4074  @Override
-4075  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
-4076    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4077    for (ServerName server : servers) {
-4078      pbServers.add(ProtobufUtil.toServerName(server));
-4079    }
-4080
-4081    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4082      @Override
-4083      public Void rpcCall() throws ServiceException {
-4084        RemoveDrainFromRegionServersRequest req = RemoveDrainFromRegionServersRequest.newBuilder()
-4085            .addAllServerName(pbServers).build();
-4086        master.removeDrainFromRegionServers(getRpcController(), req);
-4087        return null;
+4039    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4040      @Override
+4041      public Void rpcCall() throws ServiceException {
+4042        master.drainRegionServers(getRpcController(),
+4043            RequestConverter.buildDrainRegionServersRequest(servers));
+4044        return null;
+4045      }
+4046    });
+4047  }
+4048
+4049  @Override
+4050  public List<ServerName> listDrainingRegionServers() throws IOException {
+4051    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
+4052      getRpcControllerFactory()) {
+4053      @Override
+4054      public List<ServerName> rpcCall() throws ServiceException {
+4055        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
+4056        List<ServerName> servers = new ArrayList<>();
+4057        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
+4058            .getServerNameList()) {
+4059          servers.add(ProtobufUtil.toServerName(server));
+4060        }
+4061        return servers;
+4062      }
+4063    });
+4064  }
+4065
+4066  @Override
+4067  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
+4068    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4069      @Override
+4070      public Void rpcCall() throws ServiceException {
+4071        master.removeDrainFromRegionServers(getRpcController(), RequestConverter.buildRemoveDrainFromRegionServersRequest(servers));
+4072        return null;
+4073      }
+4074    });
+4075  }
+4076
+4077  @Override
+4078  public List<TableCFs> listReplicatedTableCFs() throws IOException {
+4079    List<TableCFs> replicatedTableCFs = new ArrayList<>();
+4080    HTableDescriptor[] tables = listTables();
+4081    for (HTableDescriptor table : tables) {
+4082      HColumnDescriptor[] columns = 
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
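The rewrite above replaces inline protobuf assembly in HBaseAdmin with shared
RequestConverter helpers. A plausible shape for one such helper, inferred from the
removed inline code (the real method lives in RequestConverter and may differ in
detail; this is a sketch, not its exact body):

    public static DrainRegionServersRequest buildDrainRegionServersRequest(
        List<ServerName> servers) {
      DrainRegionServersRequest.Builder builder = DrainRegionServersRequest.newBuilder();
      for (ServerName server : servers) {
        // the same ServerName -> protobuf conversion the old inline loop performed
        builder.addServerName(ProtobufUtil.toServerName(server));
      }
      return builder.build();
    }
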

2017-07-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 945d1bd..a2e2cbd 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 2007 - 2017 The Apache Software Foundation
 
   File: 2242,
- Errors: 14711,
+ Errors: 14778,
  Warnings: 0,
  Infos: 0
   
  [The rest of this message consists of per-file checkstyle counter bumps whose file
  names fall outside the quoted diff context. The surviving error-count changes are:
  0→1, 52→54, 7→8, 2→3, 4→5, 185→186, 9→11, 1→2, 9→10, 2→3, 20→24, 8→9, 34→35, 4→5,
  12→15, 5→6, 8→9, 0→1, 17→18, 0→1, 14→15, 11→12; the message cuts off at hunk
  @@ -27145,7.]

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
index 43db01d..79dc4e0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
@@ -235,7 +235,7 @@
 227  public BucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes,
 228      int writerThreadNum, int writerQLen, String persistencePath, int ioErrorsTolerationDuration)
 229      throws FileNotFoundException, IOException {
-230    this.ioEngine = getIOEngineFromName(ioEngineName, capacity);
+230    this.ioEngine = getIOEngineFromName(ioEngineName, capacity, persistencePath);
 231    this.writerThreads = new WriterThread[writerThreadNum];
 232    long blockNumCapacity = capacity / blockSize;
 233    if (blockNumCapacity >= Integer.MAX_VALUE) {
@@ -317,1229 +317,1230 @@
 309   * Get the IOEngine from the IO engine name
 310   * @param ioEngineName
 311   * @param capacity
-312   * @return the IOEngine
-313   * @throws IOException
-314   */
-315  private IOEngine getIOEngineFromName(String ioEngineName, long capacity)
-316      throws IOException {
-317    if (ioEngineName.startsWith("file:") || ioEngineName.startsWith("files:")) {
-318      // In order to make the usage simple, we only need the prefix 'files:' in
-319      // document whether one or multiple file(s), but also support 'file:' for
-320      // the compatibility
-321      String[] filePaths = ioEngineName.substring(ioEngineName.indexOf(":") + 1)
-322          .split(FileIOEngine.FILE_DELIMITER);
-323      return new FileIOEngine(capacity, filePaths);
-324    } else if (ioEngineName.startsWith("offheap")) {
-325      return new ByteBufferIOEngine(capacity, true);
-326    } else if (ioEngineName.startsWith("heap")) {
-327      return new ByteBufferIOEngine(capacity, false);
-328    } else if (ioEngineName.startsWith("mmap:")) {
-329      return new FileMmapEngine(ioEngineName.substring(5), capacity);
-330    } else {
-331      throw new IllegalArgumentException(
-332          "Don't understand io engine name for cache - prefix with file:, heap or offheap");
-333    }
-334  }
-335
-336  /**
-337   * Cache the block with the specified name and buffer.
-338   * @param cacheKey block's cache key
-339   * @param buf block buffer
-340   */
-341  @Override
-342  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
-343    cacheBlock(cacheKey, buf, false, false);
-344  }
-345
-346  /**
-347   * Cache the block with the specified name and buffer.
-348   * @param cacheKey block's cache key
-349   * @param cachedItem block buffer
-350   * @param inMemory if block is in-memory
-351   * @param cacheDataInL1
-352   */
-353  @Override
-354  public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory,
-355      final boolean cacheDataInL1) {
-356    cacheBlockWithWait(cacheKey, cachedItem, inMemory, wait_when_cache);
-357  }
-358
-359  /**
-360   * Cache the block to ramCache
-361   * @param cacheKey block's cache key
-362   * @param cachedItem block buffer
-363   * @param inMemory if block is in-memory
-364   * @param wait if true, blocking wait when queue is full
-365   */
-366  public void cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory,
-367      boolean wait) {
-368    if (LOG.isTraceEnabled()) LOG.trace("Caching key=" + cacheKey + ", item=" + cachedItem);
-369    if (!cacheEnabled) {
-370      return;
-371    }
-372
-373    if (backingMap.containsKey(cacheKey)) {
-374      return;
-375    }
-376
-377    /*
-378     * Stuff the entry into the RAM cache so it can get drained to the persistent store
-379     */
-380    RAMQueueEntry re =
-381        new RAMQueueEntry(cacheKey, cachedItem, accessCount.incrementAndGet(), inMemory);
-382    if (ramCache.putIfAbsent(cacheKey, re) != null) {
-383      return;
-384    }
-385    int queueNum = (cacheKey.hashCode() & 0x7FFFFFFF) % writerQueues.size();
-386    BlockingQueue<RAMQueueEntry> bq = writerQueues.get(queueNum);
-387    boolean successfulAddition = false;
-388    if (wait) {
-389      try {
-390        successfulAddition = bq.offer(re, DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
-391      } catch (InterruptedException e) {
-392        Thread.currentThread().interrupt();
-393      }
-394    } else {
-395      successfulAddition = bq.offer(re);
-396    }
-397    if (!successfulAddition) {
-398      ramCache.remove(cacheKey);
-399      cacheStats.failInsert();
-400    } else {
-401      

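The prefix dispatch in getIOEngineFromName above is what interprets the configured
bucket cache engine name. A construction sketch using the constructor shown in this
hunk (all sizes, counts, and paths below are illustrative assumptions, not
recommendations):

    // "offheap"                   -> ByteBufferIOEngine over direct memory
    // "heap"                      -> ByteBufferIOEngine over on-heap buffers
    // "file:/p" / "files:/p1,/p2" -> FileIOEngine over one or more files
    // "mmap:/p"                   -> FileMmapEngine over a memory-mapped file
    BucketCache cache = new BucketCache(
        "offheap",            // ioEngineName, parsed by getIOEngineFromName
        1024L * 1024 * 1024,  // capacity: 1 GiB
        64 * 1024,            // blockSize: 64 KiB
        null,                 // bucketSizes: null falls back to defaults
        3,                    // writerThreadNum
        64,                   // writerQLen
        null,                 // persistencePath: no persistence across restarts
        60 * 1000);           // ioErrorsTolerationDuration, in milliseconds
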
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
index 4806bdd..d1eae05 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames
@@ -74,7 +74,7 @@ var activeTableTab = "activeTableTab";
 
 
 Summary:
-Nested|
+Nested|
 Field|
 Constr|
 Method
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class AsyncHBaseAdmin
+public class AsyncHBaseAdmin
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements AsyncAdmin
 The implementation of AsyncAdmin.
@@ -124,97 +124,6 @@ implements 
 
 
-
-
-
-
-
-Nested Class Summary
-
-Nested Classes
-
-Modifier and Type
-Class and Description
-
-
-private class
-AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
-
-
-private static interface
-AsyncHBaseAdmin.AdminRpcCallRESP,REQ
-
-
-private static interface
-AsyncHBaseAdmin.ConverterD,S
-
-
-private class
-AsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer
-
-
-private class
-AsyncHBaseAdmin.CreateTableProcedureBiConsumer
-
-
-private class
-AsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer
-
-
-private class
-AsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer
-
-
-private class
-AsyncHBaseAdmin.DeleteTableProcedureBiConsumer
-
-
-private class
-AsyncHBaseAdmin.DisableTableProcedureBiConsumer
-
-
-private class
-AsyncHBaseAdmin.EnableTableProcedureBiConsumer
-
-
-private static interface
-AsyncHBaseAdmin.MasterRpcCallRESP,REQ
-
-
-private class
-AsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer
-
-
-private class
-AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer
-
-
-private class
-AsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer
-
-
-private class
-AsyncHBaseAdmin.NamespaceProcedureBiConsumer
-
-
-private class
-AsyncHBaseAdmin.ProcedureBiConsumer
-
-
-private static interface
-AsyncHBaseAdmin.TableOperator
-
-
-private class
-AsyncHBaseAdmin.TableProcedureBiConsumer
-
-
-private class
-AsyncHBaseAdmin.TruncateTableProcedureBiConsumer
-
-
-
-
 
 
 
@@ -228,44 +137,16 @@ implements Field and Description
 
 
-private AsyncConnectionImpl
-connection
-
-
-static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
-FLUSH_TABLE_PROCEDURE_SIGNATURE
-
-
 private static 
org.apache.commons.logging.Log
 LOG
 
 
-private int
-maxAttempts
-
-
-private RawAsyncTable
-metaTable
-
-
-private NonceGenerator
-ng
-
-
-private long
-operationTimeoutNs
-
-
-private long
-pauseNs
+private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
 title="class or interface in 

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
index dc12c09..82506d2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
@@ -54,2261 +54,2259 @@
 046import org.apache.commons.io.IOUtils;
 047import org.apache.commons.logging.Log;
 048import org.apache.commons.logging.LogFactory;
-049import org.apache.directory.api.util.OptionalComponentsMonitor;
-050import org.apache.hadoop.hbase.HRegionInfo;
-051import org.apache.hadoop.hbase.HRegionLocation;
-052import org.apache.hadoop.hbase.MetaTableAccessor;
-053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import org.apache.hadoop.hbase.NotServingRegionException;
-055import org.apache.hadoop.hbase.ProcedureInfo;
-056import org.apache.hadoop.hbase.RegionLocations;
-057import org.apache.hadoop.hbase.ServerName;
-058import org.apache.hadoop.hbase.NamespaceDescriptor;
-059import org.apache.hadoop.hbase.HConstants;
-060import org.apache.hadoop.hbase.TableExistsException;
-061import org.apache.hadoop.hbase.TableName;
-062import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-063import org.apache.hadoop.hbase.TableNotDisabledException;
-064import org.apache.hadoop.hbase.TableNotEnabledException;
-065import org.apache.hadoop.hbase.TableNotFoundException;
-066import org.apache.hadoop.hbase.UnknownRegionException;
-067import org.apache.hadoop.hbase.classification.InterfaceAudience;
-068import org.apache.hadoop.hbase.classification.InterfaceStability;
-069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import org.apache.hadoop.hbase.client.Scan.ReadType;
-072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import org.apache.hadoop.hbase.client.replication.TableCFs;
-074import org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import org.apache.hadoop.hbase.replication.ReplicationException;
-080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
index e65748d..91a0ffa 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
@@ -372,1874 +372,1873 @@
 364   * is stored in the name, so the returned object should only be used for the fields
 365   * in the regionName.
 366   */
-367  protected static HRegionInfo parseRegionInfoFromRegionName(byte[] regionName)
-368    throws IOException {
-369    byte[][] fields = HRegionInfo.parseRegionName(regionName);
-370    long regionId = Long.parseLong(Bytes.toString(fields[2]));
-371    int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0;
-372    return new HRegionInfo(
-373      TableName.valueOf(fields[0]), fields[1], fields[1], false, regionId, replicaId);
-374  }
-375
-376  /**
-377   * Gets the result in hbase:meta for the specified region.
-378   * @param connection connection we're using
-379   * @param regionName region we're looking for
-380   * @return result of the specified region
-381   * @throws IOException
-382   */
-383  public static Result getRegionResult(Connection connection,
-384      byte[] regionName) throws IOException {
-385    Get get = new Get(regionName);
-386    get.addFamily(HConstants.CATALOG_FAMILY);
-387    return get(getMetaHTable(connection), get);
-388  }
-389
-390  /**
-391   * Get regions from the merge qualifier of the specified merged region
-392   * @return null if it doesn't contain merge qualifier, else two merge regions
-393   * @throws IOException
-394   */
-395  @Nullable
-396  public static Pair<HRegionInfo, HRegionInfo> getRegionsFromMergeQualifier(
-397      Connection connection, byte[] regionName) throws IOException {
-398    Result result = getRegionResult(connection, regionName);
-399    HRegionInfo mergeA = getHRegionInfo(result, HConstants.MERGEA_QUALIFIER);
-400    HRegionInfo mergeB = getHRegionInfo(result, HConstants.MERGEB_QUALIFIER);
-401    if (mergeA == null && mergeB == null) {
-402      return null;
-403    }
-404    return new Pair<>(mergeA, mergeB);
-405 }
-406
-407  /**
-408   * Checks if the specified table exists.  Looks at the hbase:meta table hosted on
-409   * the specified server.
-410   * @param connection connection we're using
-411   * @param tableName table to check
-412   * @return true if the table exists in meta, false if not
-413   * @throws IOException
-414   */
-415  public static boolean tableExists(Connection connection,
-416      final TableName tableName)
-417  throws IOException {
-418    // Catalog tables always exist.
-419    return tableName.equals(TableName.META_TABLE_NAME)
-420        || getTableState(connection, tableName) != null;
-421  }
-422
-423  /**
-424   * Lists all of the regions currently in META.
-425   *
-426   * @param connection to connect with
-427   * @param excludeOfflinedSplitParents False if we are to include offlined/splitparents regions,
-428   *                                    true and we'll leave out offlined regions from returned list
-429   * @return List of all user-space regions.
-430   * @throws IOException
-431   */
-432  @VisibleForTesting
-433  public static List<HRegionInfo> getAllRegions(Connection connection,
-434      boolean excludeOfflinedSplitParents)
-435      throws IOException {
-436    List<Pair<HRegionInfo, ServerName>> result;
-437
-438    result = getTableRegionsAndLocations(connection, null,
-439        excludeOfflinedSplitParents);
-440
-441    return getListOfHRegionInfos(result);
-442
-443  }
-444
-445  /**
-446   * Gets all of the regions of the specified table. Do not use this method
-447   * to get meta table regions, use methods in MetaTableLocator instead.
-448   * @param connection connection we're using
-449   * @param tableName table we're looking for
-450   * @return Ordered list of {@link HRegionInfo}.
-451   * @throws IOException
-452   */
-453  public static List<HRegionInfo> getTableRegions(Connection connection, TableName tableName)
-454      throws IOException {
-455    return getTableRegions(connection, tableName, false);
-456  }
-457
-458  /**
-459   * Gets all of the regions of the specified table. Do not use this method
-460   * to get meta table regions, use methods in MetaTableLocator instead.
-461   * @param connection connection we're using
-462   * @param tableName table we're looking for
-463   * @param excludeOfflinedSplitParents If true, do not include offlined split
-464   * parents in the return.
-465   * @return Ordered list of {@link 

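Since, as the comment above notes, only the fields encoded in the region name survive
parseRegionInfoFromRegionName, it may help to restate the field layout the method
assumes (an annotated excerpt of the code above, no new behavior):

    byte[][] fields = HRegionInfo.parseRegionName(regionName);
    // fields[0] = table name
    // fields[1] = start key (reused as the end key of the skeletal HRegionInfo)
    // fields[2] = region id, a decimal-encoded long (typically a creation timestamp)
    // fields[3] = replica id, hex-encoded and optional; absent means replica 0
    long regionId = Long.parseLong(Bytes.toString(fields[2]));
    int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0;
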
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
index 6de986f..c895448 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
@@ -26,1592 +26,1693 @@
 018package org.apache.hadoop.hbase.master.balancer;
 019
 020import java.util.ArrayDeque;
-021import java.util.Arrays;
-022import java.util.Collection;
-023import java.util.Deque;
-024import java.util.HashMap;
-025import java.util.LinkedList;
-026import java.util.List;
-027import java.util.Map;
-028import java.util.Map.Entry;
-029import java.util.Random;
-030
-031import org.apache.commons.logging.Log;
-032import org.apache.commons.logging.LogFactory;
-033import org.apache.hadoop.conf.Configuration;
-034import org.apache.hadoop.hbase.ClusterStatus;
-035import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-036import org.apache.hadoop.hbase.HConstants;
-037import org.apache.hadoop.hbase.HRegionInfo;
-038import org.apache.hadoop.hbase.RegionLoad;
-039import org.apache.hadoop.hbase.ServerLoad;
-040import org.apache.hadoop.hbase.ServerName;
-041import org.apache.hadoop.hbase.TableName;
-042import org.apache.hadoop.hbase.classification.InterfaceAudience;
-043import org.apache.hadoop.hbase.master.MasterServices;
-044import org.apache.hadoop.hbase.master.RegionPlan;
-045import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
-046import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
-047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
-048import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
-049import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-050import org.apache.hadoop.hbase.util.Bytes;
-051import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-052
-053import com.google.common.collect.Lists;
-054
-055/**
-056 * <p>This is a best effort load balancer. Given a Cost function F(C) => x It will
-057 * randomly try and mutate the cluster to Cprime. If F(Cprime) < F(C) then the
-058 * new cluster state becomes the plan. It includes costs functions to compute the cost of:</p>
-059 * <ul>
-060 * <li>Region Load</li>
-061 * <li>Table Load</li>
-062 * <li>Data Locality</li>
-063 * <li>Memstore Sizes</li>
-064 * <li>Storefile Sizes</li>
-065 * </ul>
-066 *
-067 *
-068 * <p>Every cost function returns a number between 0 and 1 inclusive; where 0 is the lowest cost
-069 * best solution, and 1 is the highest possible cost and the worst solution.  The computed costs are
-070 * scaled by their respective multipliers:</p>
+021import java.util.ArrayList;
+022import java.util.Arrays;
+023import java.util.Collection;
+024import java.util.Collections;
+025import java.util.Deque;
+026import java.util.HashMap;
+027import java.util.LinkedList;
+028import java.util.List;
+029import java.util.Map;
+030import java.util.Map.Entry;
+031import java.util.Random;
+032
+033import org.apache.commons.logging.Log;
+034import org.apache.commons.logging.LogFactory;
+035import org.apache.hadoop.conf.Configuration;
+036import org.apache.hadoop.hbase.ClusterStatus;
+037import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+038import org.apache.hadoop.hbase.HConstants;
+039import org.apache.hadoop.hbase.HRegionInfo;
+040import org.apache.hadoop.hbase.RegionLoad;
+041import org.apache.hadoop.hbase.ServerLoad;
+042import org.apache.hadoop.hbase.ServerName;
+043import org.apache.hadoop.hbase.TableName;
+044import org.apache.hadoop.hbase.classification.InterfaceAudience;
+045import org.apache.hadoop.hbase.master.MasterServices;
+046import org.apache.hadoop.hbase.master.RegionPlan;
+047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
+048import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
+049import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
+050import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
+051import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
+052import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
+053import org.apache.hadoop.hbase.util.Bytes;
+054import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+055
+056import com.google.common.base.Optional;
+057import com.google.common.collect.Lists;
+058
+059/**
+060 * <p>This 

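A minimal sketch of the scaled-cost scheme that comment describes (the interface and
names here are hypothetical simplifications; the real balancer wires up many concrete
cost functions and a randomized search loop around them):

    interface CostFunction {
      float cost();        // normalized: 0 = best possible placement, 1 = worst
      float multiplier();  // relative weight, taken from configuration
    }

    static double computeCost(List<CostFunction> functions) {
      double total = 0;
      for (CostFunction f : functions) {
        if (f.multiplier() > 0) {             // functions with weight 0 are disabled
          total += f.multiplier() * f.cost(); // scale each normalized cost
        }
      }
      return total; // lower is better; compared before and after each candidate mutation
    }
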
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.TablesWithQuotas.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.TablesWithQuotas.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.TablesWithQuotas.html
index 78816d5..ca6eaae 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.TablesWithQuotas.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaObserverChore.TablesWithQuotas.html
@@ -169,599 +169,601 @@
 161    // The current "view" of region space use. Used henceforth.
 162    final Map<HRegionInfo,Long> reportedRegionSpaceUse = quotaManager.snapshotRegionSizes();
 163    if (LOG.isTraceEnabled()) {
-164      LOG.trace("Using " + reportedRegionSpaceUse.size() + " region space use reports");
-165    }
-166
-167    // Remove the "old" region reports
-168    pruneOldRegionReports();
-169
-170    // Create the stores to track table and namespace snapshots
-171    initializeSnapshotStores(reportedRegionSpaceUse);
-172    // Report the number of (non-expired) region size reports
-173    if (metrics != null) {
-174      metrics.setNumRegionSizeReports(reportedRegionSpaceUse.size());
-175    }
-176
-177    // Filter out tables for which we don't have adequate regionspace reports yet.
-178    // Important that we do this after we instantiate the stores above
-179    // This gives us a set of Tables which may or may not be violating their quota.
-180    // To be safe, we want to make sure that these are not in violation.
-181    Set<TableName> tablesInLimbo = tablesWithQuotas.filterInsufficientlyReportedTables(
-182        tableSnapshotStore);
-183
-184    if (LOG.isTraceEnabled()) {
-185      LOG.trace("Filtered insufficiently reported tables, left with " +
-186          reportedRegionSpaceUse.size() + " regions reported");
-187    }
-188
-189    for (TableName tableInLimbo : tablesInLimbo) {
-190      final SpaceQuotaSnapshot currentSnapshot = tableSnapshotStore.getCurrentState(tableInLimbo);
-191      if (currentSnapshot.getQuotaStatus().isInViolation()) {
-192        if (LOG.isTraceEnabled()) {
-193          LOG.trace("Moving " + tableInLimbo + " out of violation because fewer region sizes were"
-194              + " reported than required.");
-195        }
-196        SpaceQuotaSnapshot targetSnapshot = new SpaceQuotaSnapshot(
-197            SpaceQuotaStatus.notInViolation(), currentSnapshot.getUsage(),
-198            currentSnapshot.getLimit());
-199        this.snapshotNotifier.transitionTable(tableInLimbo, targetSnapshot);
-200        // Update it in the Table QuotaStore so that memory is consistent with no violation.
-201        tableSnapshotStore.setCurrentState(tableInLimbo, targetSnapshot);
-202      }
-203    }
-204
-205    // Transition each table to/from quota violation based on the current and target state.
-206    // Only table quotas are enacted.
-207    final Set<TableName> tablesWithTableQuotas = tablesWithQuotas.getTableQuotaTables();
-208    processTablesWithQuotas(tablesWithTableQuotas);
-209
-210    // For each Namespace quota, transition each table in the namespace in or out of violation
-211    // only if a table quota violation policy has not already been applied.
-212    final Set<String> namespacesWithQuotas = tablesWithQuotas.getNamespacesWithQuotas();
-213    final Multimap<String,TableName> tablesByNamespace = tablesWithQuotas.getTablesByNamespace();
-214    processNamespacesWithQuotas(namespacesWithQuotas, tablesByNamespace);
-215  }
-216
-217  void initializeSnapshotStores(Map<HRegionInfo,Long> regionSizes) {
-218    Map<HRegionInfo,Long> immutableRegionSpaceUse = Collections.unmodifiableMap(regionSizes);
-219    if (tableSnapshotStore == null) {
-220      tableSnapshotStore = new TableQuotaSnapshotStore(conn, this, immutableRegionSpaceUse);
-221    } else {
-222      tableSnapshotStore.setRegionUsage(immutableRegionSpaceUse);
-223    }
-224    if (namespaceSnapshotStore == null) {
-225      namespaceSnapshotStore = new NamespaceQuotaSnapshotStore(
-226          conn, this, immutableRegionSpaceUse);
-227    } else {
-228      namespaceSnapshotStore.setRegionUsage(immutableRegionSpaceUse);
-229    }
-230  }
-231
-232  /**
-233   * Processes each {@code TableName} which has a quota defined and moves it in or out of
-234   * violation based on the space use.
-235   *
-236   * @param tablesWithTableQuotas The HBase tables which have quotas defined
-237   */
-238  void processTablesWithQuotas(final Set<TableName> tablesWithTableQuotas) throws IOException {
-239    long numTablesInViolation = 0L;
-240    for (TableName table : tablesWithTableQuotas) {
-241      final SpaceQuota spaceQuota = tableSnapshotStore.getSpaceQuota(table);
-242      if (spaceQuota == null) {
-243        if 

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
index 980406f..ec71b82 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
@@ -798,14 +798,6 @@ service.
 
 
 
-HTableDescriptor(TableName name,
-    Collection<HColumnDescriptor> families,
-    Map<Bytes,Bytes> values,
-    Map<String,String> configuration)
-  Deprecated.
-
 HTableDescriptor(TableName name, HTableDescriptor desc)
   Deprecated. ... but using a different table name.
 TableExistsException(TableName t)
 TableNotDisabledException(TableName tableName)
 TableNotEnabledException(TableName tableName)
 TableNotFoundException(TableName tableName)
 TableVisitorBase(TableName tableName)
@@ -2323,28 +2315,28 @@
 CompletableFuture<Void>
-AsyncHBaseAdmin.addColumnFamily(TableName tableName, HColumnDescriptor columnFamily)
+AsyncHBaseAdmin.addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily)
+CompletableFuture<Void>
+AsyncAdmin.addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily)
+  Add a column family to an existing table.
 void
 Admin.addColumnFamily(TableName tableName, HColumnDescriptor columnFamily)
   Add a column family to an existing table.
 void
 HBaseAdmin.addColumnFamily(TableName tableName, HColumnDescriptor columnFamily)
-CompletableFuture<Void>
-AsyncAdmin.addColumnFamily(TableName tableName, HColumnDescriptor columnFamily)
-  Add a column family to an existing table.
 Future<Void>
 Admin.addColumnFamilyAsync(TableName tableName,
@@ -3505,28 +3497,28 @@
 CompletableFuture<Void>
-AsyncHBaseAdmin.modifyColumnFamily(TableName tableName, HColumnDescriptor columnFamily)
+AsyncHBaseAdmin.modifyColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily)
+CompletableFuture<Void>
+AsyncAdmin.modifyColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily)
+  Modify an existing column family on a table.
 void
 Admin.modifyColumnFamily(TableName tableName, HColumnDescriptor columnFamily)
   Modify an existing column family on a table.
 void
 HBaseAdmin.modifyColumnFamily(TableName tableName,
   

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/book.html
--
diff --git a/book.html b/book.html
index f0833f3..05774fe 100644
--- a/book.html
+++ b/book.html
@@ -17,7 +17,7 @@
 
 Apache HBase Team
 mailto:hbase-...@lists.apache.org;>hbase-...@lists.apache.org
-version 2.0.0-SNAPSHOT
+version 3.0.0-SNAPSHOT
 
 
 Contents
@@ -499,8 +499,8 @@ Do not download the file ending in src.tar.gz for 
now.
 Extract the downloaded file, and change to the newly-created directory.
 
 
-$ tar xzvf 
hbase-2.0.0-SNAPSHOT-bin.tar.gz
-$ cd hbase-2.0.0-SNAPSHOT/
+$ tar xzvf 
hbase-3.0.0-SNAPSHOT-bin.tar.gz
+$ cd hbase-3.0.0-SNAPSHOT/
 
 
 
@@ -34726,7 +34726,7 @@ The server will return cellblocks compressed using this 
same compressor as long
 
 
 
-Version 2.0.0-SNAPSHOT
+Version 3.0.0-SNAPSHOT
 Last updated 2017-04-11 16:49:09 +00:00
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/bulk-loads.html
--
diff --git a/bulk-loads.html b/bulk-loads.html
index 8d81011..9d2a1fd 100644
--- a/bulk-loads.html
+++ b/bulk-loads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase   
   Bulk Loads in Apache HBase (TM)
@@ -311,7 +311,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-06-06
+  Last Published: 
2017-06-07
 
 
 



[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6ad4f21a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
index dac1be7..c63a61c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
@@ -329,6 +329,22 @@
 SplitTableRegionProcedure.getParentRegionServerName(MasterProcedureEnvenv)
 
 
+protected ProcedureMetrics
+UnassignProcedure.getProcedureMetrics(MasterProcedureEnvenv)
+
+
+protected ProcedureMetrics
+SplitTableRegionProcedure.getProcedureMetrics(MasterProcedureEnvenv)
+
+
+protected ProcedureMetrics
+MergeTableRegionsProcedure.getProcedureMetrics(MasterProcedureEnvenv)
+
+
+protected ProcedureMetrics
+AssignProcedure.getProcedureMetrics(MasterProcedureEnvenv)
+
+
 (package private) static 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse
 Util.getRegionInfoResponse(MasterProcedureEnvenv,
  ServerNameregionLocation,
@@ -1222,82 +1238,86 @@
 ProcedureSyncWait.getMasterQuotaManager(MasterProcedureEnvenv)
 
 
+protected ProcedureMetrics
+ServerCrashProcedure.getProcedureMetrics(MasterProcedureEnvenv)
+
+
 private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
 AddColumnFamilyProcedure.getRegionInfoList(MasterProcedureEnvenv)
 
-
+
 private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
 ModifyTableProcedure.getRegionInfoList(MasterProcedureEnvenv)
 
-
+
 private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
 DeleteColumnFamilyProcedure.getRegionInfoList(MasterProcedureEnvenv)
 
-
+
 private RegionLoad
 DispatchMergingRegionsProcedure.getRegionLoad(MasterProcedureEnvenv,
  ServerNamesn,
  HRegionInfohri)
 
-
+
 private ServerName
 DispatchMergingRegionsProcedure.getServerName(MasterProcedureEnvenv)
 The procedure could be restarted from a different 
machine.
 
 
-
+
 private static TableNamespaceManager
 DeleteNamespaceProcedure.getTableNamespaceManager(MasterProcedureEnvenv)
 
-
+
 private static TableNamespaceManager
 CreateNamespaceProcedure.getTableNamespaceManager(MasterProcedureEnvenv)
 
-
+
 private TableNamespaceManager
 ModifyNamespaceProcedure.getTableNamespaceManager(MasterProcedureEnvenv)
 
-
+
 private int
 DispatchMergingRegionsProcedure.getTimeout(MasterProcedureEnvenv)
 The procedure could be restarted from a different 
machine.
 
 
-
+
 private void
 ServerCrashProcedure.handleRIT(MasterProcedureEnvenv,
  http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInforegions)
 Handle any outstanding RIT that are up against 
this.serverName, the crashed server.
 
 
-
+
 protected boolean
 AbstractStateMachineRegionProcedure.hasLock(MasterProcedureEnvenv)
 
-
+
 protected boolean
 AbstractStateMachineRegionProcedure.holdLock(MasterProcedureEnvenv)
 
-
+
 private void
 ModifyNamespaceProcedure.insertIntoNSTable(MasterProcedureEnvenv)
 Insert/update the row into namespace table
 
 
-
+
 protected static void
 CreateNamespaceProcedure.insertIntoNSTable(MasterProcedureEnvenv,
  NamespaceDescriptornsDescriptor)
 Insert the row into ns table
 
 
-
+
 private boolean
 DispatchMergingRegionsProcedure.isRegionsOnTheSameServer(MasterProcedureEnvenv)
 The procedure could be restarted from a different 
machine.
 
 
-
+
 protected boolean
 ServerCrashProcedure.isYieldBeforeExecuteFromState(MasterProcedureEnvenv,
  
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ServerCrashStatestate)
@@ -1306,554 +1326,554 @@
  before we move to the next.
 
 
-
+
 private boolean
 DispatchMergingRegionsProcedure.MoveRegionsToSameRS(MasterProcedureEnvenv)
 Move all regions to the same region server
 
 
-
+
 protected static void
 CreateTableProcedure.moveTempDirectoryToHBaseRoot(MasterProcedureEnvenv,
 HTableDescriptorhTableDescriptor,
 
org.apache.hadoop.fs.PathtempTableDir)
 
-
+
 private void
 AddColumnFamilyProcedure.postAdd(MasterProcedureEnvenv,

org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStatestate)
 Action after adding column family.
 
 
-
+
 private void
 CloneSnapshotProcedure.postCloneSnapshot(MasterProcedureEnvenv)
 Action after cloning from snapshot.
 
 
-
+
 private void
 

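The getProcedureMetrics(MasterProcedureEnv) overrides listed in the diff above are a template-method hook: the procedure framework asks each concrete procedure (assign, unassign, split, merge, server crash) which metrics object its executions should update. A standalone toy model of that shape in plain Java, not the real HBase classes:

interface ProcedureMetrics {
  void incrSubmitted();
  void incrFailed();
}

abstract class BaseProcedure<TEnv> {
  // Hook mirroring the overrides above: each procedure type exposes its own metrics bucket.
  protected abstract ProcedureMetrics getProcedureMetrics(TEnv env);

  final void onSubmit(TEnv env) {
    ProcedureMetrics metrics = getProcedureMetrics(env);
    if (metrics != null) {
      metrics.incrSubmitted(); // the framework, not the procedure body, updates the counters
    }
  }
}
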
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html
index b6c2fe3..1765903 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html
@@ -60,892 +60,917 @@
 052import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 053import 
org.apache.hadoop.hbase.fs.HFileSystem;
 054import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-055import 
org.apache.hadoop.hbase.io.compress.Compression;
-056import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-057import 
org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-058import 
org.apache.hadoop.hbase.regionserver.CellSink;
-059import 
org.apache.hadoop.hbase.regionserver.ShipperListener;
-060import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-063import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair;
-064import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;
-065import 
org.apache.hadoop.hbase.util.BloomFilterWriter;
-066import 
org.apache.hadoop.hbase.util.Bytes;
-067import 
org.apache.hadoop.hbase.util.FSUtils;
-068import org.apache.hadoop.io.Writable;
-069
-070import 
com.google.common.annotations.VisibleForTesting;
-071import 
com.google.common.base.Preconditions;
-072
-073/**
-074 * File format for hbase.
-075 * A file of sorted key/value pairs. Both keys and values are byte arrays.
-076 * <p>
-077 * The memory footprint of a HFile includes the following (below is taken from the
-078 * <a
-079 * href="https://issues.apache.org/jira/browse/HADOOP-3315">TFile</a> documentation
-080 * but applies also to HFile):
-081 * <ul>
-082 * <li>Some constant overhead of reading or writing a compressed block.
+055import org.apache.hadoop.hbase.io.MetricsIO;
+056import org.apache.hadoop.hbase.io.MetricsIOWrapperImpl;
+057import org.apache.hadoop.hbase.io.compress.Compression;
+058import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+059import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
+060import org.apache.hadoop.hbase.regionserver.CellSink;
+061import org.apache.hadoop.hbase.regionserver.ShipperListener;
+062import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+063import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+064import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+065import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair;
+066import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;
+067import org.apache.hadoop.hbase.util.BloomFilterWriter;
+068import org.apache.hadoop.hbase.util.Bytes;
+069import org.apache.hadoop.hbase.util.FSUtils;
+070import org.apache.hadoop.io.Writable;
+071
+072import com.google.common.annotations.VisibleForTesting;
+073import com.google.common.base.Preconditions;
+074
+075/**
+076 * File format for hbase.
+077 * A file of sorted key/value pairs. Both keys and values are byte arrays.
+078 * <p>
+079 * The memory footprint of a HFile includes the following (below is taken from the
+080 * <a
+081 * href="https://issues.apache.org/jira/browse/HADOOP-3315">TFile</a> documentation
+082 * but applies also to HFile):
 083 * <ul>
-084 * <li>Each compressed block requires one compression/decompression codec for
-085 * I/O.
-086 * <li>Temporary space to buffer the key.
-087 * <li>Temporary space to buffer the value.
-088 * </ul>
-089 * <li>HFile index, which is proportional to the total number of Data Blocks.
-090 * The total amount of memory needed to hold the index can be estimated as
-091 * (56+AvgKeySize)*NumBlocks.
-092 * </ul>
-093 * Suggestions on performance optimization.
-094 * <ul>
-095 * <li>Minimum block size. We recommend a setting of minimum block size between
-096 * 8KB to 1MB for general usage. Larger block size is preferred if files are
-097 * primarily for sequential access. However, it would lead to inefficient random
-098 * access (because there are more data to decompress). Smaller blocks are good
-099 * for random access, but require more memory to hold the block index, and may
-100 * be slower to create (because we must flush the compressor stream at the
-101 * conclusion of each data block, which leads to an FS I/O flush). Further, due
-102 * to the internal caching in Compression codec, the smallest possible block
-103 * size would be around 20KB-30KB.
-104 * <li>The current implementation does not offer true multi-threading for
-105 * reading. The implementation uses

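The block-size guidance in the HFile javadoc above (8KB to 1MB, smaller for random reads, larger for sequential scans) is applied per column family. A hedged sketch using the public HColumnDescriptor API; the table and family names are made up:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class BlockSizeExample {
  public static HTableDescriptor describe() {
    HTableDescriptor table = new HTableDescriptor(TableName.valueOf("t1")); // hypothetical table
    HColumnDescriptor family = new HColumnDescriptor("cf");                 // hypothetical family
    family.setBlocksize(8 * 1024);      // small blocks favor random reads at the cost of a bigger index
    // family.setBlocksize(1024 * 1024); // large blocks favor sequential scans
    table.addFamily(family);
    return table;
  }
}
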
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6dd31117/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
index 03bad9a..62e2e49 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
@@ -565,7 +565,11 @@
 
 default void
 RegionObserver.postCompleteSplit(ObserverContext<RegionCoprocessorEnvironment> ctx)
-Called after any split request is processed.
+Deprecated.
+No longer called in hbase2/AMv2 given the master runs splits now;
+ implement MasterObserver.postCompletedSplitRegionAction(ObserverContext, HRegionInfo, HRegionInfo)
+ instead.
+
 
 
 
@@ -1114,7 +1118,9 @@
 
 default void
 RegionObserver.postRollBackSplit(ObserverContextRegionCoprocessorEnvironmentctx)
-Deprecated.
+Deprecated.
+No longer called in 
hbase2/AMv2 given the master runs splits now;
+
 
 
 
@@ -1242,7 +1248,7 @@
  Regionl,
  Regionr)
 Deprecated.
-Use postCompleteSplit() 
instead
+No longer called in 
hbase2/AMv2 given the master runs splits now;
 
 
 
@@ -2271,7 +2277,9 @@
 
 default void
 RegionObserver.preRollBackSplit(ObserverContextRegionCoprocessorEnvironmentctx)
-Deprecated.
+Deprecated.
+No longer called in 
hbase2/AMv2 given the master runs splits now;
+
 
 
 
@@ -2374,8 +2382,7 @@
 default void
 RegionObserver.preSplit(ObserverContextRegionCoprocessorEnvironmentc)
 Deprecated.
-Use preSplit(
-final ObserverContextRegionCoprocessorEnvironment c, byte[] 
splitRow)
+No longer called in 
hbase2/AMv2 given the master runs splits now;
 
 
 
@@ -2383,13 +2390,17 @@
 default void
 RegionObserver.preSplit(ObserverContextRegionCoprocessorEnvironmentc,
 byte[]splitRow)
-Deprecated.
+Deprecated.
+No longer called in 
hbase2/AMv2 given the master runs splits now;
+
 
 
 
 default void
 RegionObserver.preSplitAfterPONR(ObserverContextRegionCoprocessorEnvironmentctx)
-Deprecated.
+Deprecated.
+No longer called in 
hbase2/AMv2 given the master runs splits now;
+
 
 
 
@@ -2397,7 +2408,9 @@
 RegionObserver.preSplitBeforePONR(ObserverContextRegionCoprocessorEnvironmentctx,
   byte[]splitKey,
   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListMutationmetaEntries)
-Deprecated.
+Deprecated.
+No longer called in 
hbase2/AMv2 given the master runs splits now;
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6dd31117/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html
 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html
index 838a586..084629a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html
@@ -350,7 +350,11 @@
 
 default void
 RegionObserver.postCompleteSplit(ObserverContext<RegionCoprocessorEnvironment> ctx)
-Called after any split request is processed.
+Deprecated.
+No longer called in hbase2/AMv2 given the master runs splits now;
+ implement MasterObserver.postCompletedSplitRegionAction(ObserverContext, HRegionInfo, HRegionInfo)
+ instead.
+
 
 
 
@@ -476,7 +480,9 @@
 
 default void
 RegionObserver.postRollBackSplit(ObserverContextRegionCoprocessorEnvironmentctx)
-Deprecated.
+Deprecated.
+No longer called in 
hbase2/AMv2 given the master runs splits now;
+
 
 
 
@@ -534,7 +540,7 @@
  Regionl,
  Regionr)
 Deprecated.
-Use postCompleteSplit() 
instead
+No longer called in 
hbase2/AMv2 given the master runs splits now;
 
 
 
@@ -913,7 +919,9 @@
 
 default void
 RegionObserver.preRollBackSplit(ObserverContextRegionCoprocessorEnvironmentctx)
-Deprecated.
+Deprecated.
+No longer called in 
hbase2/AMv2 given the master runs splits now;
+
 
 
 
@@ -945,8 +953,7 @@
 default void
 RegionObserver.preSplit(ObserverContextRegionCoprocessorEnvironmentc)
 Deprecated.
-Use preSplit(
-final ObserverContextRegionCoprocessorEnvironment c, byte[] 
splitRow)
+No longer called in 
hbase2/AMv2 given the master runs splits now;
 
 
 
@@ -954,13 +961,17 @@
 default void
 RegionObserver.preSplit(ObserverContextRegionCoprocessorEnvironmentc,
 byte[]splitRow)
-Deprecated.
+Deprecated.
+No longer called in 
hbase2/AMv2 given the master runs splits now;
+
 
 
 
 default void
 RegionObserver.preSplitAfterPONR(ObserverContextRegionCoprocessorEnvironmentctx)
-Deprecated.
+Deprecated.
+No longer called 

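Every deprecation note above points at the same migration: split hooks moved off the region server's RegionObserver and onto the master's MasterObserver, because AMv2 runs splits from the master. A hedged sketch of the replacement hook; the MasterCoprocessorEnvironment type parameter is assumed from the MasterObserver interface, and the observer methods are assumed to be default methods, as the RegionObserver methods in this diff are:

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

public class SplitAuditObserver implements MasterObserver {
  @Override
  public void postCompletedSplitRegionAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
      HRegionInfo regionInfoA, HRegionInfo regionInfoB) throws IOException {
    // Runs on the master after a split completes; the arguments are the two daughter regions.
    System.out.println("split produced " + regionInfoA.getRegionNameAsString()
        + " and " + regionInfoB.getRegionNameAsString());
  }
}
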
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/client/class-use/ClusterConnection.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/ClusterConnection.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/ClusterConnection.html
index b78dcdf..9fdf96b 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/ClusterConnection.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/ClusterConnection.html
@@ -336,6 +336,20 @@
TableNametableName)
 
 
+static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse
+QuotaStatusCalls.getMasterQuotaStates(ClusterConnection clusterConn,
+int timeout)
+See QuotaStatusCalls.getMasterQuotaStates(Connection, RpcControllerFactory, RpcRetryingCallerFactory, int)
+
+
+
+static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse
+QuotaStatusCalls.getMasterRegionSizes(ClusterConnection clusterConn,
+int timeout)
+See QuotaStatusCalls.getMasterRegionSizes(Connection, RpcControllerFactory, RpcRetryingCallerFactory, int)
+
+
+
 (package private) static RegionLocations
 RpcRetryingCallerWithReadReplicas.getRegionLocations(booleanuseCache,
   intreplicaId,
@@ -352,6 +366,24 @@
   intreplicaId)
 
 
+static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse
+QuotaStatusCalls.getRegionServerQuotaSnapshot(ClusterConnection clusterConn,
+int timeout,
+ServerName sn)
+See QuotaStatusCalls.getRegionServerQuotaSnapshot(ClusterConnection, RpcControllerFactory, int, ServerName)
+
+
+
+static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse
+QuotaStatusCalls.getRegionServerQuotaSnapshot(ClusterConnection conn,
+RpcControllerFactory factory,
+int timeout,
+ServerName sn)
+Executes an RPC to the RegionServer identified by the ServerName to fetch its view
+ on space quotas.
+
+
+
 (package private) static NonceGenerator
 ConnectionImplementation.injectNonceGeneratorForTesting(ClusterConnectionconn,
   NonceGeneratorcnm)

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
index 8863c9a..c2e9c5e 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
@@ -1175,6 +1175,24 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringbulkToken)
 
 
+static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse
+QuotaStatusCalls.getMasterQuotaStates(Connection conn,
+RpcControllerFactory factory,
+RpcRetryingCallerFactory rpcCaller,
+int timeout)
+Executes an RPC to the HBase master to fetch its view on space quotas.
+
+
+
+static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse
+QuotaStatusCalls.getMasterRegionSizes(Connection conn,
+RpcControllerFactory factory,
+RpcRetryingCallerFactory rpcCaller,
+int timeout)
+Executes an RPC to the HBase master to fetch its view on the Region sizes.
+
+
+
 (package private) static Registry
 RegistryFactory.getRegistry(Connectionconnection)
 
@@ -1664,6 +1682,30 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 private Connection
+TableQuotaSnapshotStore.conn
+
+
+private Connection
+SpaceQuotaRefresherChore.conn
+
+
+private Connection
+NamespaceQuotaSnapshotStore.conn
+
+
+private Connection
+QuotaObserverChore.conn
+
+
+private Connection
+QuotaObserverChore.TablesWithQuotas.conn
+
+
+private Connection
+TableSpaceQuotaSnapshotNotifier.conn
+
+
+private Connection
 QuotaRetriever.connection
 Connection to use.
 
@@ -1671,6 +1713,23 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
+Methods in org.apache.hadoop.hbase.quotas
 that return Connection
+
+Modifier and Type
+Method and Description
+
+
+
+(package private) Connection
+RegionServerSpaceQuotaManager.getConnection()
+
+
+(package private) Connection
+SpaceQuotaRefresherChore.getConnection()
+
+
+
+
 Methods in 

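The new QuotaStatusCalls helpers above are static entry points for reading quota state over RPC. A hedged usage sketch: the import package, the cast from Connection to the internal ClusterConnection interface, and the 30-second timeout are all assumptions, not confirmed by this diff:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.QuotaStatusCalls; // package assumed
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;

public class QuotaStatusExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      ClusterConnection clusterConn = (ClusterConnection) conn; // assumed cast, internal API
      GetQuotaStatesResponse states =
          QuotaStatusCalls.getMasterQuotaStates(clusterConn, 30000); // timeout in ms, assumed value
      System.out.println(states);
    }
  }
}
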
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html
index 15c41e8..d1b87db 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html
@@ -724,616 +724,614 @@
 716  npe = new NullPointerException("row 
is null");
 717} else if (family == null) {
 718  npe = new 
NullPointerException("family is null");
-719} else if (qualifier == null) {
-720  npe = new 
NullPointerException("qualifier is null");
-721}
-722if (npe != null) {
-723  throw new IOException(
-724  "Invalid arguments to 
incrementColumnValue", npe);
-725}
-726
-727NoncedRegionServerCallable<Long> callable =
-728new NoncedRegionServerCallable<Long>(this.connection, getName(), row,
-729this.rpcControllerFactory.newController()) {
-730  @Override
-731  protected Long rpcCall() throws 
Exception {
-732MutateRequest request = 
RequestConverter.buildIncrementRequest(
-733  
getLocation().getRegionInfo().getRegionName(), row, family,
-734  qualifier, amount, durability, 
getNonceGroup(), getNonce());
-735MutateResponse response = 
doMutate(request);
-736Result result = 
ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner());
-737return 
Long.valueOf(Bytes.toLong(result.getValue(family, qualifier)));
-738  }
-739};
-740return rpcCallerFactory.<Long> newCaller(this.writeRpcTimeout).
-741callWithRetries(callable, this.operationTimeout);
-742  }
-743
-744  /**
-745   * {@inheritDoc}
-746   */
-747  @Override
-748  public boolean checkAndPut(final byte 
[] row,
-749  final byte [] family, final byte [] 
qualifier, final byte [] value,
-750  final Put put)
-751  throws IOException {
-752ClientServiceCallable<Boolean> callable = new ClientServiceCallable<Boolean>(this.connection, getName(), row,
-753this.rpcControllerFactory.newController()) {
-754  @Override
-755  protected Boolean rpcCall() throws 
Exception {
-756MutateRequest request = 
RequestConverter.buildMutateRequest(
-757  
getLocation().getRegionInfo().getRegionName(), row, family, qualifier,
-758  new BinaryComparator(value), 
CompareType.EQUAL, put);
-759MutateResponse response = 
doMutate(request);
-760return 
Boolean.valueOf(response.getProcessed());
-761  }
-762};
-763return rpcCallerFactory.<Boolean> newCaller(this.writeRpcTimeout).
-764callWithRetries(callable, this.operationTimeout);
-765  }
-766
-767  /**
-768   * {@inheritDoc}
-769   */
-770  @Override
-771  public boolean checkAndPut(final byte 
[] row, final byte [] family,
-772  final byte [] qualifier, final 
CompareOp compareOp, final byte [] value,
-773  final Put put)
-774  throws IOException {
-775ClientServiceCallable<Boolean> callable =
-776new ClientServiceCallable<Boolean>(this.connection, getName(), row,
-777this.rpcControllerFactory.newController()) {
-778  @Override
-779  protected Boolean rpcCall() throws 
Exception {
-780CompareType compareType = 
CompareType.valueOf(compareOp.name());
-781MutateRequest request = 
RequestConverter.buildMutateRequest(
-782  
getLocation().getRegionInfo().getRegionName(), row, family, qualifier,
-783  new BinaryComparator(value), 
compareType, put);
-784MutateResponse response = 
doMutate(request);
-785return 
Boolean.valueOf(response.getProcessed());
-786  }
-787};
-788return rpcCallerFactory.<Boolean> newCaller(this.writeRpcTimeout).
-789callWithRetries(callable, this.operationTimeout);
-790  }
-791
-792  /**
-793   * {@inheritDoc}
-794   */
-795  @Override
-796  public boolean checkAndDelete(final 
byte [] row, final byte [] family, final byte [] qualifier,
-797  final byte [] value, final Delete 
delete) throws IOException {
-798return checkAndDelete(row, family, 
qualifier, CompareOp.EQUAL, value, delete);
-799  }
-800
-801  /**
-802   * {@inheritDoc}
-803   */
-804  @Override
-805  public boolean checkAndDelete(final 
byte [] row, final byte [] family,
-806  final byte [] qualifier, final 
CompareOp compareOp, final byte [] value,
-807  final Delete delete)
-808  throws IOException {
-809CancellableRegionServerCallable<SingleResponse> callable =
-810new CancellableRegionServerCallable<SingleResponse>(
-811this.connection, getName(), 
row, this.rpcControllerFactory.newController(),
-812writeRpcTimeout, new 
RetryingTimeTracker().start()) {
-813  @Override
-814  protected SingleResponse rpcCall() 
throws Exception 

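All of the retrying-callable plumbing above backs one-line client calls. A hedged usage sketch against the public Table API; table, family, and qualifier names are made up:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("t1"))) { // hypothetical table
      byte[] row = Bytes.toBytes("r1");
      byte[] cf = Bytes.toBytes("cf");
      byte[] qual = Bytes.toBytes("counter");
      // Atomic increment, served by the NoncedRegionServerCallable shown above.
      long next = table.incrementColumnValue(row, cf, qual, 1L);
      // Compare-and-set: the Put applies only if the current value still matches.
      Put put = new Put(row).addColumn(cf, Bytes.toBytes("state"), Bytes.toBytes("seen"));
      boolean applied = table.checkAndPut(row, cf, qual, Bytes.toBytes(next), put);
      System.out.println("applied=" + applied);
    }
  }
}
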
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b4cf63f/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
index fa19477..a11a5f5 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
@@ -195,7 +195,7 @@ implements countPerRow
 
 
-private List<KeyValueScanner>
+(package private) List<KeyValueScanner>
 currentScanners
 
 
@@ -1056,7 +1056,7 @@ implements 
 
 currentScanners
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanner currentScanners
+finalhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanner currentScanners
 
 
 
@@ -1065,7 +1065,7 @@ implements 
 
 flushLock
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReentrantLock.html?is-external=true;
 title="class or interface in java.util.concurrent.locks">ReentrantLock flushLock
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReentrantLock.html?is-external=true;
 title="class or interface in java.util.concurrent.locks">ReentrantLock flushLock
 
 
 
@@ -1074,7 +1074,7 @@ implements 
 
 readPt
-protected finallong readPt
+protected finallong readPt
 
 
 
@@ -1091,7 +1091,7 @@ implements 
 
 StoreScanner
-protectedStoreScanner(Storestore,
+protectedStoreScanner(Storestore,
Scanscan,
ScanInfoscanInfo,
http://docs.oracle.com/javase/8/docs/api/java/util/NavigableSet.html?is-external=true;
 title="class or interface in 
java.util">NavigableSetbyte[]columns,
@@ -1106,7 +1106,7 @@ implements 
 
 StoreScanner
-publicStoreScanner(Storestore,
+publicStoreScanner(Storestore,
 ScanInfoscanInfo,
 Scanscan,
 http://docs.oracle.com/javase/8/docs/api/java/util/NavigableSet.html?is-external=true;
 title="class or interface in 
java.util">NavigableSetbyte[]columns,
@@ -1130,7 +1130,7 @@ implements 
 
 StoreScanner
-publicStoreScanner(Storestore,
+publicStoreScanner(Storestore,
 ScanInfoscanInfo,
 Scanscan,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends KeyValueScannerscanners,
@@ -1159,7 +1159,7 @@ implements 
 
 StoreScanner
-publicStoreScanner(Storestore,
+publicStoreScanner(Storestore,
 ScanInfoscanInfo,
 Scanscan,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends KeyValueScannerscanners,
@@ -1190,7 +1190,7 @@ implements 
 
 StoreScanner
-privateStoreScanner(Storestore,
+privateStoreScanner(Storestore,
  ScanInfoscanInfo,
  Scanscan,
  http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends KeyValueScannerscanners,
@@ -1212,7 +1212,7 @@ implements 
 
 StoreScanner
-StoreScanner(Scanscan,
+StoreScanner(Scanscan,
  ScanInfoscanInfo,
  ScanTypescanType,
  http://docs.oracle.com/javase/8/docs/api/java/util/NavigableSet.html?is-external=true;
 title="class or interface in 
java.util">NavigableSetbyte[]columns,
@@ -1230,7 +1230,7 @@ implements 
 
 StoreScanner
-StoreScanner(Scanscan,
+StoreScanner(Scanscan,
  ScanInfoscanInfo,
  ScanTypescanType,
  http://docs.oracle.com/javase/8/docs/api/java/util/NavigableSet.html?is-external=true;
 title="class or interface in 
java.util">NavigableSetbyte[]columns,
@@ -1249,7 +1249,7 @@ implements 
 
 StoreScanner
-publicStoreScanner(Scanscan,
+publicStoreScanner(Scanscan,
 ScanInfoscanInfo,
 ScanTypescanType,
 http://docs.oracle.com/javase/8/docs/api/java/util/NavigableSet.html?is-external=true;
 title="class or interface in 
java.util">NavigableSetbyte[]columns,
@@ -1277,7 +1277,7 @@ implements 
 
 addCurrentScanners
-private void addCurrentScanners(List<? extends KeyValueScanner> scanners)

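The visibility change at the top of this diff (currentScanners going from private to package-private) is a common test-enablement tweak: tests in the same package can then assert on internal scanner state without reflection. A toy model of the idiom, unrelated to the real HBase classes:

package com.example.scan;

import java.util.ArrayList;
import java.util.List;

public class DemoScanner {
  // Package-private on purpose: a test class in com.example.scan can inspect it directly.
  final List<String> currentScanners = new ArrayList<>();

  public void open(String scannerName) {
    currentScanners.add(scannerName);
  }
}
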
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f55ebeaa/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.html
index 109b5f3..e484176 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.html
@@ -23,484 +23,310 @@
 015 * See the License for the specific 
language governing permissions and
 016 * limitations under the License.
 017 */
-018
-019package org.apache.hadoop.hbase.ipc;
-020
-021import 
io.netty.bootstrap.ServerBootstrap;
-022import io.netty.buffer.ByteBuf;
-023import 
io.netty.buffer.PooledByteBufAllocator;
-024import io.netty.buffer.Unpooled;
-025import io.netty.channel.Channel;
-026import io.netty.channel.ChannelFuture;
-027import 
io.netty.channel.ChannelFutureListener;
-028import 
io.netty.channel.ChannelHandlerContext;
-029import 
io.netty.channel.ChannelInboundHandlerAdapter;
-030import 
io.netty.channel.ChannelInitializer;
-031import io.netty.channel.ChannelOption;
-032import 
io.netty.channel.ChannelOutboundHandlerAdapter;
-033import 
io.netty.channel.ChannelPipeline;
-034import io.netty.channel.ChannelPromise;
-035import io.netty.channel.EventLoopGroup;
-036import 
io.netty.channel.epoll.EpollEventLoopGroup;
-037import 
io.netty.channel.epoll.EpollServerSocketChannel;
-038import 
io.netty.channel.group.ChannelGroup;
-039import 
io.netty.channel.group.DefaultChannelGroup;
-040import 
io.netty.channel.nio.NioEventLoopGroup;
-041import 
io.netty.channel.socket.SocketChannel;
-042import 
io.netty.channel.socket.nio.NioServerSocketChannel;
-043import 
io.netty.handler.codec.ByteToMessageDecoder;
-044import 
io.netty.handler.codec.LengthFieldBasedFrameDecoder;
-045import 
io.netty.util.concurrent.GlobalEventExecutor;
-046
-047import java.io.IOException;
-048import java.io.InterruptedIOException;
-049import java.net.InetAddress;
-050import java.net.InetSocketAddress;
-051import java.nio.ByteBuffer;
-052import java.util.Arrays;
-053import java.util.List;
-054import 
java.util.concurrent.CountDownLatch;
-055
-056import org.apache.commons.logging.Log;
-057import 
org.apache.commons.logging.LogFactory;
-058import 
org.apache.hadoop.conf.Configuration;
-059import 
org.apache.hadoop.hbase.CellScanner;
-060import 
org.apache.hadoop.hbase.HConstants;
-061import org.apache.hadoop.hbase.Server;
-062import 
org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
-063import 
org.apache.hadoop.hbase.nio.ByteBuff;
-064import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-065import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-066import 
org.apache.hadoop.hbase.security.AuthMethod;
-067import 
org.apache.hadoop.hbase.security.HBasePolicyProvider;
-068import 
org.apache.hadoop.hbase.security.SaslStatus;
-069import 
org.apache.hadoop.hbase.security.SaslUtil;
-070import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
-071import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
-072import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
-073import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-074import 
org.apache.hadoop.hbase.util.Bytes;
-075import 
org.apache.hadoop.hbase.util.JVM;
-076import 
org.apache.hadoop.hbase.util.Pair;
-077import 
org.apache.hadoop.io.IntWritable;
-078import 
org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
-079import org.apache.htrace.TraceInfo;
+018package org.apache.hadoop.hbase.ipc;
+019
+020import 
io.netty.bootstrap.ServerBootstrap;
+021import io.netty.buffer.ByteBuf;
+022import 
io.netty.buffer.PooledByteBufAllocator;
+023import io.netty.buffer.Unpooled;
+024import io.netty.channel.Channel;
+025import io.netty.channel.ChannelFuture;
+026import 
io.netty.channel.ChannelFutureListener;
+027import 
io.netty.channel.ChannelHandlerContext;
+028import 
io.netty.channel.ChannelInboundHandlerAdapter;
+029import 
io.netty.channel.ChannelInitializer;
+030import io.netty.channel.ChannelOption;
+031import 
io.netty.channel.ChannelOutboundHandlerAdapter;
+032import 
io.netty.channel.ChannelPipeline;
+033import io.netty.channel.ChannelPromise;
+034import io.netty.channel.EventLoopGroup;
+035import 
io.netty.channel.epoll.EpollEventLoopGroup;
+036import 
io.netty.channel.epoll.EpollServerSocketChannel;
+037import 
io.netty.channel.group.ChannelGroup;
+038import 
io.netty.channel.group.DefaultChannelGroup;
+039import 
io.netty.channel.nio.NioEventLoopGroup;
+040import 
io.netty.channel.socket.SocketChannel;
+041import 
io.netty.channel.socket.nio.NioServerSocketChannel;
+042import 
io.netty.handler.codec.ByteToMessageDecoder;
+043import 
io.netty.handler.codec.LengthFieldBasedFrameDecoder;
+044import 

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e0a5167/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
index 2ccefa4..87d7143 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
@@ -141,1653 +141,1703 @@
 133
 134  private final static String 
BACKUP_INFO_PREFIX = "session:";
 135  private final static String 
START_CODE_ROW = "startcode:";
-136  private final static String 
INCR_BACKUP_SET = "incrbackupset:";
-137  private final static String 
TABLE_RS_LOG_MAP_PREFIX = "trslm:";
-138  private final static String 
RS_LOG_TS_PREFIX = "rslogts:";
-139
-140  private final static String 
BULK_LOAD_PREFIX = "bulk:";
-141  private final static byte[] 
BULK_LOAD_PREFIX_BYTES = BULK_LOAD_PREFIX.getBytes();
-142  final static byte[] TBL_COL = 
Bytes.toBytes("tbl");
-143  final static byte[] FAM_COL = 
Bytes.toBytes("fam");
-144  final static byte[] PATH_COL = 
Bytes.toBytes("path");
-145  final static byte[] STATE_COL = 
Bytes.toBytes("state");
-146  // the two states a bulk loaded file 
can be
-147  final static byte[] BL_PREPARE = 
Bytes.toBytes("R");
-148  final static byte[] BL_COMMIT = 
Bytes.toBytes("D");
-149
-150  private final static String WALS_PREFIX 
= "wals:";
-151  private final static String 
SET_KEY_PREFIX = "backupset:";
-152
-153  // separator between BULK_LOAD_PREFIX 
and ordinals
-154 protected final static String 
BLK_LD_DELIM = ":";
-155  private final static byte[] EMPTY_VALUE 
= new byte[] {};
-156
-157  // Safe delimiter in a string
-158  private final static String NULL = 
"\u";
-159
-160  public BackupSystemTable(Connection 
conn) throws IOException {
-161this.connection = conn;
-162tableName = 
BackupSystemTable.getTableName(conn.getConfiguration());
-163checkSystemTable();
-164  }
+136  private final static byte[] 
ACTIVE_SESSION_ROW = "activesession:".getBytes();
+137  private final static byte[] 
ACTIVE_SESSION_COL = "c".getBytes();
+138
+139  private final static byte[] 
ACTIVE_SESSION_YES = "yes".getBytes();
+140  private final static byte[] 
ACTIVE_SESSION_NO = "no".getBytes();
+141
+142  private final static String 
INCR_BACKUP_SET = "incrbackupset:";
+143  private final static String 
TABLE_RS_LOG_MAP_PREFIX = "trslm:";
+144  private final static String 
RS_LOG_TS_PREFIX = "rslogts:";
+145
+146  private final static String 
BULK_LOAD_PREFIX = "bulk:";
+147  private final static byte[] 
BULK_LOAD_PREFIX_BYTES = BULK_LOAD_PREFIX.getBytes();
+148  final static byte[] TBL_COL = 
Bytes.toBytes("tbl");
+149  final static byte[] FAM_COL = 
Bytes.toBytes("fam");
+150  final static byte[] PATH_COL = 
Bytes.toBytes("path");
+151  final static byte[] STATE_COL = 
Bytes.toBytes("state");
+152  // the two states a bulk loaded file 
can be
+153  final static byte[] BL_PREPARE = 
Bytes.toBytes("R");
+154  final static byte[] BL_COMMIT = 
Bytes.toBytes("D");
+155
+156  private final static String WALS_PREFIX 
= "wals:";
+157  private final static String 
SET_KEY_PREFIX = "backupset:";
+158
+159  // separator between BULK_LOAD_PREFIX 
and ordinals
+160 protected final static String 
BLK_LD_DELIM = ":";
+161  private final static byte[] EMPTY_VALUE 
= new byte[] {};
+162
+163  // Safe delimiter in a string
+164  private final static String NULL = 
"\u";
 165
-166  private void checkSystemTable() throws 
IOException {
-167try (Admin admin = 
connection.getAdmin();) {
-168
-169  verifyNamespaceExists(admin);
-170
-171  if (!admin.tableExists(tableName)) 
{
-172HTableDescriptor backupHTD =
-173
BackupSystemTable.getSystemTableDescriptor(connection.getConfiguration());
-174admin.createTable(backupHTD);
-175  }
-176  waitForSystemTable(admin);
-177}
-178  }
-179
-180  private void 
verifyNamespaceExists(Admin admin) throws IOException {
-181  String namespaceName  = 
tableName.getNamespaceAsString();
-182  NamespaceDescriptor ns = 
NamespaceDescriptor.create(namespaceName).build();
-183  NamespaceDescriptor[] list = 
admin.listNamespaceDescriptors();
-184  boolean exists = false;
-185  for( NamespaceDescriptor nsd: list) 
{
-186if 
(nsd.getName().equals(ns.getName())) {
-187  exists = true;
-188  break;
-189}
-190  }
-191  if (!exists) {
-192admin.createNamespace(ns);
-193  }
-194  }
-195
-196  private void waitForSystemTable(Admin 
admin) throws IOException {
-197long TIMEOUT = 6;
-198long startTime = 
EnvironmentEdgeManager.currentTime();
-199while (!admin.tableExists(tableName) 
|| 

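The verifyNamespaceExists(...) logic shown above is a reusable pattern: list the namespaces and create only when absent. A standalone version built from the same public Admin calls the diff uses:

import java.io.IOException;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;

public final class NamespaceUtil {
  private NamespaceUtil() {}

  // Create namespaceName only if it is not already present.
  public static void ensureNamespace(Admin admin, String namespaceName) throws IOException {
    NamespaceDescriptor ns = NamespaceDescriptor.create(namespaceName).build();
    for (NamespaceDescriptor existing : admin.listNamespaceDescriptors()) {
      if (existing.getName().equals(ns.getName())) {
        return; // already there, nothing to do
      }
    }
    admin.createNamespace(ns);
  }
}
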
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/709b8fcc/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
index f2c44db..6cf2fc8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
@@ -2581,7 +2581,7 @@
 2573try {
 2574  // Restore snapshot
 2575  get(
-2576
internalRestoreSnapshotAsync(snapshotName, tableName, false),
+2576
internalRestoreSnapshotAsync(snapshotName, tableName),
 2577syncWaitTimeout,
 2578TimeUnit.MILLISECONDS);
 2579} catch (IOException e) {
@@ -2590,7 +2590,7 @@
 2582  if (takeFailSafeSnapshot) {
 2583try {
 2584  get(
-2585
internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName, false),
+2585
internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName),
 2586syncWaitTimeout,
 2587TimeUnit.MILLISECONDS);
 2588  String msg = "Restore 
snapshot=" + snapshotName +
@@ -2633,7 +2633,7 @@
 2625  throw new 
TableNotDisabledException(tableName);
 2626}
 2627
-2628return 
internalRestoreSnapshotAsync(snapshotName, tableName, false);
+2628return 
internalRestoreSnapshotAsync(snapshotName, tableName);
 2629  }
 2630
 2631  @Override
@@ -2643,1621 +2643,1614 @@
 2635  }
 2636
 2637  @Override
-2638  public void cloneSnapshot(String 
snapshotName, TableName tableName, boolean restoreAcl)
+2638  public void cloneSnapshot(final String 
snapshotName, final TableName tableName)
 2639  throws IOException, 
TableExistsException, RestoreSnapshotException {
 2640if (tableExists(tableName)) {
 2641  throw new 
TableExistsException(tableName);
 2642}
 2643get(
-2644  
internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl),
+2644  
internalRestoreSnapshotAsync(snapshotName, tableName),
 2645  Integer.MAX_VALUE,
 2646  TimeUnit.MILLISECONDS);
 2647  }
 2648
 2649  @Override
-2650  public void cloneSnapshot(final String 
snapshotName, final TableName tableName)
-2651  throws IOException, 
TableExistsException, RestoreSnapshotException {
-2652cloneSnapshot(snapshotName, 
tableName, false);
-2653  }
-2654
-2655  @Override
-2656  public Future<Void> cloneSnapshotAsync(final String snapshotName, final TableName tableName)
-2657  throws IOException, 
TableExistsException {
-2658if (tableExists(tableName)) {
-2659  throw new 
TableExistsException(tableName);
-2660}
-2661return 
internalRestoreSnapshotAsync(snapshotName, tableName, false);
-2662  }
-2663
-2664  @Override
-2665  public byte[] execProcedureWithRet(String signature, String instance, Map<String, String> props)
-2666  throws IOException {
-2667ProcedureDescription desc = 
ProtobufUtil.buildProcedureDescription(signature, instance, props);
-2668final ExecProcedureRequest request 
=
-2669
ExecProcedureRequest.newBuilder().setProcedure(desc).build();
-2670// run the procedure on the master
-2671ExecProcedureResponse response = 
executeCallable(
-2672  new MasterCallable<ExecProcedureResponse>(getConnection(), getRpcControllerFactory()) {
-2673@Override
-2674protected ExecProcedureResponse 
rpcCall() throws Exception {
-2675  return 
master.execProcedureWithRet(getRpcController(), request);
-2676}
-2677  });
-2678
-2679return response.hasReturnData() ? 
response.getReturnData().toByteArray() : null;
-2680  }
-2681
-2682  @Override
-2683  public void execProcedure(String signature, String instance, Map<String, String> props)
-2684  throws IOException {
-2685ProcedureDescription desc = 
ProtobufUtil.buildProcedureDescription(signature, instance, props);
-2686final ExecProcedureRequest request 
=
-2687
ExecProcedureRequest.newBuilder().setProcedure(desc).build();
-2688// run the procedure on the master
-2689ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>(
-2690getConnection(), 
getRpcControllerFactory()) {
-2691  @Override
-2692  protected ExecProcedureResponse 
rpcCall() throws Exception {
-2693return 
master.execProcedure(getRpcController(), request);
-2694  }
-2695});
-2696
-2697long start = 
EnvironmentEdgeManager.currentTime();
-2698long max = 
response.getExpectedTimeout();
-2699long maxPauseTime = max / 
this.numRetries;
-2700int tries = 0;
-2701LOG.debug("Waiting a max of " + max 
+ " ms for procedure '" +
-2702signature + " : " + instance + 
"'' to complete. (max " + maxPauseTime + " ms per 

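The HBaseAdmin changes above drop the extra restoreAcl flag from the internal restore path while the public entry points keep their shape. A hedged end-to-end sketch with made-up table and snapshot names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("t1"); // hypothetical table
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.snapshot("t1-snap", table);        // take a snapshot
      admin.disableTable(table);               // restore requires a disabled table
      admin.restoreSnapshot("t1-snap");        // the synchronous restore path wrapped above
      admin.enableTable(table);
      admin.cloneSnapshot("t1-snap", TableName.valueOf("t1-copy")); // clone needs a fresh target
    }
  }
}
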
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
index 11e5e0a..bb27967 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
@@ -31,381 +31,386 @@
 023import java.util.SortedSet;
 024
 025import 
org.apache.commons.lang.NotImplementedException;
-026import org.apache.hadoop.hbase.Cell;
-027import 
org.apache.hadoop.hbase.CellUtil;
-028import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-029import 
org.apache.hadoop.hbase.client.Scan;
-030
-031/**
-032 * A scanner of a single memstore 
segment.
-033 */
-034@InterfaceAudience.Private
-035public class SegmentScanner implements 
KeyValueScanner {
-036
-037  /**
-038   * Order of this scanner relative to 
other scanners. See
-039   * {@link 
KeyValueScanner#getScannerOrder()}.
-040   */
-041  private long scannerOrder;
-042  private static final long 
DEFAULT_SCANNER_ORDER = Long.MAX_VALUE;
-043
-044  // the observed structure
-045  protected final Segment segment;
-046  // the highest relevant MVCC
-047  private long readPoint;
-048  // the current iterator that can be 
reinitialized by
-049  // seek(), backwardSeek(), or 
reseek()
-050  protected Iterator<Cell> iter;
-051  // the pre-calculated cell to be 
returned by peek()
-052  protected Cell current = null;
-053  // or next()
-054  // A flag represents whether could stop 
skipping KeyValues for MVCC
-055  // if have encountered the next row. 
Only used for reversed scan
-056  private boolean 
stopSkippingKVsIfNextRow = false;
-057  // last iterated KVs by seek (to 
restore the iterator state after reseek)
-058  private Cell last = null;
-059
-060  // flag to indicate if this scanner is 
closed
-061  protected boolean closed = false;
-062
-063  protected SegmentScanner(Segment 
segment, long readPoint) {
-064this(segment, readPoint, 
DEFAULT_SCANNER_ORDER);
-065  }
-066
-067  /**
-068   * @param scannerOrder see {@link 
KeyValueScanner#getScannerOrder()}.
-069   * Scanners are ordered from 0 (oldest) 
to newest in increasing order.
-070   */
-071  protected SegmentScanner(Segment 
segment, long readPoint, long scannerOrder) {
-072this.segment = segment;
-073this.readPoint = readPoint;
-074//increase the reference count so the 
underlying structure will not be de-allocated
-075this.segment.incScannerCount();
-076iter = segment.iterator();
-077// the initialization of the current 
is required for working with heap of SegmentScanners
-078updateCurrent();
-079this.scannerOrder = scannerOrder;
-080if (current == null) {
-081  // nothing to fetch from this 
scanner
-082  close();
-083}
-084  }
-085
-086  /**
-087   * Look at the next Cell in this 
scanner, but do not iterate the scanner
-088   * @return the currently observed 
Cell
-089   */
-090  @Override
-091  public Cell peek() {  // sanity 
check, the current should be always valid
-092if (closed) {
-093  return null;
-094}
-095if (current != null && current.getSequenceId() > readPoint) {
-096  throw new RuntimeException("current 
is invalid: read point is "+readPoint+", " +
-097  "while current sequence id is " 
+current.getSequenceId());
-098}
-099return current;
-100  }
-101
-102  /**
-103   * Return the next Cell in this 
scanner, iterating the scanner
-104   * @return the next Cell or null if end 
of scanner
-105   */
-106  @Override
-107  public Cell next() throws IOException 
{
-108if (closed) {
-109  return null;
-110}
-111Cell oldCurrent = current;
-112updateCurrent();  // 
update the currently observed Cell
-113return oldCurrent;
-114  }
-115
-116  /**
-117   * Seek the scanner at or after the 
specified Cell.
-118   * @param cell seek value
-119   * @return true if scanner has values 
left, false if end of scanner
-120   */
-121  @Override
-122  public boolean seek(Cell cell) throws 
IOException {
-123if (closed) {
-124  return false;
-125}
-126if(cell == null) {
-127  close();
-128  return false;
-129}
-130// restart the iterator from new 
key
-131iter = getIterator(cell);
-132// last is going to be reinitialized 
in the next getNext() call
-133last = null;
-134updateCurrent();
-135return (current != null);
-136  }
-137
-138  protected Iterator<Cell> getIterator(Cell cell) {
-139return 
segment.tailSet(cell).iterator();
-140  }
-141
-142  /**
-143   * Reseek the scanner at or after the 
specified KeyValue.
-144   * This method is guaranteed to seek at 
or after the required key only if the
-145   * key 

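The peek() contract shown above encodes the MVCC rule for memstore segment scanners: a reader opened at a given read point must never surface a cell with a newer sequence id. A standalone toy model of the invariant, not the real HBase types:

final class ModelCell {
  final long sequenceId;
  ModelCell(long sequenceId) { this.sequenceId = sequenceId; }
}

final class ModelSegmentScanner {
  private final long readPoint;
  private ModelCell current;
  private boolean closed;

  ModelSegmentScanner(long readPoint, ModelCell first) {
    this.readPoint = readPoint;
    this.current = first;
  }

  // Mirrors SegmentScanner.peek(): null once closed, and the invariant fails loudly.
  ModelCell peek() {
    if (closed) {
      return null;
    }
    if (current != null && current.sequenceId > readPoint) {
      throw new IllegalStateException("current is invalid: read point is " + readPoint
          + " while current sequence id is " + current.sequenceId);
    }
    return current;
  }

  void close() { closed = true; }
}
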
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dd7176bf/devapidocs/org/apache/hadoop/hbase/util/FSUtils.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/FSUtils.html 
b/devapidocs/org/apache/hadoop/hbase/util/FSUtils.html
index 638c8ff..64b822c 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/FSUtils.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/FSUtils.html
@@ -221,6 +221,10 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 THREAD_POOLSIZE
 
 
+private static Map<org.apache.hadoop.fs.FileSystem, Boolean>
+warningMap
+
+
 static boolean
 WINDOWS
 Set to true on Windows platforms
@@ -942,13 +946,22 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
-
+
 
 WINDOWS
 public static finalboolean WINDOWS
 Set to true on Windows platforms
 
 
+
+
+
+
+
+warningMap
+private static final Map<org.apache.hadoop.fs.FileSystem, Boolean> warningMap
+
+
 
 
 
@@ -1010,7 +1023,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setStoragePolicy
-public staticvoidsetStoragePolicy(org.apache.hadoop.fs.FileSystemfs,
+public staticvoidsetStoragePolicy(org.apache.hadoop.fs.FileSystemfs,
 org.apache.hadoop.fs.Pathpath,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringstoragePolicy)
 Sets storage policy for given path.
@@ -1037,7 +1050,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 invokeSetStoragePolicy
-private staticvoidinvokeSetStoragePolicy(org.apache.hadoop.fs.FileSystemfs,
+private staticvoidinvokeSetStoragePolicy(org.apache.hadoop.fs.FileSystemfs,
org.apache.hadoop.fs.Pathpath,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringstoragePolicy)
 
@@ -1048,7 +1061,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 isDistributedFileSystem
-public staticbooleanisDistributedFileSystem(org.apache.hadoop.fs.FileSystemfs)
+public staticbooleanisDistributedFileSystem(org.apache.hadoop.fs.FileSystemfs)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Returns:
@@ -1064,7 +1077,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 isStartingWithPath
-public staticbooleanisStartingWithPath(org.apache.hadoop.fs.PathrootPath,
+public staticbooleanisStartingWithPath(org.apache.hadoop.fs.PathrootPath,
  http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringpath)
 Compare of path component. Does not consider schema; i.e. 
if schemas
  different but path starts with rootPath,
@@ -1084,7 +1097,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 isMatchingTail
-public staticbooleanisMatchingTail(org.apache.hadoop.fs.PathpathToSearch,
+public staticbooleanisMatchingTail(org.apache.hadoop.fs.PathpathToSearch,
  http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpathTail)
 Compare path component of the Path URI; e.g. if 
hdfs://a/b/c and /a/b/c, it will compare the
  '/a/b/c' part. Does not consider schema; i.e. if schemas different but path 
or subpath matches,
@@ -1104,7 +1117,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 isMatchingTail
-public staticbooleanisMatchingTail(org.apache.hadoop.fs.PathpathToSearch,
+public staticbooleanisMatchingTail(org.apache.hadoop.fs.PathpathToSearch,
  
org.apache.hadoop.fs.PathpathTail)
 Compare path component of the Path URI; e.g. if 
hdfs://a/b/c and /a/b/c, it will compare the
  '/a/b/c' part. If you passed in 'hdfs://a/b/c and b/c, it would return true.  
Does not consider
@@ -1124,7 +1137,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getInstance
-public staticFSUtilsgetInstance(org.apache.hadoop.fs.FileSystemfs,
+public 

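The isMatchingTail javadoc above reads easiest with concrete inputs. A hedged sketch calling the documented signature; FSUtils is a private-audience utility, so this is illustration rather than supported API:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSUtils;

public class PathTailExample {
  public static void main(String[] args) {
    // Scheme and authority are ignored; only the trailing path components are compared.
    boolean full = FSUtils.isMatchingTail(new Path("hdfs://a/b/c"), new Path("/a/b/c")); // true
    boolean tail = FSUtils.isMatchingTail(new Path("hdfs://a/b/c"), new Path("b/c"));    // true per javadoc
    System.out.println(full + " " + tail);
  }
}
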
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ef4c5a9/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.MessageEncoder.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.MessageEncoder.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.MessageEncoder.html
index 27e0dee..109b5f3 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.MessageEncoder.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.MessageEncoder.html
@@ -67,157 +67,157 @@
 059import 
org.apache.hadoop.hbase.CellScanner;
 060import 
org.apache.hadoop.hbase.HConstants;
 061import org.apache.hadoop.hbase.Server;
-062import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-063import 
org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
-064import 
org.apache.hadoop.hbase.nio.ByteBuff;
-065import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-066import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-067import 
org.apache.hadoop.hbase.security.AuthMethod;
-068import 
org.apache.hadoop.hbase.security.HBasePolicyProvider;
-069import 
org.apache.hadoop.hbase.security.SaslStatus;
-070import 
org.apache.hadoop.hbase.security.SaslUtil;
-071import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
-072import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
-073import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
-074import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-075import 
org.apache.hadoop.hbase.util.Bytes;
-076import 
org.apache.hadoop.hbase.util.JVM;
-077import 
org.apache.hadoop.hbase.util.Pair;
-078import 
org.apache.hadoop.io.IntWritable;
-079import 
org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
-080import org.apache.htrace.TraceInfo;
-081
-082/**
-083 * An RPC server with Netty4 
implementation.
-084 *
-085 */
-086public class NettyRpcServer extends 
RpcServer {
-087
-088  public static final Log LOG = 
LogFactory.getLog(NettyRpcServer.class);
-089
-090  protected final InetSocketAddress 
bindAddress;
-091
-092  private final CountDownLatch closed = 
new CountDownLatch(1);
-093  private final Channel serverChannel;
-094  private final ChannelGroup allChannels 
= new DefaultChannelGroup(GlobalEventExecutor.INSTANCE);;
-095
-096  public NettyRpcServer(final Server 
server, final String name,
-097  final List<BlockingServiceAndInterface> services,
-098  final InetSocketAddress 
bindAddress, Configuration conf,
-099  RpcScheduler scheduler) throws 
IOException {
-100super(server, name, services, 
bindAddress, conf, scheduler);
-101this.bindAddress = bindAddress;
-102boolean useEpoll = useEpoll(conf);
-103int workerCount = 
conf.getInt("hbase.netty.rpc.server.worker.count",
-104
Runtime.getRuntime().availableProcessors() / 4);
-105EventLoopGroup bossGroup = null;
-106EventLoopGroup workerGroup = null;
-107if (useEpoll) {
-108  bossGroup = new 
EpollEventLoopGroup(1);
-109  workerGroup = new 
EpollEventLoopGroup(workerCount);
-110} else {
-111  bossGroup = new 
NioEventLoopGroup(1);
-112  workerGroup = new 
NioEventLoopGroup(workerCount);
-113}
-114ServerBootstrap bootstrap = new 
ServerBootstrap();
-115bootstrap.group(bossGroup, 
workerGroup);
-116if (useEpoll) {
-117  
bootstrap.channel(EpollServerSocketChannel.class);
-118} else {
-119  
bootstrap.channel(NioServerSocketChannel.class);
-120}
-121
bootstrap.childOption(ChannelOption.TCP_NODELAY, tcpNoDelay);
-122
bootstrap.childOption(ChannelOption.SO_KEEPALIVE, tcpKeepAlive);
-123
bootstrap.childOption(ChannelOption.ALLOCATOR,
-124
PooledByteBufAllocator.DEFAULT);
-125bootstrap.childHandler(new 
Initializer(maxRequestSize));
-126
-127try {
-128  serverChannel = 
bootstrap.bind(this.bindAddress).sync().channel();
-129  LOG.info("NettyRpcServer bind to 
address=" + serverChannel.localAddress()
-130  + ", 
hbase.netty.rpc.server.worker.count=" + workerCount
-131  + ", useEpoll=" + useEpoll);
-132  allChannels.add(serverChannel);
-133} catch (InterruptedException e) {
-134  throw new 
InterruptedIOException(e.getMessage());
-135}
-136initReconfigurable(conf);
-137this.scheduler.init(new 
RpcSchedulerContext(this));
-138  }
-139
-140  private static boolean 
useEpoll(Configuration conf) {
-141// Config to enable native 
transport.
-142boolean epollEnabled = 
conf.getBoolean("hbase.rpc.server.nativetransport",
-143true);
-144// Use the faster native epoll 
transport mechanism on linux if enabled
-145return epollEnabled && JVM.isLinux() && JVM.isAmd64();
-146  }
-147
-148  @Override
-149  public synchronized void start() {
-150if (started) {
-151  return;
-152}
-153

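The useEpoll(...) helper above gates the native transport on a config flag plus a platform check, and sizes the worker pool from a second key. A hedged standalone distillation of that choice, using only the configuration names visible in the diff:

import io.netty.channel.EventLoopGroup;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import org.apache.hadoop.conf.Configuration;

public class TransportChooser {
  // Mirrors NettyRpcServer: prefer native epoll on Linux/amd64 when enabled, else fall back to NIO.
  public static EventLoopGroup workerGroup(Configuration conf, boolean isLinuxAmd64) {
    boolean epollEnabled = conf.getBoolean("hbase.rpc.server.nativetransport", true);
    int workerCount = conf.getInt("hbase.netty.rpc.server.worker.count",
        Runtime.getRuntime().availableProcessors() / 4);
    return (epollEnabled && isLinuxAmd64)
        ? new EpollEventLoopGroup(workerCount)
        : new NioEventLoopGroup(workerCount);
  }
}
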
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/31df4674/devapidocs/overview-tree.html
--
diff --git a/devapidocs/overview-tree.html b/devapidocs/overview-tree.html
index e3b746f..e6a 100644
--- a/devapidocs/overview-tree.html
+++ b/devapidocs/overview-tree.html
@@ -1048,6 +1048,7 @@
 
 io.netty.handler.codec.ByteToMessageDecoder
 
+org.apache.hadoop.hbase.ipc.NettyRpcServer.ConnectionHeaderHandler
 org.apache.hadoop.hbase.security.SaslChallengeDecoder
 
 
@@ -1058,6 +1059,12 @@
 org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler
 
 
+io.netty.channel.ChannelInitializer<C>
+
+org.apache.hadoop.hbase.ipc.NettyRpcServer.Initializer
+
+
+org.apache.hadoop.hbase.ipc.NettyRpcServer.MessageDecoder
 io.netty.channel.SimpleChannelInboundHandler<I>
 
 org.apache.hadoop.hbase.client.ClusterStatusListener.MulticastListener.ClusterStatusHandler
@@ -1086,6 +1093,7 @@
 org.apache.hadoop.hbase.master.ClusterStatusPublisher.MulticastPublisher.ClusterStatusEncoder
 
 
+org.apache.hadoop.hbase.ipc.NettyRpcServer.MessageEncoder
 org.apache.hadoop.hbase.security.SaslWrapHandler
 
 
@@ -2379,6 +2387,7 @@
 org.apache.hadoop.hbase.namespace.NamespaceTableAndRegionInfo
 org.apache.hadoop.hbase.replication.NamespaceTableCfWALEntryFilter (implements 
org.apache.hadoop.hbase.replication.WALCellFilter, 
org.apache.hadoop.hbase.replication.WALEntryFilter)
 org.apache.hadoop.hbase.ipc.NettyRpcClientConfigHelper
+org.apache.hadoop.hbase.ipc.NettyRpcServer.CallWriteListener (implements 
io.netty.channel.ChannelFutureListener)
 org.apache.hadoop.hbase.http.NoCacheFilter (implements 
javax.servlet.Filter)
 org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController (implements 
org.apache.hadoop.hbase.regionserver.throttle.ThroughputController)
 org.apache.hadoop.hbase.util.NonceKey
@@ -3074,17 +3083,20 @@
 
 org.apache.hadoop.hbase.ipc.RpcServer 
(implements org.apache.hadoop.hbase.conf.ConfigurationObserver, 
org.apache.hadoop.hbase.ipc.RpcServerInterface)
 
+org.apache.hadoop.hbase.ipc.NettyRpcServer
 org.apache.hadoop.hbase.ipc.SimpleRpcServer
 
 
 org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface
 org.apache.hadoop.hbase.ipc.RpcServer.Call (implements 
org.apache.hadoop.hbase.ipc.RpcCall)
 
+org.apache.hadoop.hbase.ipc.NettyRpcServer.Call
 org.apache.hadoop.hbase.ipc.SimpleRpcServer.Call
 
 
-org.apache.hadoop.hbase.ipc.RpcServer.Connection
+org.apache.hadoop.hbase.ipc.RpcServer.Connection (implements java.io.Closeable)
 
+org.apache.hadoop.hbase.ipc.NettyRpcServer.NettyConnection
 org.apache.hadoop.hbase.ipc.SimpleRpcServer.Connection
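
The tree above shows NettyRpcServer joining SimpleRpcServer as a second RpcServer implementation. As a hedged sketch (the exact config key is an assumption; later 2.0 code reads it in RpcServerFactory), the implementation could be selected like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SelectRpcServerImpl {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key: "hbase.rpc.server.impl" names the RpcServer subclass to use;
    // the two candidates are the classes shown in the hierarchy above.
    conf.set("hbase.rpc.server.impl", "org.apache.hadoop.hbase.ipc.NettyRpcServer");
    System.out.println("rpc server impl: " + conf.get("hbase.rpc.server.impl"));
  }
}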
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/31df4674/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index 8e47fe2..74c252f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 008@InterfaceAudience.Private
 009public class Version {
 010  public static final String version = "2.0.0-SNAPSHOT";
-011  public static final String revision = "c8a7e80e0a87fa39a720b3da54c0bcf11520ee9f";
+011  public static final String revision = "ff998ef74fe7b8d304b2e9e5579b019c62f836db";
 012  public static final String user = "jenkins";
-013  public static final String date = "Tue May  2 14:38:55 UTC 2017";
+013  public static final String date = "Wed May  3 14:38:59 UTC 2017";
 014  public static final String url = "git://asf920.gq1.ygridcore.net/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015  public static final String srcChecksum = "1c443c1bdcef1b0d474e1f4b16806dc1";
+015  public static final String srcChecksum = "c626e1fc5636082b9f9d9907601e524f";
 016}
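
At runtime these generated constants surface through org.apache.hadoop.hbase.util.VersionInfo; a minimal sketch, assuming the standard VersionInfo accessors that wrap this Version class:

import org.apache.hadoop.hbase.util.VersionInfo;

public class PrintBuildInfo {
  public static void main(String[] args) {
    // Each accessor returns the matching generated Version constant.
    System.out.println("version:  " + VersionInfo.getVersion());
    System.out.println("revision: " + VersionInfo.getRevision());
    System.out.println("user:     " + VersionInfo.getUser());
    System.out.println("date:     " + VersionInfo.getDate());
    System.out.println("checksum: " + VersionInfo.getSrcChecksum());
  }
}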
 
 



[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6f2e75f2/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
index 6c52543..f3f7a46 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
@@ -31,1797 +31,2040 @@
 023import java.util.ArrayList;
 024import java.util.Arrays;
 025import java.util.Collection;
-026import java.util.HashMap;
-027import java.util.LinkedList;
-028import java.util.List;
-029import java.util.Map;
-030import java.util.Optional;
-031import java.util.concurrent.CompletableFuture;
-032import java.util.concurrent.TimeUnit;
-033import java.util.concurrent.atomic.AtomicReference;
-034import java.util.function.BiConsumer;
-035import java.util.regex.Pattern;
-036import java.util.stream.Collectors;
-037
-038import com.google.common.annotations.VisibleForTesting;
-039
-040import io.netty.util.Timeout;
-041import io.netty.util.TimerTask;
-042import org.apache.commons.logging.Log;
-043import org.apache.commons.logging.LogFactory;
-044import org.apache.hadoop.hbase.HColumnDescriptor;
-045import org.apache.hadoop.hbase.HRegionInfo;
-046import org.apache.hadoop.hbase.HRegionLocation;
-047import org.apache.hadoop.hbase.MetaTableAccessor;
-048import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-049import org.apache.hadoop.hbase.NotServingRegionException;
-050import org.apache.hadoop.hbase.RegionLocations;
-051import org.apache.hadoop.hbase.ServerName;
-052import org.apache.hadoop.hbase.NamespaceDescriptor;
-053import org.apache.hadoop.hbase.HConstants;
-054import org.apache.hadoop.hbase.TableExistsException;
-055import org.apache.hadoop.hbase.TableName;
-056import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-057import org.apache.hadoop.hbase.TableNotFoundException;
-058import org.apache.hadoop.hbase.UnknownRegionException;
-059import org.apache.hadoop.hbase.classification.InterfaceAudience;
-060import org.apache.hadoop.hbase.classification.InterfaceStability;
-061import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-062import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-063import org.apache.hadoop.hbase.client.Scan.ReadType;
-064import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-065import org.apache.hadoop.hbase.client.replication.TableCFs;
-066import org.apache.hadoop.hbase.exceptions.DeserializationException;
-067import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-068import org.apache.hadoop.hbase.quotas.QuotaFilter;
-069import org.apache.hadoop.hbase.quotas.QuotaSettings;
-070import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-071import org.apache.hadoop.hbase.replication.ReplicationException;
-072import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-073import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-074import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-075import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-076import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-077import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-078import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-079import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-080import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-081import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-082import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-091import 

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/efd0601e/devapidocs/org/apache/hadoop/hbase/HTableDescriptor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/HTableDescriptor.html 
b/devapidocs/org/apache/hadoop/hbase/HTableDescriptor.html
index cce9f5c..2c698af 100644
--- a/devapidocs/org/apache/hadoop/hbase/HTableDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/HTableDescriptor.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":9,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":42,"i24":10,"i25":42,"i26":10,"i27":10,"i28":10,"i29":41,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":9,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":42,"i65":42,"i66":10,"i67":42,"i68":42,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10};
+var methods = {"i0":42,"i1":42,"i2":42,"i3":42,"i4":42,"i5":42,"i6":42,"i7":42,"i8":42,"i9":42,"i10":42,"i11":42,"i12":42,"i13":42,"i14":42,"i15":42,"i16":42,"i17":42,"i18":42,"i19":42,"i20":42,"i21":42,"i22":42,"i23":42,"i24":42,"i25":42,"i26":42,"i27":42,"i28":42,"i29":42,"i30":42,"i31":42,"i32":42,"i33":42,"i34":42,"i35":42,"i36":42,"i37":42,"i38":42,"i39":41,"i40":42,"i41":42,"i42":42,"i43":42,"i44":42,"i45":42,"i46":42,"i47":42,"i48":42,"i49":42,"i50":42,"i51":42,"i52":42,"i53":42,"i54":42,"i55":42,"i56":42,"i57":42,"i58":42,"i59":42,"i60":42,"i61":42,"i62":42,"i63":42,"i64":42,"i65":42,"i66":42};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -109,18 +109,23 @@ var activeTableTab = "activeTableTab";
 
 
 All Implemented Interfaces:
-Comparable<HTableDescriptor>
+Comparable<HTableDescriptor>, TableDescriptor
 
 
 Direct Known Subclasses:
-UnmodifyableHTableDescriptor
+ImmutableHTableDescriptor
 
 
+Deprecated.
+As of release 2.0.0, this will be removed in HBase 3.0.0.
+ use TableDescriptorBuilder to build HTableDescriptor.
+
 
-@InterfaceAudience.Public
-public class HTableDescriptor
+@Deprecated
+ @InterfaceAudience.Public
+public class HTableDescriptor
 extends Object
-implements Comparable<HTableDescriptor>
+implements TableDescriptor, Comparable<HTableDescriptor>
 HTableDescriptor contains the details about an HBase table such as the descriptors of
  all the column families, is the table a catalog table, -ROOT- or
  hbase:meta, if the table is read only, the maximum size of the memstore,
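
The deprecation note points at TableDescriptorBuilder as the replacement. A minimal sketch against the HBase 2.x client API as eventually released (builder method names in this 2.0.0-SNAPSHOT may differ):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class BuildTableDescriptor {
  public static void main(String[] args) {
    // Builds an immutable TableDescriptor instead of mutating an HTableDescriptor.
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .setCompactionEnabled(true) // counterpart of the COMPACTION_ENABLED key below
        .build();
    System.out.println(desc.getTableName());
  }
}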
@@ -146,278 +151,159 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 static String
 COMPACTION_ENABLED
-INTERNAL Used by HBase Shell interface to access this metadata
- attribute which denotes if the table is compaction enabled
-
-private static Bytes
-COMPACTION_ENABLED_KEY
-
-private Map<String,String>
-configuration
-A map which holds the configuration specific to the table.
+Deprecated.
+
 
 static boolean
 DEFAULT_COMPACTION_ENABLED
-Constant that denotes whether the table is compaction enabled by default
-
-private static boolean
-DEFAULT_DEFERRED_LOG_FLUSH
-
-private static Durability
-DEFAULT_DURABLITY
-Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value
+Deprecated.
+
 

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/10601a30/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
index be839b7..72853dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
@@ -45,1639 +45,1784 @@
 037
 038import com.google.common.annotations.VisibleForTesting;
 039
-040import org.apache.commons.logging.Log;
-041import org.apache.commons.logging.LogFactory;
-042import org.apache.hadoop.hbase.HColumnDescriptor;
-043import org.apache.hadoop.hbase.HRegionInfo;
-044import org.apache.hadoop.hbase.HRegionLocation;
-045import org.apache.hadoop.hbase.HTableDescriptor;
-046import org.apache.hadoop.hbase.MetaTableAccessor;
-047import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-048import org.apache.hadoop.hbase.NotServingRegionException;
-049import org.apache.hadoop.hbase.RegionLocations;
-050import org.apache.hadoop.hbase.ServerName;
-051import org.apache.hadoop.hbase.NamespaceDescriptor;
-052import org.apache.hadoop.hbase.HConstants;
-053import org.apache.hadoop.hbase.TableName;
-054import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-055import org.apache.hadoop.hbase.TableNotFoundException;
-056import org.apache.hadoop.hbase.UnknownRegionException;
-057import org.apache.hadoop.hbase.classification.InterfaceAudience;
-058import org.apache.hadoop.hbase.classification.InterfaceStability;
-059import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-060import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-061import org.apache.hadoop.hbase.client.Scan.ReadType;
-062import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-063import org.apache.hadoop.hbase.client.replication.TableCFs;
-064import org.apache.hadoop.hbase.exceptions.DeserializationException;
-065import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-066import org.apache.hadoop.hbase.quotas.QuotaFilter;
-067import org.apache.hadoop.hbase.quotas.QuotaSettings;
-068import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-069import org.apache.hadoop.hbase.replication.ReplicationException;
-070import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-071import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-072import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-073import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-074import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-075import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-076import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-077import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-078import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-079import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-080import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-081import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-082import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-095import 

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/662ea7dc/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
index ac4a9b3..be839b7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
@@ -30,212 +30,212 @@
 022import java.io.IOException;
 023import java.util.ArrayList;
 024import java.util.Arrays;
-025import java.util.LinkedList;
-026import java.util.List;
-027import java.util.Optional;
-028import java.util.concurrent.CompletableFuture;
-029import java.util.concurrent.TimeUnit;
-030import java.util.concurrent.atomic.AtomicReference;
-031import java.util.function.BiConsumer;
-032import java.util.regex.Pattern;
-033
-034import com.google.common.annotations.VisibleForTesting;
-035import org.apache.commons.logging.Log;
-036import org.apache.commons.logging.LogFactory;
-037import org.apache.hadoop.hbase.HColumnDescriptor;
-038import org.apache.hadoop.hbase.HRegionInfo;
-039import org.apache.hadoop.hbase.HRegionLocation;
-040import org.apache.hadoop.hbase.HTableDescriptor;
-041import org.apache.hadoop.hbase.MetaTableAccessor;
-042import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-043import org.apache.hadoop.hbase.NotServingRegionException;
-044import org.apache.hadoop.hbase.RegionLocations;
-045import org.apache.hadoop.hbase.ServerName;
-046import org.apache.hadoop.hbase.NamespaceDescriptor;
-047import org.apache.hadoop.hbase.HConstants;
-048import org.apache.hadoop.hbase.TableName;
-049import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-050import org.apache.hadoop.hbase.TableNotFoundException;
-051import org.apache.hadoop.hbase.UnknownRegionException;
-052import org.apache.hadoop.hbase.classification.InterfaceAudience;
-053import org.apache.hadoop.hbase.classification.InterfaceStability;
-054import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-055import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-056import org.apache.hadoop.hbase.client.Scan.ReadType;
-057import org.apache.hadoop.hbase.exceptions.DeserializationException;
-058import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-059import org.apache.hadoop.hbase.quotas.QuotaFilter;
-060import org.apache.hadoop.hbase.quotas.QuotaSettings;
-061import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-062import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-063import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-064import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-065import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-066import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-067import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-068import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-069import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-070import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-071import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-072import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-073import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-074import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-075import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-076import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-077import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-078import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-079import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-080import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-081import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-082import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-085import 

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b4bae59/devapidocs/org/apache/hadoop/hbase/regionserver/ByteBufferChunkCell.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/ByteBufferChunkCell.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/ByteBufferChunkCell.html
new file mode 100644
index 000..f08b4b2
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/ByteBufferChunkCell.html
@@ -0,0 +1,353 @@
+org.apache.hadoop.hbase.regionserver
+Class ByteBufferChunkCell
+
+java.lang.Object
+  org.apache.hadoop.hbase.ByteBufferCell
+    org.apache.hadoop.hbase.ByteBufferKeyValue
+      org.apache.hadoop.hbase.regionserver.ByteBufferChunkCell
+
+All Implemented Interfaces:
+Cloneable, Cell, ExtendedCell, HeapSize, SettableSequenceId, SettableTimestamp
+
+
+
+@InterfaceAudience.Private
+public class ByteBufferChunkCell
+extends ByteBufferKeyValue
+ByteBuffer based cell which has the chunkid at the 0th offset
+
+See Also:
+MemStoreLAB
+
+
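
The class comment describes a layout where the backing chunk stores its id in the first bytes of the buffer. An illustrative sketch of that idea (not HBase API; ByteBufferChunkCell itself wraps serialized KeyValue bytes inside such a chunk):

import java.nio.ByteBuffer;

public class ChunkIdLayoutDemo {
  public static void main(String[] args) {
    int chunkId = 42;
    ByteBuffer chunk = ByteBuffer.allocateDirect(1024);
    // The chunk id lives at the 0th offset, so any cell backed by this buffer
    // can recover the id of its MemStoreLAB chunk from the buffer itself.
    chunk.putInt(0, chunkId);
    // ... cell bytes would be written after this header ...
    System.out.println("chunk id = " + chunk.getInt(0));
  }
}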
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+
+
+
+Fields inherited from class org.apache.hadoop.hbase.ByteBufferKeyValue
+buf, FIXED_OVERHEAD, length, offset
+
+
+
+
+
+Fields inherited from interface org.apache.hadoop.hbase.ExtendedCell
+CELL_NOT_BASED_ON_CHUNK
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+ByteBufferChunkCell(ByteBuffer buf, int offset, int length)
+
+
+ByteBufferChunkCell(ByteBuffer buf, int offset, int length, long seqId)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+int
+getChunkId()
+Extracts the id of the backing bytebuffer of this cell if it was obtained from fixed sized
+ chunks as in case of MemstoreLAB
+
+
+
+
+
+
+
+Methods inherited from class org.apache.hadoop.hbase.ByteBufferKeyValue
+deepClone, equals, getBuffer, getFamilyArray, getFamilyByteBuffer, getFamilyLength, getFamilyOffset, getFamilyPosition, getOffset, getQualifierArray, getQualifierByteBuffer, getQualifierLength, getQualifierOffset, getQualifierPosition, getRowArray, getRowByteBuffer, getRowLength, getRowOffset, getRowPosition, getSequenceId, getSerializedSize, getTagsArray, getTagsByteBuffer, getTagsLength, getTagsOffset, getTagsPosition, getTimestamp, getTypeByte, getValueArray, getValueByteBuffer, getValueLength, getValueOffset, getValuePosition, hashCode, heapSize, setSequenceId, setTimestamp, setTimestamp, toString, write, write
+
+
+
+
+
+Methods inherited from class java.lang.Object
+clone, finalize, getClass, 

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2fcc2ae0/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index a58f559..98b388b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -756,2562 +756,2560 @@
 748
 749this.masterActiveTime = System.currentTimeMillis();
 750// TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
-751// Initialize the chunkCreator
-752initializeMemStoreChunkCreator();
-753this.fileSystemManager = new MasterFileSystem(this);
-754this.walManager = new MasterWalManager(this);
-755
-756// enable table descriptors cache
-757this.tableDescriptors.setCacheOn();
-758// set the META's descriptor to the correct replication
-759this.tableDescriptors.get(TableName.META_TABLE_NAME).setRegionReplication(
-760conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
-761// warm-up HTDs cache on master initialization
-762if (preLoadTableDescriptors) {
-763  status.setStatus("Pre-loading table descriptors");
-764  this.tableDescriptors.getAll();
-765}
-766
-767// publish cluster ID
-768status.setStatus("Publishing Cluster ID in ZooKeeper");
-769ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
-770this.initLatch.countDown();
+751this.fileSystemManager = new MasterFileSystem(this);
+752this.walManager = new MasterWalManager(this);
+753
+754// enable table descriptors cache
+755this.tableDescriptors.setCacheOn();
+756// set the META's descriptor to the correct replication
+757this.tableDescriptors.get(TableName.META_TABLE_NAME).setRegionReplication(
+758conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
+759// warm-up HTDs cache on master initialization
+760if (preLoadTableDescriptors) {
+761  status.setStatus("Pre-loading table descriptors");
+762  this.tableDescriptors.getAll();
+763}
+764
+765// publish cluster ID
+766status.setStatus("Publishing Cluster ID in ZooKeeper");
+767ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
+768this.initLatch.countDown();
+769
+770this.serverManager = createServerManager(this);
 771
-772this.serverManager = createServerManager(this);
+772this.tableStateManager = new TableStateManager(this);
 773
-774this.tableStateManager = new TableStateManager(this);
-775
-776status.setStatus("Initializing ZK system trackers");
-777initializeZKBasedSystemTrackers();
-778
-779// This is for backwards compatibility
-780// See HBASE-11393
-781status.setStatus("Update TableCFs node in ZNode");
-782TableCFsUpdater tableCFsUpdater = new TableCFsUpdater(zooKeeper,
-783conf, this.clusterConnection);
-784tableCFsUpdater.update();
-785
-786// initialize master side coprocessors before we start handling requests
-787status.setStatus("Initializing master coprocessors");
-788this.cpHost = new MasterCoprocessorHost(this, this.conf);
-789
-790// start up all service threads.
-791status.setStatus("Initializing master service threads");
-792startServiceThreads();
-793
-794// Wake up this server to check in
-795sleeper.skipSleepCycle();
-796
-797// Wait for region servers to report in
-798status.setStatus("Wait for region servers to report in");
-799waitForRegionServers(status);
-800
-801// get a list for previously failed RS which need log splitting work
-802// we recover hbase:meta region servers inside master initialization and
-803// handle other failed servers in SSH in order to start up master node ASAP
-804MasterMetaBootstrap metaBootstrap = createMetaBootstrap(this, status);
-805metaBootstrap.splitMetaLogsBeforeAssignment();
+774status.setStatus("Initializing ZK system trackers");
+775initializeZKBasedSystemTrackers();
+776
+777// This is for backwards compatibility
+778// See HBASE-11393
+779status.setStatus("Update TableCFs node in ZNode");
+780TableCFsUpdater tableCFsUpdater = new TableCFsUpdater(zooKeeper,
+781conf, this.clusterConnection);
+782tableCFsUpdater.update();
+783
+784// initialize master side coprocessors before we start handling requests
+785status.setStatus("Initializing master coprocessors");
+786this.cpHost = new MasterCoprocessorHost(this, this.conf);
+787
+788// start up all service threads.
+789

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e4348f53/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index e64a477..0dc47d0 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -1154,8 +1154,8 @@ implements initializeFileSystem()
 
 
-private void
-initializeMemStoreChunkPool()
+protected void
+initializeMemStoreChunkCreator()
 
 
 private void
@@ -2367,7 +2367,7 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 UNSPECIFIED_REGION
-private static final byte[] UNSPECIFIED_REGION
+private static final byte[] UNSPECIFIED_REGION
 
 
 
@@ -2376,7 +2376,7 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 movedRegions
-protected Map<String,HRegionServer.MovedRegionInfo> movedRegions
+protected Map<String,HRegionServer.MovedRegionInfo> movedRegions
 
 
 
@@ -2385,7 +2385,7 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 TIMEOUT_REGION_MOVED
-private static final int TIMEOUT_REGION_MOVED
+private static final int TIMEOUT_REGION_MOVED
 
 See Also:
 Constant
 Field Values
@@ -2883,13 +2883,13 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 
-
+
 
 
 
 
-initializeMemStoreChunkPool
-private void initializeMemStoreChunkPool()
+initializeMemStoreChunkCreator
+protected void initializeMemStoreChunkCreator()
 
 
 
@@ -2898,7 +2898,7 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 startHeapMemoryManager
-private void startHeapMemoryManager()
+private void startHeapMemoryManager()
 
 
 
@@ -2907,7 +2907,7 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 createMyEphemeralNode
-private void createMyEphemeralNode()
+private void createMyEphemeralNode()
                throws org.apache.zookeeper.KeeperException,
                       java.io.IOException
 
@@ -2923,7 +2923,7 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 deleteMyEphemeralNode
-private void deleteMyEphemeralNode()
+private void deleteMyEphemeralNode()
 throws org.apache.zookeeper.KeeperException
 
 Throws:
@@ -2937,7 +2937,7 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 getRegionServerAccounting
-public RegionServerAccounting getRegionServerAccounting()
+public RegionServerAccounting getRegionServerAccounting()
 
 Specified by:
 getRegionServerAccounting in interface RegionServerServices
@@ -2952,7 +2952,7 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 createRegionLoad
-org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad createRegionLoad(Region r,
+org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad createRegionLoad(Region r,
     org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder regionLoadBldr,
     org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder regionSpecifier)
   throws java.io.IOException
@@ -2968,7 +2968,7 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 createRegionLoad
-public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad createRegionLoad(String encodedRegionName)
+public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad createRegionLoad(String encodedRegionName)
 

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e57d1b63/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/Cell.html 
b/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
deleted file mode 100644
index 4526143..000
--- a/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
+++ /dev/null
@@ -1,1827 +0,0 @@
-Uses of Interface org.apache.hadoop.hbase.Cell
-
-Packages that use Cell
-
-Package
-Description
-
-org.apache.hadoop.hbase
-
-org.apache.hadoop.hbase.client
-Provides HBase Client
-
-org.apache.hadoop.hbase.filter
-Provides row-level filters applied to HRegion scan results during calls to
- ResultScanner.next().
-
-org.apache.hadoop.hbase.mapreduce
-Provides HBase MapReduce (http://wiki.apache.org/hadoop/HadoopMapReduce)
-Input/OutputFormats, a table indexing MapReduce job, and utility methods.
-
-org.apache.hadoop.hbase.util
-
-
-
-
-
-
-
-Uses of Cell in org.apache.hadoop.hbase
-
-Methods in org.apache.hadoop.hbase that return Cell
-
-Modifier and Type
-Method and Description
-
-static Cell
-CellUtil.copyCellTo(Cell cell, ByteBuffer buf, int offset, int len)
-Clone the passed cell by copying its data into the passed buf.
-
-static Cell
-CellUtil.createCell(byte[] row)
-Create a Cell with specific row.
-
-static Cell
-CellUtil.createCell(byte[] row, byte[] value)
-Create a Cell with specific row and value.
-
-static Cell
-CellUtil.createCell(byte[] row, byte[] family, byte[] qualifier)
-Create a Cell with specific row.
-
-static Cell
-CellUtil.createCell(byte[] row, byte[] family, byte[] qualifier, long timestamp, byte type, byte[] value)
-
-static Cell
-CellUtil.createCell(byte[] row, byte[] family, byte[] qualifier, long timestamp, byte type, byte[] value, byte[] tags, long memstoreTS)
-Marked as audience Private as of 1.2.0.
-
-static Cell
-CellUtil.createCell(byte[] row, byte[] family, byte[] qualifier, long timestamp, byte type, byte[] value, long memstoreTS)
-Marked as audience Private as of 1.2.0.
-
-static Cell
-CellUtil.createCell(byte[] row, byte[] family, byte[] qualifier, long timestamp, org.apache.hadoop.hbase.KeyValue.Type type, byte[] value, byte[] tags)
-Marked as audience Private as of 1.2.0.
-
-static Cell
-CellUtil.createCell(byte[] rowArray, int rowOffset, int rowLength, byte[] familyArray, int familyOffset, int familyLength, byte[] qualifierArray, int qualifierOffset, int qualifierLength)
-
-static Cell
-CellUtil.createCell(Cell cell, byte[] tags)
-
-static Cell
-CellUtil.createCell(Cell cell, byte[] value, byte[] tags)
-
-static Cell
-CellUtil.createCell(Cell cell, List<org.apache.hadoop.hbase.Tag> tags)
-
-static Cell
-CellUtil.createFirstDeleteFamilyCellOnRow(byte[] row, byte[] fam)
-Create a Delete Family Cell for the specified row and family that would
- be smaller than all other possible Delete Family KeyValues that have the
- same row and family.
-
-static Cell
-CellUtil.createFirstOnNextRow(Cell cell)
-Create a Cell that is smaller than all other possible Cells for the given Cell row's next row.
-
-static Cell
-CellUtil.createFirstOnRow(byte[] row)
-
-static Cell
-CellUtil.createFirstOnRow(byte[] row, byte[] family, byte[] col)
-
-static Cell
-CellUtil.createFirstOnRow(byte[] row, int roffset,
-