[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html 
b/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
index 7e3fe59..6867e53 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
@@ -110,10 +110,11 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class TableStateManager
+public class TableStateManager
 extends java.lang.Object
 This is a helper class used to manage table states.
- States persisted in tableinfo and cached internally.
+ States persisted in tableinfo and cached internally.
+ TODO: Cache state. Cut down on meta lookups.
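A hedged sketch of how the methods documented below fit together. TableStateManager is @InterfaceAudience.Private, so this is illustrative only; in particular, the null-return convention of setTableStateIfInStates is an assumption, not a documented contract:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.TableStateManager;

final class TableStateSketch {
  // Disable a table only if it is currently enabled.
  static void disableIfEnabled(TableStateManager tsm, TableName table) throws IOException {
    // Assumed: returns the previous state on success, null when the table
    // was not in one of the given states.
    TableState.State prev =
        tsm.setTableStateIfInStates(table, TableState.State.DISABLING, TableState.State.ENABLED);
    if (prev == null) {
      return; // no transition happened
    }
    tsm.setTableState(table, TableState.State.DISABLED);
  }
}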
 
 
 
@@ -270,7 +271,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -279,7 +280,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 lock
-private final java.util.concurrent.locks.ReadWriteLock lock
+private final java.util.concurrent.locks.ReadWriteLock lock
 
 
 
@@ -288,7 +289,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 master
-private final MasterServices master
+private final MasterServices master
 
 
 
@@ -305,7 +306,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TableStateManager
-public TableStateManager(MasterServices master)
+public TableStateManager(MasterServices master)
 
 
 
@@ -322,7 +323,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setTableState
-public void setTableState(TableName tableName,
+public void setTableState(TableName tableName,
   TableState.State newState)
 throws java.io.IOException
 Set table state to provided.
@@ -342,7 +343,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setTableStateIfInStates
-public TableState.State setTableStateIfInStates(TableName tableName,
+public TableState.State setTableStateIfInStates(TableName tableName,
 TableState.State newState,
 TableState.State... states)
  throws java.io.IOException
@@ -366,7 +367,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setTableStateIfNotInStates
-public boolean setTableStateIfNotInStates(TableName tableName,
+public boolean setTableStateIfNotInStates(TableName tableName,
   TableState.State newState,
   TableState.State... states)
 throws java.io.IOException
@@ -388,7 +389,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 isTableState
-public boolean isTableState(TableName tableName,
+public boolean isTableState(TableName tableName,
 TableState.State... states)
 
 
@@ -398,7 +399,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setDeletedTable
-public void setDeletedTable(TableName tableName)
+public void setDeletedTable(TableName tableName)
  throws java.io.IOException
 
 Throws:
@@ -412,7 +413,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 isTablePresent
-public boolean isTablePresent(TableName tableName)
+public boolean isTablePresent(TableName tableName)
 throws java.io.IOException
 
 Throws:
@@ -426,7 +427,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getTablesInStates
-public java.util.Set

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html 
b/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
index fca3b23..a512ad0 100644
--- a/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
+++ b/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
@@ -289,7 +289,7 @@ the order they are declared.
 
 
 values
-public static MetaTableAccessor.QueryType[] values()
+public static MetaTableAccessor.QueryType[] values()
 Returns an array containing the constants of this enum type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -309,7 +309,7 @@ for (MetaTableAccessor.QueryType c : MetaTableAccessor.QueryType.values())
 
 
 valueOf
-public static MetaTableAccessor.QueryType valueOf(java.lang.String name)
+public static MetaTableAccessor.QueryType valueOf(java.lang.String name)
 Returns the enum constant of this type with the specified name.
 The string must match exactly an identifier used to declare an
 enum constant in this type.  (Extraneous whitespace characters are
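The enum contract spelled out above can be exercised directly. A small sketch — QueryType scopes hbase:meta scans and, per imports elsewhere on this site, is accessible as org.apache.hadoop.hbase.MetaTableAccessor.QueryType; REGION is assumed to be one of its constants:

import org.apache.hadoop.hbase.MetaTableAccessor;

final class QueryTypeSketch {
  public static void main(String[] args) {
    // values() returns the constants in declaration order.
    for (MetaTableAccessor.QueryType c : MetaTableAccessor.QueryType.values()) {
      System.out.println(c);
    }
    // valueOf(String) needs an exact identifier match and throws
    // IllegalArgumentException otherwise.
    System.out.println(MetaTableAccessor.QueryType.valueOf("REGION"));
  }
}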

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/backup/class-use/BackupInfo.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/backup/class-use/BackupInfo.html 
b/devapidocs/org/apache/hadoop/hbase/backup/class-use/BackupInfo.html
index fd61776..a7b63cf 100644
--- a/devapidocs/org/apache/hadoop/hbase/backup/class-use/BackupInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/backup/class-use/BackupInfo.html
@@ -336,14 +336,14 @@
 
 
 java.util.List<BackupInfo>
-BackupAdminImpl.getHistory(int n)
-
-
-java.util.List<BackupInfo>
 BackupSystemTable.getHistory(int n)
 Get first n backup history records
 
 
+
+java.util.List<BackupInfo>
+BackupAdminImpl.getHistory(int n)
+
 
 java.util.List<BackupInfo>
 BackupAdminImpl.getHistory(int n,

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
index c589952..707f172 100644
--- a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
@@ -166,10 +166,10 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.backup.BackupType
 org.apache.hadoop.hbase.backup.BackupInfo.BackupState
 org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase
 org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand
+org.apache.hadoop.hbase.backup.BackupType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/class-use/Abortable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Abortable.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Abortable.html
index 436cd7e..dc2069a 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/Abortable.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/Abortable.html
@@ -416,11 +416,11 @@
 
 
 private Abortable
-RpcExecutor.abortable
+SimpleRpcScheduler.abortable
 
 
 private Abortable
-SimpleRpcScheduler.abortable
+RpcExecutor.abortable
 
 
 
@@ -661,9 +661,11 @@
 
 
 RpcScheduler
-FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
+RpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
   PriorityFunction priority,
-  Abortable server)
+  

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 5c95397..860416b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -293,7944 +293,7962 @@
 285  final AtomicLong compactionsFailed = new AtomicLong(0L);
 286  final AtomicLong compactionNumFilesCompacted = new AtomicLong(0L);
 287  final AtomicLong compactionNumBytesCompacted = new AtomicLong(0L);
-288
-289  private final WAL wal;
-290  private final HRegionFileSystem fs;
-291  protected final Configuration conf;
-292  private final Configuration baseConf;
-293  private final int rowLockWaitDuration;
-294  static final int DEFAULT_ROWLOCK_WAIT_DURATION = 3;
-295
-296  // The internal wait duration to acquire a lock before read/update
-297  // from the region. It is not per row. The purpose of this wait time
-298  // is to avoid waiting a long time while the region is busy, so that
-299  // we can release the IPC handler soon enough to improve the
-300  // availability of the region server. It can be adjusted by
-301  // tuning configuration "hbase.busy.wait.duration".
-302  final long busyWaitDuration;
-303  static final long DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-304
-305  // If updating multiple rows in one call, wait longer,
-306  // i.e. waiting for busyWaitDuration * # of rows. However,
-307  // we can limit the max multiplier.
-308  final int maxBusyWaitMultiplier;
-309
-310  // Max busy wait duration. There is no point to wait longer than the RPC
-311  // purge timeout, when a RPC call will be terminated by the RPC engine.
-312  final long maxBusyWaitDuration;
-313
-314  // Max cell size. If nonzero, the maximum allowed size for any given cell
-315  // in bytes
-316  final long maxCellSize;
-317
-318  // negative number indicates infinite timeout
-319  static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-320  final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool();
-321
-322  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
+288  final AtomicLong compactionsQueued = new AtomicLong(0L);
+289  final AtomicLong flushesQueued = new AtomicLong(0L);
+290
+291  private final WAL wal;
+292  private final HRegionFileSystem fs;
+293  protected final Configuration conf;
+294  private final Configuration baseConf;
+295  private final int rowLockWaitDuration;
+296  static final int DEFAULT_ROWLOCK_WAIT_DURATION = 3;
+297
+298  // The internal wait duration to acquire a lock before read/update
+299  // from the region. It is not per row. The purpose of this wait time
+300  // is to avoid waiting a long time while the region is busy, so that
+301  // we can release the IPC handler soon enough to improve the
+302  // availability of the region server. It can be adjusted by
+303  // tuning configuration "hbase.busy.wait.duration".
+304  final long busyWaitDuration;
+305  static final long DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+306
+307  // If updating multiple rows in one call, wait longer,
+308  // i.e. waiting for busyWaitDuration * # of rows. However,
+309  // we can limit the max multiplier.
+310  final int maxBusyWaitMultiplier;
+311
+312  // Max busy wait duration. There is no point to wait longer than the RPC
+313  // purge timeout, when a RPC call will be terminated by the RPC engine.
+314  final long maxBusyWaitDuration;
+315
+316  // Max cell size. If nonzero, the maximum allowed size for any given cell
+317  // in bytes
+318  final long maxCellSize;
+319
+320  // negative number indicates infinite timeout
+321  static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+322  final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool();
 323
-324  /**
-325   * The sequence ID that was enLongAddered when this region was opened.
-326   */
-327  private long openSeqNum = HConstants.NO_SEQNUM;
-328
-329  /**
-330   * The default setting for whether to enable on-demand CF loading for
-331   * scan requests to this region. Requests can override it.
-332   */
-333  private boolean isLoadingCfsOnDemandDefault = false;
-334
-335  private final AtomicInteger majorInProgress = new AtomicInteger(0);
-336  private final AtomicInteger minorInProgress = new AtomicInteger(0);
-337
-338  //
-339  // Context: During replay we want to ensure that we do not lose any data. So, we
-340  // have to be conservative in how we replay wals. For each store, we calculate
-341  // the
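For context on the busy-wait fields in this hunk: a minimal sketch of reading the same knobs from a Configuration. The "hbase.busy.wait.duration" key comes from the source comment above; the companion key and the defaults are assumptions mirroring HRegion's initialization:

import org.apache.hadoop.conf.Configuration;

final class BusyWaitConfigSketch {
  static void dump(Configuration conf) {
    // DEFAULT_BUSY_WAIT_DURATION is HConstants.DEFAULT_HBASE_RPC_TIMEOUT (assumed 60s).
    long busyWaitDuration = conf.getLong("hbase.busy.wait.duration", 60 * 1000L);
    // Assumed companion key capping busyWaitDuration * (number of rows).
    int maxBusyWaitMultiplier = conf.getInt("hbase.busy.wait.multiplier.max", 2);
    // Waiting longer than the RPC purge timeout is pointless: the RPC engine
    // will already have terminated the call (see maxBusyWaitDuration above).
    System.out.println("busy wait " + busyWaitDuration + " ms, multiplier max " + maxBusyWaitMultiplier);
  }
}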

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.html
index 7800ef6..e8361b7 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.html
@@ -49,7 +49,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-PrevClass
+PrevClass
 NextClass
 
 
@@ -124,7 +124,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public abstract class NormalUserScanQueryMatcher
+public abstract class NormalUserScanQueryMatcher
 extends UserScanQueryMatcher
 Query matcher for normal user scan.
 
@@ -173,7 +173,7 @@ extends 
-private boolean
+protected boolean
 seePastDeleteMarkers
 whether time range queries can see rows "behind" a delete
@@ -240,13 +240,13 @@ extends 
 static NormalUserScanQueryMatcher
-create(Scan scan,
+create(Scan scan,
   ScanInfo scanInfo,
   ColumnTracker columns,
+  DeleteTracker deletes,
   boolean hasNullColumn,
   long oldestUnexpiredTS,
-  long now,
-  RegionCoprocessorHost regionCoprocessorHost)
+  long now)
 
 
 protected boolean
@@ -282,7 +282,7 @@ extends ScanQueryMatcher
-checkColumn, checkDeleted, clearCurrentRow, compareKeyForNextColumn, compareKeyForNextRow, createStartKeyFromRow, currentRow, getKeyForNextColumn, getStartKey, instantiateDeleteTracker, preCheck, setToNewRow
+checkColumn, checkDeleted, clearCurrentRow, compareKeyForNextColumn, compareKeyForNextRow, createStartKeyFromRow, currentRow, getKeyForNextColumn, getStartKey, getTrackers, preCheck, setToNewRow
 
 
 
@@ -311,7 +311,7 @@ extends 
 
 deletes
-private final DeleteTracker deletes
+private final DeleteTracker deletes
 Keeps track of deletes
 
 
@@ -321,7 +321,7 @@ extends 
 
 get
-private final boolean get
+private final boolean get
 True if we are doing a 'Get' Scan. Every Get is actually a one-row Scan.
 
 
@@ -331,7 +331,7 @@ extends 
 
 seePastDeleteMarkers
-private final boolean seePastDeleteMarkers
+protected final boolean seePastDeleteMarkers
 whether time range queries can see rows "behind" a delete
 
 
@@ -349,7 +349,7 @@ extends 
 
 NormalUserScanQueryMatcher
-protected NormalUserScanQueryMatcher(Scan scan,
+protected NormalUserScanQueryMatcher(Scan scan,
  ScanInfo scanInfo,
  ColumnTracker columns,
  boolean hasNullColumn,
@@ -372,7 +372,7 @@ extends 
 
 beforeShipped
-public void beforeShipped()
+public void beforeShipped()
 throws java.io.IOException
 Description copied from interface: ShipperListener
 The action that needs to be performed before Shipper.shipped() is performed
@@ -392,7 +392,7 @@ extends 
 
 match
-public ScanQueryMatcher.MatchCode match(Cell cell)
+public ScanQueryMatcher.MatchCode match(Cell cell)
  throws java.io.IOException
 Description copied from class: ScanQueryMatcher
 Determines if the caller should do one of several things:
@@ -422,7 +422,7 @@ extends 
 
 reset
-protected void reset()
+protected void reset()
 
 Specified by:
 reset in class ScanQueryMatcher
@@ -435,26 +435,26 @@ extends 
 
 isGet
-protected boolean isGet()
+protected boolean isGet()
 
 Specified by:
 isGet in class UserScanQueryMatcher
 
 
 
-
+
 
 
 
 
 create
-public static NormalUserScanQueryMatcher create(Scan scan,
+public static NormalUserScanQueryMatcher create(Scan scan,
 ScanInfo scanInfo,
 ColumnTracker columns,
+DeleteTracker deletes,
 boolean hasNullColumn,
 long oldestUnexpiredTS,
-long now,
-RegionCoprocessorHost regionCoprocessorHost)
+long now)
  throws java.io.IOException
 
 Throws:
@@ -490,7 +490,7 @@ extends 
 
-PrevClass
+PrevClass
 NextClass
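A conceptual sketch of the match() contract documented above, as a consumer loop would use it; this is illustrative pseudologic under the current MatchCode names, not the actual StoreScanner implementation:

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;

final class MatcherLoopSketch {
  static void consume(ScanQueryMatcher matcher, Iterable<Cell> cells) throws IOException {
    for (Cell cell : cells) {
      ScanQueryMatcher.MatchCode code = matcher.match(cell);
      switch (code) {
        case INCLUDE:
          break; // emit the cell into the result
        case SKIP:
          break; // drop this cell, keep scanning
        case DONE:
          matcher.clearCurrentRow(); // row finished; prepare for the next one
          return;
        default:
          break; // SEEK_* codes drive reseeks in the real scanner
      }
    }
  }
}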
 
 


[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
index 197b13b..770b46e 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
@@ -630,24 +630,28 @@
 
 
 
+private static class
+ProcedureExecutor.FailedProcedure<TEnvironment>
+
+
 class
 ProcedureInMemoryChore<TEnvironment>
 Special procedure used as a chore.
 
 
-
+
 class
 SequentialProcedure<TEnvironment>
 A SequentialProcedure describes one step in a procedure chain:
 
 
-
+
 class
 StateMachineProcedure<TEnvironment,TState>
 Procedure described by a series of steps.
 
 
-
+
 class
 TwoPhaseProcedure<TEnvironment>
 TwoPhaseProcedureTEnvironment
 
@@ -668,6 +672,10 @@
 private Procedure<?>
 LockAndQueue.exclusiveLockOwnerProcedure
 
+
+private Procedure<?>
+ProcedureExecutor.CompletedProcedureRetainer.procedure
+
 
 
 
@@ -754,24 +762,40 @@
 ProcedureInMemoryChore.execute(TEnvironment env)
 
 
+protected Procedure<TEnvironment>[]
+ProcedureExecutor.FailedProcedure.execute(TEnvironment env)
+
+
 protected abstract Procedure<TEnvironment>[]
 Procedure.execute(TEnvironment env)
 The main code of the procedure.
 
 
-
+
 Procedure<?>
 LockAndQueue.getExclusiveLockOwnerProcedure()
 
-
+
 Procedure<?>
 LockStatus.getExclusiveLockOwnerProcedure()
 
+
+Procedure<?>
+ProcedureExecutor.CompletedProcedureRetainer.getProcedure()
+
 
 Procedure
 ProcedureExecutor.getProcedure(long procId)
 
 
+Procedure
+ProcedureExecutor.getResult(long procId)
+
+
+Procedure
+ProcedureExecutor.getResultOrProcedure(long procId)
+
+
 private Procedure[]
 ProcedureExecutor.initializeChildren(RootProcedureState procStack,
   Procedure procedure,
@@ -821,13 +845,15 @@
 
 
 
-Pair<ProcedureInfo, Procedure>
-ProcedureExecutor.getResultOrProcedure(long procId)
-
-
 protected java.util.List<Procedure>
 RootProcedureState.getSubproceduresStack()
 
+
+java.util.List<Procedure>
+ProcedureExecutor.listProcedures()
+List procedures.
+
+
 
 
 
@@ -1156,6 +1182,9 @@
 
 
 
+CompletedProcedureRetainer(Procedure<?> procedure)
+
+
 DelayedProcedure(Procedure procedure)
 
 
@@ -1174,7 +1203,7 @@
 
 
 Procedure
-ProcedureStore.ProcedureIterator.nextAsProcedure()
+ProcedureStore.ProcedureIterator.next()
 Returns the next procedure in the iteration.
 
 
@@ -1277,7 +1306,7 @@
 
 
 Procedure
-ProcedureWALFormatReader.EntryIterator.nextAsProcedure()
+ProcedureWALFormatReader.EntryIterator.next()
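A hedged sketch of the accessors this change adds or reshapes on ProcedureExecutor (getResult and getResultOrProcedure now return Procedure instead of Pair<ProcedureInfo, Procedure>; listProcedures returns List<Procedure>). The null-means-no-completed-result reading of getResult is an assumption:

import java.util.List;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

final class ProcedureResultSketch {
  static <TEnv> void poll(ProcedureExecutor<TEnv> executor, long procId) {
    Procedure result = executor.getResult(procId); // assumed null while still running
    if (result == null) {
      // Fall back to the live procedure when no completed result is retained.
      result = executor.getResultOrProcedure(procId);
    }
    List<Procedure> all = executor.listProcedures();
    System.out.println("known procedures: " + all.size() + ", target: " + result);
  }
}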
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.CompletedProcedureRetainer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.CompletedProcedureRetainer.html
 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.CompletedProcedureRetainer.html
new file mode 100644
index 000..38cdb42
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.CompletedProcedureRetainer.html
@@ -0,0 +1,185 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+
+Uses of Class org.apache.hadoop.hbase.procedure2.ProcedureExecutor.CompletedProcedureRetainer (Apache HBase 3.0.0-SNAPSHOT API)
+
+Uses of Class org.apache.hadoop.hbase.procedure2.ProcedureExecutor.CompletedProcedureRetainer
+
+
+Packages that use ProcedureExecutor.CompletedProcedureRetainer
+
+Package
+Description
+
+
+org.apache.hadoop.hbase.procedure2
+
+
+
+Uses of ProcedureExecutor.CompletedProcedureRetainer in org.apache.hadoop.hbase.procedure2
+
+Fields in org.apache.hadoop.hbase.procedure2 with type parameters of type ProcedureExecutor.CompletedProcedureRetainer
+
+Modifier and

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
index 6c200a1..e6f8c2e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
@@ -1350,726 +1350,719 @@
 1342  }
 1343
 1344  @Override
-1345  public MasterProtos.DispatchMergingRegionsResponse dispatchMergingRegions(
-1346      RpcController controller, MasterProtos.DispatchMergingRegionsRequest request)
-1347      throws ServiceException {
-1348    return stub.dispatchMergingRegions(controller, request);
-1349  }
-1350
-1351  @Override
-1352  public MasterProtos.AssignRegionResponse assignRegion(RpcController controller,
-1353      MasterProtos.AssignRegionRequest request) throws ServiceException {
-1354    return stub.assignRegion(controller, request);
-1355  }
-1356
-1357  @Override
-1358  public MasterProtos.UnassignRegionResponse unassignRegion(RpcController controller,
-1359      MasterProtos.UnassignRegionRequest request) throws ServiceException {
-1360    return stub.unassignRegion(controller, request);
-1361  }
-1362
-1363  @Override
-1364  public MasterProtos.OfflineRegionResponse offlineRegion(RpcController controller,
-1365      MasterProtos.OfflineRegionRequest request) throws ServiceException {
-1366    return stub.offlineRegion(controller, request);
-1367  }
-1368
-1369  @Override
-1370  public MasterProtos.SplitTableRegionResponse splitRegion(RpcController controller,
-1371      MasterProtos.SplitTableRegionRequest request) throws ServiceException {
-1372    return stub.splitRegion(controller, request);
-1373  }
-1374
-1375  @Override
-1376  public MasterProtos.DeleteTableResponse deleteTable(RpcController controller,
-1377      MasterProtos.DeleteTableRequest request) throws ServiceException {
-1378    return stub.deleteTable(controller, request);
-1379  }
-1380
-1381  @Override
-1382  public MasterProtos.TruncateTableResponse truncateTable(RpcController controller,
-1383      MasterProtos.TruncateTableRequest request) throws ServiceException {
-1384    return stub.truncateTable(controller, request);
-1385  }
-1386
-1387  @Override
-1388  public MasterProtos.EnableTableResponse enableTable(RpcController controller,
-1389      MasterProtos.EnableTableRequest request) throws ServiceException {
-1390    return stub.enableTable(controller, request);
-1391  }
-1392
-1393  @Override
-1394  public MasterProtos.DisableTableResponse disableTable(RpcController controller,
-1395      MasterProtos.DisableTableRequest request) throws ServiceException {
-1396    return stub.disableTable(controller, request);
-1397  }
-1398
-1399  @Override
-1400  public MasterProtos.ModifyTableResponse modifyTable(RpcController controller,
-1401      MasterProtos.ModifyTableRequest request) throws ServiceException {
-1402    return stub.modifyTable(controller, request);
-1403  }
-1404
-1405  @Override
-1406  public MasterProtos.CreateTableResponse createTable(RpcController controller,
-1407      MasterProtos.CreateTableRequest request) throws ServiceException {
-1408    return stub.createTable(controller, request);
-1409  }
-1410
-1411  @Override
-1412  public MasterProtos.ShutdownResponse shutdown(RpcController controller,
-1413      MasterProtos.ShutdownRequest request) throws ServiceException {
-1414    return stub.shutdown(controller, request);
-1415  }
-1416
-1417  @Override
-1418  public MasterProtos.StopMasterResponse stopMaster(RpcController controller,
-1419      MasterProtos.StopMasterRequest request) throws ServiceException {
-1420    return stub.stopMaster(controller, request);
+1345  public MasterProtos.AssignRegionResponse assignRegion(RpcController controller,
+1346      MasterProtos.AssignRegionRequest request) throws ServiceException {
+1347    return stub.assignRegion(controller, request);
+1348  }
+1349
+1350  @Override
+1351  public MasterProtos.UnassignRegionResponse unassignRegion(RpcController controller,
+1352      MasterProtos.UnassignRegionRequest request) throws ServiceException {
+1353    return stub.unassignRegion(controller, request);
+1354  }
+1355
+1356  @Override
+1357
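Every override in this hunk is a one-line forward to the protobuf stub; the change simply drops dispatchMergingRegions and renumbers the rest. A generic sketch of that delegation shape (names here are illustrative, not the HBase types):

interface BlockingStub {
  String assignRegion(String controller, String request) throws Exception;
}

final class KeepAliveFacade implements BlockingStub {
  private final BlockingStub stub;

  KeepAliveFacade(BlockingStub stub) {
    this.stub = stub;
  }

  @Override
  public String assignRegion(String controller, String request) throws Exception {
    // No behavior added here; keep-alive bookkeeping lives elsewhere,
    // so each override just forwards, exactly as in the diff.
    return stub.assignRegion(controller, request);
  }
}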

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 5b6f058..3489918 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Checkstyle Results
 
@@ -289,7 +289,7 @@
 2247
 0
 0
-14906
+14922
 
 Files
 
@@ -412,7 +412,7 @@
 org/apache/hadoop/hbase/HColumnDescriptor.java
 0
 0
-39
+42
 
 org/apache/hadoop/hbase/HConstants.java
 0
@@ -747,7 +747,7 @@
 org/apache/hadoop/hbase/client/Action.java
 0
 0
-1
+2
 
 org/apache/hadoop/hbase/client/Admin.java
 0
@@ -982,7 +982,7 @@
 org/apache/hadoop/hbase/client/HTable.java
 0
 0
-26
+28
 
 org/apache/hadoop/hbase/client/HTableMultiplexer.java
 0
@@ -1032,7 +1032,7 @@
 org/apache/hadoop/hbase/client/MultiAction.java
 0
 0
-3
+8
 
 org/apache/hadoop/hbase/client/MultiResponse.java
 0
@@ -1102,7 +1102,7 @@
 org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java
 0
 0
-1
+2
 
 org/apache/hadoop/hbase/client/RegionCoprocessorServiceExec.java
 0
@@ -1117,7 +1117,7 @@
 org/apache/hadoop/hbase/client/RegionServerCallable.java
 0
 0
-1
+2
 
 org/apache/hadoop/hbase/client/RegionServerCoprocessorRpcChannelImpl.java
 0
@@ -1212,7 +1212,7 @@
 org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
 0
 0
-4
+5
 
 org/apache/hadoop/hbase/client/Scan.java
 0
@@ -1222,7 +1222,7 @@
 org/apache/hadoop/hbase/client/ScannerCallable.java
 0
 0
-5
+6
 
 org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java
 0
@@ -1232,7 +1232,7 @@
 org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
 0
 0
-17
+18
 
 org/apache/hadoop/hbase/client/ServerStatisticTracker.java
 0
@@ -7289,7 +7289,7 @@
 
 annotation
 MissingDeprecated (http://checkstyle.sourceforge.net/config_annotation.html#MissingDeprecated)
-137
+140
 Error
 
 blocks
@@ -7299,7 +7299,7 @@
 
 
 LeftCurly (http://checkstyle.sourceforge.net/config_blocks.html#LeftCurly)
-354
+356
 Error
 
 
@@ -7366,7 +7366,7 @@
 ordered: true
 sortStaticImportsAlphabetically: true
 option: top
-1024
+1027
 Error
 
 
@@ -7378,7 +7378,7 @@
 UnusedImports (http://checkstyle.sourceforge.net/config_imports.html#UnusedImports)
 
 processJavadoc: true
-112
+114
 Error
 
 indentation
@@ -7396,12 +7396,12 @@
 JavadocTagContinuationIndentation (http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation)
 
 offset: 2
-803
+797
 Error
 
 
 NonEmptyAtclauseDescription (http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription)
-3253
+3259
 Error
 
 misc
@@ -7419,7 +7419,7 @@
 
 max: 100
 ignorePattern: ^package.*|^import.*|a href|href|http://|https://|ftp://|org.apache.thrift.|com.google.protobuf.|hbase.protobuf.generated
-982
+988
 Error
 
 
@@ -9128,92 +9128,110 @@
 106
 
 Error
-javadoc
-JavadocTagContinuationIndentation
-Line continuation have incorrect indentation level, expected level should be 2.
-117
+annotation
+MissingDeprecated
+Must include both @java.lang.Deprecated annotation and @deprecated Javadoc tag with description.
+109
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should be 2.
-146
+118
+
+Error
+annotation
+MissingDeprecated
+Must include both @java.lang.Deprecated annotation and @deprecated Javadoc tag with description.
+121
+
+Error
+annotation
+MissingDeprecated
+Must include both @java.lang.Deprecated annotation and @deprecated Javadoc tag with description.
+132
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should be 2.
-148
+149
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should be 2.
-290
+151
 
 Error
 javadoc
-NonEmptyAtclauseDescription
-At-clause should have a non-empty description.
-335
+JavadocTagContinuationIndentation
+Line continuation have incorrect indentation level, expected level should be 2.
+293
 
 Error
 javadoc
-JavadocTagContinuationIndentation
-Line continuation have incorrect indentation level, expected level should be 2.
-373
+NonEmptyAtclauseDescription
+At-clause should have a non-empty description.
+338
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should be 2.
-403
+376
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should be 2.
-442
+406
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should be 2.
-457
+445
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should be 2.
-514
+460
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line
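The MissingDeprecated rows above flag members carrying only one half of the deprecation pair. A minimal sketch of code that satisfies the check — both the @Deprecated annotation and a @deprecated Javadoc tag with a description (names are illustrative):

public class DeprecationSketch {
  /**
   * @deprecated Use {@link #newWay()} instead; kept for binary compatibility.
   */
  @Deprecated
  public void oldWay() {
    newWay();
  }

  public void newWay() {
    // current implementation
  }
}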

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html 
b/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
index 144336c..e7b3b80 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
@@ -326,7 +326,7 @@
 
 
 private void
-SimpleLoadBalancer.addRegionPlan(com.google.common.collect.MinMaxPriorityQueue<RegionPlan> regionsToMove,
+SimpleLoadBalancer.addRegionPlan(org.apache.hadoop.hbase.shaded.com.google.common.collect.MinMaxPriorityQueue<RegionPlan> regionsToMove,
   boolean fetchFromTail,
   ServerName sn,
   java.util.List<RegionPlan> regionsToReturn)
@@ -335,7 +335,7 @@
 
 
 private void
-SimpleLoadBalancer.addRegionPlan(com.google.common.collect.MinMaxPriorityQueue<RegionPlan> regionsToMove,
+SimpleLoadBalancer.addRegionPlan(org.apache.hadoop.hbase.shaded.com.google.common.collect.MinMaxPriorityQueue<RegionPlan> regionsToMove,
   boolean fetchFromTail,
   ServerName sn,
   java.util.List<RegionPlan> regionsToReturn)
@@ -344,10 +344,10 @@
 
 
 void
-SimpleLoadBalancer.balanceOverall(java.util.List<RegionPlan> regionsToReturn,
+SimpleLoadBalancer.balanceOverall(java.util.List<RegionPlan> regionsToReturn,
   java.util.Map<ServerName,SimpleLoadBalancer.BalanceInfo> serverBalanceInfo,
   boolean fetchFromTail,
-  com.google.common.collect.MinMaxPriorityQueue<RegionPlan> regionsToMove,
+  org.apache.hadoop.hbase.shaded.com.google.common.collect.MinMaxPriorityQueue<RegionPlan> regionsToMove,
   int max,
   int min)
 If we need to balanceoverall, we need to add one more round to peel off one region from each max.
@@ -355,10 +355,10 @@
 
 
 void
-SimpleLoadBalancer.balanceOverall(java.util.List<RegionPlan> regionsToReturn,
+SimpleLoadBalancer.balanceOverall(java.util.List<RegionPlan> regionsToReturn,
   java.util.Map<ServerName,SimpleLoadBalancer.BalanceInfo> serverBalanceInfo,
   boolean fetchFromTail,
-  com.google.common.collect.MinMaxPriorityQueue<RegionPlan> regionsToMove,
+  org.apache.hadoop.hbase.shaded.com.google.common.collect.MinMaxPriorityQueue<RegionPlan> regionsToMove,
   int max,
   int min)
 If we need to balanceoverall, we need to add one more round to peel off one region from each max.
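The only change in these signatures is the Guava relocation: HBase now uses the shaded MinMaxPriorityQueue. A small sketch of the structure itself — the import path is the shaded one from the diff, and the queue API is standard Guava:

import org.apache.hadoop.hbase.shaded.com.google.common.collect.MinMaxPriorityQueue;

final class RegionPlanQueueSketch {
  static void demo() {
    // A double-ended priority queue: cheap access to both the least and
    // greatest element, which is what balanceOverall peels regions from.
    MinMaxPriorityQueue<Integer> q = MinMaxPriorityQueue.create();
    q.add(3); q.add(1); q.add(2);
    System.out.println(q.peekFirst() + " .. " + q.peekLast()); // 1 .. 3
  }
}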

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/master/locking/LockProcedure.LockType.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/locking/LockProcedure.LockType.html 
b/devapidocs/org/apache/hadoop/hbase/master/locking/LockProcedure.LockType.html
index 6b6fdc0..0cc0763 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/locking/LockProcedure.LockType.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/locking/LockProcedure.LockType.html
@@ -236,7 +236,7 @@ the order they are declared.
 
 
 values
-public static LockProcedure.LockType[] values()
+public static LockProcedure.LockType[] values()
 Returns an array containing the constants of this enum type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -256,7 +256,7 @@ for (LockProcedure.LockType c : LockProcedure.LockType.values())
 
 
 valueOf
-public static LockProcedure.LockType valueOf(java.lang.String name)
+public static LockProcedure.LockType valueOf(java.lang.String name)
 Returns the enum constant of this type with the specified name.
 The string must match exactly an identifier used to

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
index 504e470..38667c0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
@@ -2866,5375 +2866,5371 @@
 2858    checkResources();
 2859    startRegionOperation(Operation.DELETE);
 2860    try {
-2861      delete.getRow();
-2862      // All edits for the given row (across all column families) must happen atomically.
-2863      doBatchMutate(delete);
-2864    } finally {
-2865      closeRegionOperation(Operation.DELETE);
-2866    }
-2867  }
-2868
-2869  /**
-2870   * Row needed by below method.
-2871   */
-2872  private static final byte [] FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly");
-2873
-2874  /**
-2875   * This is used only by unit tests. Not required to be a public API.
-2876   * @param familyMap map of family to edits for the given family.
-2877   * @throws IOException
-2878   */
-2879  void delete(NavigableMap<byte[], List<Cell>> familyMap,
-2880      Durability durability) throws IOException {
-2881    Delete delete = new Delete(FOR_UNIT_TESTS_ONLY);
-2882    delete.setFamilyCellMap(familyMap);
-2883    delete.setDurability(durability);
-2884    doBatchMutate(delete);
-2885  }
-2886
-2887  @Override
-2888  public void prepareDeleteTimestamps(Mutation mutation, Map<byte[], List<Cell>> familyMap,
-2889      byte[] byteNow) throws IOException {
-2890    for (Map.Entry<byte[], List<Cell>> e : familyMap.entrySet()) {
-2891
-2892      byte[] family = e.getKey();
-2893      List<Cell> cells = e.getValue();
-2894      assert cells instanceof RandomAccess;
-2895
-2896      Map<byte[], Integer> kvCount = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-2897      int listSize = cells.size();
-2898      for (int i=0; i < listSize; i++) {
-2899        Cell cell = cells.get(i);
-2900        //  Check if time is LATEST, change to time of most recent addition if so
-2901        //  This is expensive.
-2902        if (cell.getTimestamp() == HConstants.LATEST_TIMESTAMP && CellUtil.isDeleteType(cell)) {
-2903          byte[] qual = CellUtil.cloneQualifier(cell);
-2904          if (qual == null) qual = HConstants.EMPTY_BYTE_ARRAY;
-2905
-2906          Integer count = kvCount.get(qual);
-2907          if (count == null) {
-2908            kvCount.put(qual, 1);
-2909          } else {
-2910            kvCount.put(qual, count + 1);
-2911          }
-2912          count = kvCount.get(qual);
-2913
-2914          Get get = new Get(CellUtil.cloneRow(cell));
-2915          get.setMaxVersions(count);
-2916          get.addColumn(family, qual);
-2917          if (coprocessorHost != null) {
-2918            if (!coprocessorHost.prePrepareTimeStampForDeleteVersion(mutation, cell,
-2919                byteNow, get)) {
-2920              updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-2921            }
-2922          } else {
-2923            updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-2924          }
-2925        } else {
-2926          CellUtil.updateLatestStamp(cell, byteNow, 0);
-2927        }
-2928      }
-2929    }
-2930  }
-2931
-2932  void updateDeleteLatestVersionTimeStamp(Cell cell, Get get, int count, byte[] byteNow)
-2933      throws IOException {
-2934    List<Cell> result = get(get, false);
-2935
-2936    if (result.size() < count) {
-2937      // Nothing to delete
-2938      CellUtil.updateLatestStamp(cell, byteNow, 0);
-2939      return;
-2940    }
-2941    if (result.size() > count) {
-2942      throw new RuntimeException("Unexpected size: " + result.size());
-2943    }
-2944    Cell getCell = result.get(count - 1);
-2945    CellUtil.setTimestamp(cell, getCell.getTimestamp());
-2946  }
-2947
-2948  @Override
-2949  public void put(Put put) throws IOException {
-2950    checkReadOnly();
-2951
-2952    // Do a rough check that we have resources to accept a write.  The check is
-2953    // 'rough' in that between the resource check and the call to obtain a
-2954    // read lock, resources may run out.  For now, the thought is that this
-2955    // will be extremely rare; we'll deal with it when it happens.
-2956    checkResources();
-2957    startRegionOperation(Operation.PUT);
-2958    try {
-2959      // All edits for the given row (across all column families) must happen atomically.
-2960      doBatchMutate(put);
-2961    } finally {
-2962      closeRegionOperation(Operation.PUT);
-2963    }
-2964  }
-2965
-2966  /**
-2967   * Struct-like class that tracks the progress of a batch operation,
-2968   * accumulating status
[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
index 6f1f58b..5a6b9fc 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
@@ -240,111 +240,119 @@
 
 
 private void
+SplitTableRegionProcedure.checkSplittable(MasterProcedureEnv env,
+   HRegionInfo regionToSplit,
+   byte[] splitRow)
+Check whether the region is splittable
+
+
+
+private void
 MergeTableRegionsProcedure.cleanupMergedRegion(MasterProcedureEnv env)
 Clean up merged region
 
 
-
+
 private AssignProcedure[]
 SplitTableRegionProcedure.createAssignProcedures(MasterProcedureEnv env,
   int regionReplication)
 
-
+
 private AssignProcedure[]
 MergeTableRegionsProcedure.createAssignProcedures(MasterProcedureEnv env,
   int regionReplication)
 
-
+
 void
 SplitTableRegionProcedure.createDaughterRegions(MasterProcedureEnv env)
 Create daughter regions
 
 
-
+
 private GCRegionProcedure[]
 GCMergedRegionsProcedure.createGCRegionProcedures(MasterProcedureEnv env)
 
-
+
 private void
 MergeTableRegionsProcedure.createMergedRegion(MasterProcedureEnv env)
 Create merged region
 
 
-
+
 private UnassignProcedure[]
 SplitTableRegionProcedure.createUnassignProcedures(MasterProcedureEnv env,
  int regionReplication)
 
-
+
 private UnassignProcedure[]
 MergeTableRegionsProcedure.createUnassignProcedures(MasterProcedureEnv env,
  int regionReplication)
 
-
+
 protected Procedure[]
 RegionTransitionProcedure.execute(MasterProcedureEnv env)
 
-
+
 protected StateMachineProcedure.Flow
 GCMergedRegionsProcedure.executeFromState(MasterProcedureEnv env,
 org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.GCMergedRegionsState state)
 
-
+
 protected StateMachineProcedure.Flow
 GCRegionProcedure.executeFromState(MasterProcedureEnv env,
 org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.GCRegionState state)
 
-
+
 protected StateMachineProcedure.Flow
 MergeTableRegionsProcedure.executeFromState(MasterProcedureEnv env,
 org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsState state)
 
-
+
 protected StateMachineProcedure.Flow
 MoveRegionProcedure.executeFromState(MasterProcedureEnv env,
 org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionState state)
 
-
+
 protected StateMachineProcedure.Flow
 SplitTableRegionProcedure.executeFromState(MasterProcedureEnv env,
 org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState state)
 
-
+
 protected void
 UnassignProcedure.finishTransition(MasterProcedureEnv env,
 RegionStates.RegionStateNode regionNode)
 
-
+
 protected abstract void
 RegionTransitionProcedure.finishTransition(MasterProcedureEnv env,
 RegionStates.RegionStateNode regionNode)
 
-
+
 protected void
 AssignProcedure.finishTransition(MasterProcedureEnv env,
 RegionStates.RegionStateNode regionNode)
 
-
+
 private ServerName
 SplitTableRegionProcedure.getParentRegionServerName(MasterProcedureEnv env)
 
-
+
 protected ProcedureMetrics
 UnassignProcedure.getProcedureMetrics(MasterProcedureEnv env)
 
-
+
 protected ProcedureMetrics
 SplitTableRegionProcedure.getProcedureMetrics(MasterProcedureEnv env)
 
-
+
 protected ProcedureMetrics
 MergeTableRegionsProcedure.getProcedureMetrics(MasterProcedureEnv env)
 
-
+
 protected ProcedureMetrics
 AssignProcedure.getProcedureMetrics(MasterProcedureEnv env)
 
-
+
 (package private) static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse
 Util.getRegionInfoResponse(MasterProcedureEnv env,
  ServerName regionLocation,
@@ -352,6 +360,13 @@
 Raw call to remote regionserver to get info on a particular region.
 
 
+
+(package private) static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse
+Util.getRegionInfoResponse(MasterProcedureEnv env,
+ ServerName regionLocation,
+ HRegionInfo hri,
+ boolean includeBestSplitRow)
+
 
 private int
 SplitTableRegionProcedure.getRegionReplication(MasterProcedureEnv env)
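A hedged skeleton of the executeFromState pattern listed above: a StateMachineProcedure advances through protobuf-defined states and returns Flow.HAS_MORE_STATE until done. TEnv/TState and the class name are placeholders, not the real MasterProcedureProtos types, and the real procedures also implement serialization, rollback, and locking:

import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;

abstract class ExampleProcedure<TEnv, TState> extends StateMachineProcedure<TEnv, TState> {
  @Override
  protected Flow executeFromState(TEnv env, TState state) {
    // Each call performs one step, then either schedules the next state
    // (via the protected setNextState) and returns Flow.HAS_MORE_STATE,
    // or returns Flow.NO_MORE_STATE to finish the procedure.
    return Flow.NO_MORE_STATE;
  }
}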

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
--
diff --git 

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
index 0a32350..cf44d69 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
@@ -75,735 +75,796 @@
 067import org.apache.hadoop.conf.Configuration;
 068import org.apache.hadoop.crypto.CryptoProtocolVersion;
 069import org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.FileSystem;
-071import org.apache.hadoop.fs.FileSystemLinkResolver;
-072import org.apache.hadoop.fs.Path;
-073import org.apache.hadoop.fs.UnresolvedLinkException;
-074import org.apache.hadoop.fs.permission.FsPermission;
-075import org.apache.hadoop.hbase.classification.InterfaceAudience;
-076import org.apache.hadoop.hbase.client.ConnectionUtils;
-077import org.apache.hadoop.hbase.util.CancelableProgressable;
-078import org.apache.hadoop.hbase.util.FSUtils;
-079import org.apache.hadoop.hdfs.DFSClient;
-080import org.apache.hadoop.hdfs.DFSOutputStream;
-081import org.apache.hadoop.hdfs.DistributedFileSystem;
-082import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-083import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-084import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-085import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-086import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-087import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-088import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
-089import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
-090import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
-091import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
-092import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
-093import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
-094import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
-095import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
-096import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
-097import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
-098import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
-099import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-100import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
-101import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
-102import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-103import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-104import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
-105import org.apache.hadoop.io.EnumSetWritable;
-106import org.apache.hadoop.ipc.RemoteException;
-107import org.apache.hadoop.net.NetUtils;
-108import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
-109import org.apache.hadoop.security.token.Token;
-110import org.apache.hadoop.util.DataChecksum;
-111
-112/**
-113 * Helper class for implementing {@link FanOutOneBlockAsyncDFSOutput}.
-114 */
-115@InterfaceAudience.Private
-116public final class FanOutOneBlockAsyncDFSOutputHelper {
-117
-118  private static final Log LOG = LogFactory.getLog(FanOutOneBlockAsyncDFSOutputHelper.class);
-119
-120  private FanOutOneBlockAsyncDFSOutputHelper() {
-121  }
-122
-123  // use pooled allocator for performance.
-124  private static final ByteBufAllocator ALLOC = PooledByteBufAllocator.DEFAULT;
-125
-126  // copied from DFSPacket since it is package private.
-127  public static final long HEART_BEAT_SEQNO = -1L;
-128
-129  // Timeouts for communicating with DataNode for streaming writes/reads
-130  public static final int READ_TIMEOUT = 60 * 1000;
-131  public static final int READ_TIMEOUT_EXTENSION = 5 * 1000;
-132  public static final int WRITE_TIMEOUT = 8 * 60 * 1000;
-133
-134  // helper class for getting Status from PipelineAckProto. In hadoop 2.6 or before, there is a
-135  // getStatus method, and for hadoop 2.7 or after, the status is retrieved from flag. The flag may
-136  // get from proto directly, or combined by the reply field of the proto and a ECN object. See
-137  // createPipelineAckStatusGetter for more details.
-138  private interface PipelineAckStatusGetter {
-139    Status get(PipelineAckProto ack);
-140  }
-141
-142  private static final
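A hedged sketch of the compatibility pattern described in the PipelineAckStatusGetter comment above: hide a cross-version API difference behind a tiny interface and pick the implementation once, via reflection. Names and the flag-based branch below are illustrative, not the HBase implementation:

import java.lang.reflect.Method;

final class CompatShimSketch {
  interface StatusGetter {
    int get(Object ack);
  }

  static StatusGetter createStatusGetter(Class<?> ackClass) {
    try {
      // Older API: a direct getStatus() accessor exists.
      Method getStatus = ackClass.getMethod("getStatus");
      return ack -> {
        try {
          return ((Enum<?>) getStatus.invoke(ack)).ordinal();
        } catch (ReflectiveOperationException e) {
          throw new IllegalStateException(e);
        }
      };
    } catch (NoSuchMethodException e) {
      // Newer API: the status would be derived from a flags field instead.
      return ack -> 0; // placeholder branch for the flag-based decoding
    }
  }
}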

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
index 75db22d..99a09f9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
@@ -37,2710 +37,2816 @@
 029import java.util.List;
 030import java.util.Map;
 031import java.util.Optional;
-032import java.util.concurrent.CompletableFuture;
-033import java.util.concurrent.TimeUnit;
-034import java.util.concurrent.atomic.AtomicReference;
-035import java.util.function.BiConsumer;
-036import java.util.regex.Pattern;
-037import java.util.stream.Collectors;
-038
-039import com.google.common.annotations.VisibleForTesting;
-040
-041import io.netty.util.Timeout;
-042import io.netty.util.TimerTask;
-043
-044import java.util.stream.Stream;
-045
-046import org.apache.commons.io.IOUtils;
-047import org.apache.commons.logging.Log;
-048import org.apache.commons.logging.LogFactory;
-049import org.apache.hadoop.hbase.ClusterStatus;
-050import org.apache.hadoop.hbase.HRegionInfo;
-051import org.apache.hadoop.hbase.HRegionLocation;
-052import org.apache.hadoop.hbase.MetaTableAccessor;
-053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import org.apache.hadoop.hbase.NotServingRegionException;
-055import org.apache.hadoop.hbase.ProcedureInfo;
-056import org.apache.hadoop.hbase.RegionLoad;
-057import org.apache.hadoop.hbase.RegionLocations;
-058import org.apache.hadoop.hbase.ServerName;
-059import org.apache.hadoop.hbase.NamespaceDescriptor;
-060import org.apache.hadoop.hbase.HConstants;
-061import org.apache.hadoop.hbase.TableExistsException;
-062import org.apache.hadoop.hbase.TableName;
-063import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-064import org.apache.hadoop.hbase.TableNotDisabledException;
-065import org.apache.hadoop.hbase.TableNotEnabledException;
-066import org.apache.hadoop.hbase.TableNotFoundException;
-067import org.apache.hadoop.hbase.UnknownRegionException;
-068import org.apache.hadoop.hbase.classification.InterfaceAudience;
-069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import org.apache.hadoop.hbase.client.Scan.ReadType;
-072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import org.apache.hadoop.hbase.client.replication.TableCFs;
-074import org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import org.apache.hadoop.hbase.replication.ReplicationException;
-080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-099import

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
index 71844ce..75db22d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
@@ -105,2564 +105,2642 @@
 097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
 099import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-105import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-106import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-107import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-108import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-109import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-110import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-111import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-112import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-113import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-114import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-115import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-139import 

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
index f5bc73a..feb42ea 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
@@ -4044,345 +4044,330 @@
 4036
 4037  @Override
 4038  public void 
drainRegionServers(ListServerName servers) throws IOException {
-4039final 
ListHBaseProtos.ServerName pbServers = new 
ArrayList(servers.size());
-4040for (ServerName server : servers) 
{
-4041  // Parse to ServerName to do 
simple validation.
-4042  
ServerName.parseServerName(server.toString());
-4043  
pbServers.add(ProtobufUtil.toServerName(server));
-4044}
-4045
-4046executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-4047  @Override
-4048  public Void rpcCall() throws 
ServiceException {
-4049DrainRegionServersRequest req 
=
-4050
DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build();
-4051
master.drainRegionServers(getRpcController(), req);
-4052return null;
-4053  }
-4054});
-4055  }
-4056
-4057  @Override
-4058  public ListServerName 
listDrainingRegionServers() throws IOException {
-4059return executeCallable(new 
MasterCallableListServerName(getConnection(),
-4060  getRpcControllerFactory()) 
{
-4061  @Override
-4062  public ListServerName 
rpcCall() throws ServiceException {
-4063ListDrainingRegionServersRequest 
req = ListDrainingRegionServersRequest.newBuilder().build();
-4064ListServerName servers = 
new ArrayList();
-4065for (HBaseProtos.ServerName 
server : master.listDrainingRegionServers(null, req)
-4066.getServerNameList()) {
-4067  
servers.add(ProtobufUtil.toServerName(server));
-4068}
-4069return servers;
-4070  }
-4071});
-4072  }
-4073
-4074  @Override
-4075  public void 
removeDrainFromRegionServers(ListServerName servers) throws IOException 
{
-4076final 
ListHBaseProtos.ServerName pbServers = new 
ArrayList(servers.size());
-4077for (ServerName server : servers) 
{
-4078  
pbServers.add(ProtobufUtil.toServerName(server));
-4079}
-4080
-4081executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-4082  @Override
-4083  public Void rpcCall() throws 
ServiceException {
-4084
RemoveDrainFromRegionServersRequest req = 
RemoveDrainFromRegionServersRequest.newBuilder()
-4085
.addAllServerName(pbServers).build();
-4086
master.removeDrainFromRegionServers(getRpcController(), req);
-4087return null;
+4039executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
+4040  @Override
+4041  public Void rpcCall() throws 
ServiceException {
+4042
master.drainRegionServers(getRpcController(),
+4043  
RequestConverter.buildDrainRegionServersRequest(servers));
+4044return null;
+4045  }
+4046});
+4047  }
+4048
+4049  @Override
+4050  public ListServerName 
listDrainingRegionServers() throws IOException {
+4051return executeCallable(new 
MasterCallableListServerName(getConnection(),
+4052  getRpcControllerFactory()) 
{
+4053  @Override
+4054  public ListServerName 
rpcCall() throws ServiceException {
+4055ListDrainingRegionServersRequest 
req = ListDrainingRegionServersRequest.newBuilder().build();
+4056ListServerName servers = 
new ArrayList();
+4057for (HBaseProtos.ServerName 
server : master.listDrainingRegionServers(null, req)
+4058.getServerNameList()) {
+4059  
servers.add(ProtobufUtil.toServerName(server));
+4060}
+4061return servers;
+4062  }
+4063});
+4064  }
+4065
+4066  @Override
+4067  public void 
removeDrainFromRegionServers(ListServerName servers) throws IOException 
{
+4068executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
+4069  @Override
+4070  public Void rpcCall() throws 
ServiceException {
+4071
master.removeDrainFromRegionServers(getRpcController(), 
RequestConverter.buildRemoveDrainFromRegionServersRequest(servers));
+4072return null;
+4073  }
+4074});
+4075  }
+4076
+4077  @Override
+4078  public ListTableCFs 
listReplicatedTableCFs() throws IOException {
+4079ListTableCFs 
replicatedTableCFs = new ArrayList();
+4080HTableDescriptor[] tables = 
listTables();
+4081for (HTableDescriptor table : 
tables) {
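
The rewritten hunk above swaps the hand-rolled protobuf assembly for RequestConverter helpers; the Admin-facing contract is unchanged. As a reading aid, a minimal sketch of how a caller might exercise these drain methods (the server-name string and cluster config are hypothetical, not taken from this commit):

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DrainExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Hypothetical server name; format is host,port,startcode.
          ServerName sn = ServerName.valueOf("host1.example.com,16020,1499000000000");
          admin.drainRegionServers(Arrays.asList(sn));   // stop assigning new regions to it
          List<ServerName> draining = admin.listDrainingRegionServers();
          System.out.println("Draining: " + draining);
          admin.removeDrainFromRegionServers(Arrays.asList(sn)); // make it schedulable again
        }
      }
    }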

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
index e9af038..53cae9a 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
@@ -78,1244 +78,1245 @@
 070import org.apache.hadoop.hbase.classification.InterfaceAudience;
 071import org.apache.hadoop.hbase.client.Admin;
 072import org.apache.hadoop.hbase.client.ClientServiceCallable;
-073import org.apache.hadoop.hbase.client.Connection;
-074import org.apache.hadoop.hbase.client.ConnectionFactory;
-075import org.apache.hadoop.hbase.client.RegionLocator;
-076import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
-077import org.apache.hadoop.hbase.client.SecureBulkLoadClient;
-078import org.apache.hadoop.hbase.client.Table;
-079import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-080import org.apache.hadoop.hbase.io.HFileLink;
-081import org.apache.hadoop.hbase.io.HalfStoreFileReader;
-082import org.apache.hadoop.hbase.io.Reference;
-083import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-084import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-085import org.apache.hadoop.hbase.io.hfile.HFile;
-086import org.apache.hadoop.hbase.io.hfile.HFileContext;
-087import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-088import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
-089import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-090import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-091import org.apache.hadoop.hbase.regionserver.BloomType;
-092import org.apache.hadoop.hbase.regionserver.HStore;
-093import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-094import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
-095import org.apache.hadoop.hbase.security.UserProvider;
-096import org.apache.hadoop.hbase.security.token.FsDelegationToken;
-097import org.apache.hadoop.hbase.util.Bytes;
-098import org.apache.hadoop.hbase.util.FSHDFSUtils;
-099import org.apache.hadoop.hbase.util.Pair;
-100import org.apache.hadoop.util.Tool;
-101import org.apache.hadoop.util.ToolRunner;
-102
-103/**
-104 * Tool to load the output of HFileOutputFormat into an existing table.
-105 */
-106@InterfaceAudience.Public
-107public class LoadIncrementalHFiles extends Configured implements Tool {
-108  private static final Log LOG = LogFactory.getLog(LoadIncrementalHFiles.class);
-109  private boolean initalized = false;
-110
-111  public static final String NAME = "completebulkload";
-112  static final String RETRY_ON_IO_EXCEPTION = "hbase.bulkload.retries.retryOnIOException";
-113  public static final String MAX_FILES_PER_REGION_PER_FAMILY
-114    = "hbase.mapreduce.bulkload.max.hfiles.perRegion.perFamily";
-115  private static final String ASSIGN_SEQ_IDS = "hbase.mapreduce.bulkload.assign.sequenceNumbers";
-116  public final static String CREATE_TABLE_CONF_KEY = "create.table";
-117  public final static String IGNORE_UNMATCHED_CF_CONF_KEY = "ignore.unmatched.families";
-118  public final static String ALWAYS_COPY_FILES = "always.copy.files";
-119
-120  // We use a '.' prefix which is ignored when walking directory trees
-121  // above. It is invalid family name.
-122  final static String TMP_DIR = ".tmp";
-123
-124  private int maxFilesPerRegionPerFamily;
-125  private boolean assignSeqIds;
-126  private Set<String> unmatchedFamilies = new HashSet<>();
-127
-128  // Source filesystem
-129  private FileSystem fs;
-130  // Source delegation token
-131  private FsDelegationToken fsDelegationToken;
-132  private String bulkToken;
-133  private UserProvider userProvider;
-134  private int nrThreads;
-135  private RpcControllerFactory rpcControllerFactory;
-136  private AtomicInteger numRetries;
-137
-138  private Map<LoadQueueItem, ByteBuffer> retValue = null;
-139
-140  public LoadIncrementalHFiles(Configuration conf) throws Exception {
-141    super(conf);
-142    this.rpcControllerFactory = new RpcControllerFactory(conf);
-143    initialize();
-144  }
-145
-146  private void initialize() throws IOException {
-147    if (initalized) {
-148      return;
-149    }
-150    // make a copy, just to be sure we're not overriding someone else's config
-151    setConf(HBaseConfiguration.create(getConf()));
-152    Configuration conf = getConf();
-153    // disable blockcache for tool invocation, see HBASE-10500
-154    conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0);
-155    this.userProvider = UserProvider.instantiate(conf);
-156    this.fsDelegationToken = new FsDelegationToken(userProvider, "renewer");
-157    assignSeqIds = 
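
LoadIncrementalHFiles is the "completebulkload" tool named by the NAME constant above, and it is a Hadoop Tool. A minimal sketch of driving it through ToolRunner, assuming HFiles were already staged by HFileOutputFormat; the path and table name are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
    import org.apache.hadoop.util.ToolRunner;

    public class BulkLoadExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Roughly equivalent to the CLI form:
        //   hbase org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles \
        //     hdfs:///staging/hfiles mytable
        int rc = ToolRunner.run(conf, new LoadIncrementalHFiles(conf),
            new String[] { "hdfs:///staging/hfiles", "mytable" });
        System.exit(rc);
      }
    }

Behaviour can be tuned with the configuration keys declared above, e.g. create.table to create a missing target table and hbase.mapreduce.bulkload.max.hfiles.perRegion.perFamily to cap files per region and family.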

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
index 249d4a0..7369fdf 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
@@ -65,12 +65,12 @@
 057import com.google.common.base.Preconditions;
 058
 059/**
-060 * Reads {@link HFile} version 2 blocks to HFiles and via {@link Cacheable} Interface to caches.
-061 * Version 2 was introduced in hbase-0.92.0. No longer has support for version 1 blocks since
-062 * hbase-1.3.0.
-063 *
-064 * <p>Version 1 was the original file block. Version 2 was introduced when we changed the hbase file
-065 * format to support multi-level block indexes and compound bloom filters (HBASE-3857).
+060 * Cacheable Blocks of an {@link HFile} version 2 file.
+061 * Version 2 was introduced in hbase-0.92.0.
+062 *
+063 * <p>Version 1 was the original file block. Version 2 was introduced when we changed the hbase file
+064 * format to support multi-level block indexes and compound bloom filters (HBASE-3857). Support
+065 * for Version 1 was removed in hbase-1.3.0.
 066 *
 067 * <h3>HFileBlock: Version 2</h3>
 068 * In version 2, a block is structured as follows:
@@ -120,582 +120,582 @@
 112public class HFileBlock implements Cacheable {
 113  private static final Log LOG = LogFactory.getLog(HFileBlock.class);
 114
-115  /** Type of block. Header field 0. */
-116  private BlockType blockType;
-117
-118  /**
-119   * Size on disk excluding header, including checksum. Header field 1.
-120   * @see Writer#putHeader(byte[], int, int, int, int)
-121   */
-122  private int onDiskSizeWithoutHeader;
-123
-124  /**
-125   * Size of pure data. Does not include header or checksums. Header field 2.
-126   * @see Writer#putHeader(byte[], int, int, int, int)
-127   */
-128  private int uncompressedSizeWithoutHeader;
-129
-130  /**
-131   * The offset of the previous block on disk. Header field 3.
-132   * @see Writer#putHeader(byte[], int, int, int, int)
-133   */
-134  private long prevBlockOffset;
-135
-136  /**
-137   * Size on disk of header + data. Excludes checksum. Header field 6,
-138   * OR calculated from {@link #onDiskSizeWithoutHeader} when using HDFS checksum.
-139   * @see Writer#putHeader(byte[], int, int, int, int)
-140   */
-141  private int onDiskDataSizeWithHeader;
-142
-143
-144  /**
-145   * The in-memory representation of the hfile block. Can be on or offheap. Can be backed by
-146   * a single ByteBuffer or by many. Make no assumptions.
-147   *
-148   * <p>Be careful reading from this <code>buf</code>. Duplicate and work on the duplicate or if
-149   * not, be sure to reset position and limit else trouble down the road.
-150   *
-151   * <p>TODO: Make this read-only once made.
-152   *
-153   * <p>We are using the ByteBuff type. ByteBuffer is not extensible yet we need to be able to have
-154   * a ByteBuffer-like API across multiple ByteBuffers reading from a cache such as BucketCache.
-155   * So, we have this ByteBuff type. Unfortunately, it is spread all about HFileBlock. Would be
-156   * good if could be confined to cache-use only but hard-to-do.
-157   */
-158  private ByteBuff buf;
-159
-160  /** Meta data that holds meta information on the hfileblock.
-161   */
-162  private HFileContext fileContext;
-163
-164  /**
-165   * The offset of this block in the file. Populated by the reader for
-166   * convenience of access. This offset is not part of the block header.
-167   */
-168  private long offset = UNSET;
-169
-170  private MemoryType memType = MemoryType.EXCLUSIVE;
-171
-172  /**
-173   * The on-disk size of the next block, including the header and checksums if present, obtained by
-174   * peeking into the first {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the next block's
-175   * header, or UNSET if unknown.
-176   *
-177   * Blocks try to carry the size of the next block to read in this data member. They will even have
-178   * this value when served from cache. Could save a seek in the case where we are iterating through
-179   * a file and some of the blocks come from cache. If from cache, then having this info to hand
-180   * will save us doing a seek to read the header so we can read the body of a block.
-181   * TODO: see how effective this is at saving seeks.
-182   */
-183  private int nextBlockOnDiskSize = UNSET;
-184
-185  /**
-186   * On a checksum failure, do these many succeeding read requests using hdfs checksums before
-187   * auto-reenabling hbase checksum verification.
-188   */
-189  static final int CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD = 3;
-190
-191  private 
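
The field comments above enumerate the version-2 block header: field 0 is the block type magic, field 1 onDiskSizeWithoutHeader, field 2 uncompressedSizeWithoutHeader, field 3 prevBlockOffset, field 6 onDiskDataSizeWithHeader. A hedged sketch of reading that layout from a ByteBuffer; the 8-byte magic and the widths of the checksum fields (4 and 5) are my assumptions based on HConstants#HFILEBLOCK_HEADER_SIZE, only the field order is taken from this hunk:

    import java.nio.ByteBuffer;

    // Sketch of the HFile v2 block header layout implied by the comments above.
    final class HFileV2HeaderSketch {
      static void dump(ByteBuffer b) {
        byte[] magic = new byte[8];
        b.get(magic);                                   // field 0: block type magic
        int onDiskSizeWithoutHeader = b.getInt();       // field 1
        int uncompressedSizeWithoutHeader = b.getInt(); // field 2
        long prevBlockOffset = b.getLong();             // field 3
        byte checksumType = b.get();                    // assumed field 4
        int bytesPerChecksum = b.getInt();              // assumed field 5
        int onDiskDataSizeWithHeader = b.getInt();      // field 6
        System.out.printf("type=%s onDisk=%d uncompressed=%d prev=%d dataWithHeader=%d%n",
            new String(magic), onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader,
            prevBlockOffset, onDiskDataSizeWithHeader);
      }
    }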

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/client/AsyncConnectionImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncConnectionImpl.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncConnectionImpl.html
index 6ae2066..6b1b443 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncConnectionImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncConnectionImpl.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -260,61 +260,67 @@ implements
 createRegionServerStub(ServerName serverName)
 
-AsyncAdmin
-getAdmin()
-Retrieve an AsyncAdmin implementation to administer an HBase cluster.
+AsyncAdminBuilder<RawAsyncHBaseAdmin>
+getAdminBuilder()
+Returns an AsyncAdminBuilder for creating AsyncAdmin.
 
+AsyncAdminBuilder<AsyncHBaseAdmin>
+getAdminBuilder(ExecutorService pool)
+Returns an AsyncAdminBuilder for creating AsyncAdmin.
+
 (package private) org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.Interface
 getAdminStub(ServerName serverName)
 
 org.apache.hadoop.conf.Configuration
 getConfiguration()
 Returns the Configuration object used by this instance.
 
 (package private) AsyncRegionLocator
 getLocator()
 
 (package private) CompletableFuture<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.Interface>
 getMasterStub()
 
 NonceGenerator
 getNonceGenerator()
 
 AsyncTableBuilder<RawAsyncTable>
 getRawTableBuilder(TableName tableName)
 Returns an AsyncTableBuilder for creating RawAsyncTable.
 
 AsyncTableRegionLocator
 getRegionLocator(TableName tableName)
 Retrieve a AsyncRegionLocator implementation to inspect region information on a table.
 
 (package private) org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.Interface
 getRegionServerStub(ServerName serverName)
 
 private HBaseRpcController
 getRpcController()
 
 AsyncTableBuilder<AsyncTable>
 getTableBuilder(TableName tableName, ExecutorService pool)
 Returns an AsyncTableBuilder for creating AsyncTable.
 
 private void
 makeMasterStub(CompletableFuture<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.Interface> future)
 
@@ -331,7 +337,7 @@ implements AsyncConnection
-getRawTable, getTable
+getAdmin, getAdmin, getRawTable, getTable
 
@@ -752,23 +758,39 @@ implements
+getAdminBuilder
+public AsyncAdminBuilder<RawAsyncHBaseAdmin> getAdminBuilder()
+Description copied from interface: AsyncConnection
+Returns an AsyncAdminBuilder for creating AsyncAdmin.
+
+ The admin operation's returned CompletableFuture will be finished directly in the rpc
+ framework's callback thread, so typically you should not do any time consuming work inside
+ these methods.
+
+Specified by:
+getAdminBuilder in interface AsyncConnection
+
-getAdmin
-public AsyncAdmin getAdmin()
-Description copied from interface: AsyncConnection
-Retrieve an AsyncAdmin implementation to administer an HBase cluster. The returned AsyncAdmin
- is not guaranteed to be thread-safe. A new instance should be created for each using thread.
- This is a lightweight operation. Pooling or caching of the returned AsyncAdmin is not
- recommended.
+getAdminBuilder
+public AsyncAdminBuilder<AsyncHBaseAdmin> getAdminBuilder(ExecutorService pool)
+Description copied from interface: AsyncConnection
+Returns an AsyncAdminBuilder for creating AsyncAdmin.
 
 Specified by:
-getAdmin in interface AsyncConnection
-Returns:
-an AsyncAdmin instance for cluster administration
+getAdminBuilder in interface AsyncConnection
+Parameters:
+pool - the 
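
Per the description above, the future returned by an AsyncAdmin operation completes on the RPC framework's callback thread unless an executor pool is supplied. A minimal sketch, assuming AsyncAdminBuilder exposes build() as in the HBase 2.0-era async API and that table "myTable" exists:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;

    public class AsyncAdminExample {
      static void disable(AsyncConnection conn) {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        // With a pool, continuations run off the RPC callback thread, so
        // heavier work in them is safe; without one, keep callbacks cheap.
        AsyncAdmin admin = conn.getAdminBuilder(pool).build();
        admin.disableTable(TableName.valueOf("myTable"))
            .whenCompleteAsync((v, err) -> {
              if (err != null) {
                err.printStackTrace();
              }
            }, pool);
      }
    }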

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
index dc12c09..82506d2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
@@ -54,2261 +54,2259 @@
 046import org.apache.commons.io.IOUtils;
 047import org.apache.commons.logging.Log;
 048import org.apache.commons.logging.LogFactory;
-049import org.apache.directory.api.util.OptionalComponentsMonitor;
-050import org.apache.hadoop.hbase.HRegionInfo;
-051import org.apache.hadoop.hbase.HRegionLocation;
-052import org.apache.hadoop.hbase.MetaTableAccessor;
-053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import org.apache.hadoop.hbase.NotServingRegionException;
-055import org.apache.hadoop.hbase.ProcedureInfo;
-056import org.apache.hadoop.hbase.RegionLocations;
-057import org.apache.hadoop.hbase.ServerName;
-058import org.apache.hadoop.hbase.NamespaceDescriptor;
-059import org.apache.hadoop.hbase.HConstants;
-060import org.apache.hadoop.hbase.TableExistsException;
-061import org.apache.hadoop.hbase.TableName;
-062import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-063import org.apache.hadoop.hbase.TableNotDisabledException;
-064import org.apache.hadoop.hbase.TableNotEnabledException;
-065import org.apache.hadoop.hbase.TableNotFoundException;
-066import org.apache.hadoop.hbase.UnknownRegionException;
-067import org.apache.hadoop.hbase.classification.InterfaceAudience;
-068import org.apache.hadoop.hbase.classification.InterfaceStability;
-069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import org.apache.hadoop.hbase.client.Scan.ReadType;
-072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import org.apache.hadoop.hbase.client.replication.TableCFs;
-074import org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import org.apache.hadoop.hbase.replication.ReplicationException;
-080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-104import 

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index f4a3f64..10f21cf 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -689,20 +689,20 @@
 
java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.regionserver.FlushType
-org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
-org.apache.hadoop.hbase.regionserver.BloomType
-org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
-org.apache.hadoop.hbase.regionserver.RegionOpeningState
-org.apache.hadoop.hbase.regionserver.ImmutableSegment.Type
 org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
-org.apache.hadoop.hbase.regionserver.MemStoreCompactor.Action
-org.apache.hadoop.hbase.regionserver.ScanType
+org.apache.hadoop.hbase.regionserver.RegionOpeningState
+org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
 org.apache.hadoop.hbase.regionserver.Region.Operation
+org.apache.hadoop.hbase.regionserver.BloomType
 org.apache.hadoop.hbase.regionserver.Region.FlushResult.Result
-org.apache.hadoop.hbase.regionserver.StoreScanner.StoreScannerCompactionRace
+org.apache.hadoop.hbase.regionserver.MemStoreCompactor.Action
+org.apache.hadoop.hbase.regionserver.ScanType
+org.apache.hadoop.hbase.regionserver.FlushType
 org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
-org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
+org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
+org.apache.hadoop.hbase.regionserver.ImmutableSegment.Type
+org.apache.hadoop.hbase.regionserver.StoreScanner.StoreScannerCompactionRace
+org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
index e895d7c..df57a92 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
@@ -125,10 +125,10 @@
 
java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteCompare
-org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
 org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult
+org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
 org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode
+org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteCompare
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerDescription.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerDescription.html b/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerDescription.html
index 1d6172f..40f6053 100644
--- a/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerDescription.html
+++ b/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerDescription.html
@@ -121,28 +121,30 @@
 
 
 
-CompletableFuture<

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
index 49714a2..d0f1508 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
@@ -172,1438 +172,1562 @@
 164    Map<ServerName, List<HRegionInfo>> clusterState;
 165
 166    protected final RackManager rackManager;
-167
-168    protected Cluster(
-169        Map<ServerName, List<HRegionInfo>> clusterState,
-170        Map<String, Deque<BalancerRegionLoad>> loads,
-171        RegionLocationFinder regionFinder,
-172        RackManager rackManager) {
-173      this(null, clusterState, loads, regionFinder, rackManager);
-174    }
-175
-176    @SuppressWarnings("unchecked")
-177    protected Cluster(
-178        Collection<HRegionInfo> unassignedRegions,
-179        Map<ServerName, List<HRegionInfo>> clusterState,
-180        Map<String, Deque<BalancerRegionLoad>> loads,
-181        RegionLocationFinder regionFinder,
-182        RackManager rackManager) {
-183
-184      if (unassignedRegions == null) {
-185        unassignedRegions = EMPTY_REGION_LIST;
-186      }
+167    // Maps region -> rackIndex -> locality of region on rack
+168    private float[][] rackLocalities;
+169    // Maps localityType -> region -> [server|rack]Index with highest locality
+170    private int[][] regionsToMostLocalEntities;
+171
+172    protected Cluster(
+173        Map<ServerName, List<HRegionInfo>> clusterState,
+174        Map<String, Deque<BalancerRegionLoad>> loads,
+175        RegionLocationFinder regionFinder,
+176        RackManager rackManager) {
+177      this(null, clusterState, loads, regionFinder, rackManager);
+178    }
+179
+180    @SuppressWarnings("unchecked")
+181    protected Cluster(
+182        Collection<HRegionInfo> unassignedRegions,
+183        Map<ServerName, List<HRegionInfo>> clusterState,
+184        Map<String, Deque<BalancerRegionLoad>> loads,
+185        RegionLocationFinder regionFinder,
+186        RackManager rackManager) {
 187
-188      serversToIndex = new HashMap<>();
-189      hostsToIndex = new HashMap<>();
-190      racksToIndex = new HashMap<>();
-191      tablesToIndex = new HashMap<>();
-192
-193      //TODO: We should get the list of tables from master
-194      tables = new ArrayList<>();
-195      this.rackManager = rackManager != null ? rackManager : new DefaultRackManager();
+188      if (unassignedRegions == null) {
+189        unassignedRegions = EMPTY_REGION_LIST;
+190      }
+191
+192      serversToIndex = new HashMap<>();
+193      hostsToIndex = new HashMap<>();
+194      racksToIndex = new HashMap<>();
+195      tablesToIndex = new HashMap<>();
 196
-197      numRegions = 0;
-198
-199      List<List<Integer>> serversPerHostList = new ArrayList<>();
-200      List<List<Integer>> serversPerRackList = new ArrayList<>();
-201      this.clusterState = clusterState;
-202      this.regionFinder = regionFinder;
-203
-204      // Use servername and port as there can be dead servers in this list. We want everything with
-205      // a matching hostname and port to have the same index.
-206      for (ServerName sn : clusterState.keySet()) {
-207        if (sn == null) {
-208          LOG.warn("TODO: Enable TRACE on BaseLoadBalancer. Empty servername); " +
-209              "skipping; unassigned regions?");
-210          if (LOG.isTraceEnabled()) {
-211            LOG.trace("EMPTY SERVERNAME " + clusterState.toString());
-212          }
-213          continue;
-214        }
-215        if (serversToIndex.get(sn.getAddress().toString()) == null) {
-216          serversToIndex.put(sn.getHostAndPort(), numServers++);
-217        }
-218        if (!hostsToIndex.containsKey(sn.getHostname())) {
-219          hostsToIndex.put(sn.getHostname(), numHosts++);
-220          serversPerHostList.add(new ArrayList<>(1));
+197      //TODO: We should get the list of tables from master
+198      tables = new ArrayList<>();
+199      this.rackManager = rackManager != null ? rackManager : new DefaultRackManager();
+200
+201      numRegions = 0;
+202
+203      List<List<Integer>> serversPerHostList = new ArrayList<>();
+204      List<List<Integer>> serversPerRackList = new ArrayList<>();
+205      this.clusterState = clusterState;
+206      this.regionFinder = regionFinder;
+207
+208      // Use servername and port as there can be dead servers in this list. We want everything with
+209      // a matching hostname and port to have the same index.
+210      for (ServerName sn : clusterState.keySet()) {
+211        if (sn == null) {
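
The two arrays added at the top of this hunk cache locality so the balancer need not recompute it on every cost evaluation: rackLocalities holds locality per (region, rack), and regionsToMostLocalEntities holds, per locality type, the entity index with the highest locality for each region. A hypothetical illustration of a lookup such a table enables (method and class names here are mine, not the balancer's):

    // Sketch: derive the best rack for a region from a (region x rack)
    // locality table like the rackLocalities field above.
    final class LocalityCacheSketch {
      static int mostLocalRack(float[][] rackLocalities, int region) {
        float best = -1f;
        int bestRack = -1;
        for (int rack = 0; rack < rackLocalities[region].length; rack++) {
          if (rackLocalities[region][rack] > best) {
            best = rackLocalities[region][rack];
            bestRack = rack;
          }
        }
        return bestRack;
      }
    }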

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/metrics2/class-use/MetricHistogram.html
--
diff --git 
a/devapidocs/org/apache/hadoop/metrics2/class-use/MetricHistogram.html 
b/devapidocs/org/apache/hadoop/metrics2/class-use/MetricHistogram.html
index bc9ad26..4c691a0 100644
--- a/devapidocs/org/apache/hadoop/metrics2/class-use/MetricHistogram.html
+++ b/devapidocs/org/apache/hadoop/metrics2/class-use/MetricHistogram.html
@@ -223,18 +223,30 @@ service.
 
 
 private MetricHistogram
-MetricsSnapshotSourceImpl.snapshotRestoreTimeHisto
+MetricsMasterQuotaSourceImpl.snapshotObserverSizeComputationTimeHisto
 
 
 private MetricHistogram
-MetricsSnapshotSourceImpl.snapshotTimeHisto
+MetricsMasterQuotaSourceImpl.snapshotObserverSnapshotFetchTimeHisto
 
 
 private MetricHistogram
-MetricsMasterFilesystemSourceImpl.splitSizeHisto
+MetricsMasterQuotaSourceImpl.snapshotObserverTimeHisto
 
 
 private MetricHistogram
+MetricsSnapshotSourceImpl.snapshotRestoreTimeHisto
+
+
+private MetricHistogram
+MetricsSnapshotSourceImpl.snapshotTimeHisto
+
+
+private MetricHistogram
+MetricsMasterFilesystemSourceImpl.splitSizeHisto
+
+
+private MetricHistogram
 MetricsMasterFilesystemSourceImpl.splitTimeHisto
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/overview-tree.html
--
diff --git a/devapidocs/overview-tree.html b/devapidocs/overview-tree.html
index 1e62252..1903c18 100644
--- a/devapidocs/overview-tree.html
+++ b/devapidocs/overview-tree.html
@@ -3268,6 +3268,7 @@
 org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore
 org.apache.hadoop.hbase.master.cleaner.ReplicationMetaCleaner
 org.apache.hadoop.hbase.master.cleaner.ReplicationZKNodeCleanerChore
+org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore
 org.apache.hadoop.hbase.quotas.SpaceQuotaRefresherChore
 org.apache.hadoop.hbase.master.SplitLogManager.TimeoutMonitor
 org.apache.hadoop.hbase.regionserver.StorefileRefresherChore
@@ -3393,6 +3394,8 @@
 org.apache.hadoop.hbase.snapshot.SnapshotManifestV2
org.apache.hadoop.hbase.snapshot.SnapshotManifestV2.ManifestBuilder (implements org.apache.hadoop.hbase.snapshot.SnapshotManifest.RegionVisitor<TRegion, TFamily>)
 org.apache.hadoop.hbase.master.SnapshotOfRegionAssignmentFromMeta
+org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore.SnapshotWithSize
+org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore.StoreFileReference
 org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil
org.apache.hadoop.hbase.util.SortedList<E> (implements java.util.List<E>, java.util.RandomAccess)
 org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot
@@ -5129,6 +5132,7 @@
 org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType
 org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE
 org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position
+org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipperThread.WorkerState
 org.apache.hadoop.hbase.security.visibility.expression.Operator
 org.apache.hadoop.hbase.security.access.AccessControlFilter.Strategy
 org.apache.hadoop.hbase.security.access.AccessController.OpType

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index 58cbf79..a976616 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 008@InterfaceAudience.Private
 009public class Version {
 010  public static final String version = "3.0.0-SNAPSHOT";
-011  public static final String revision = "30817b922ed72ba5630d8cea3d26bba9fef346e4";
+011  public static final String revision = "ea64dbef7f5239ab2162d0bd3dccded60e20ecda";
 012  public static final String user = "jenkins";
-013  public static final String date = "Fri Jun  9 14:39:05 UTC 2017";
+013  public static final String date = "Sat Jun 10 14:39:04 UTC 2017";
 014  public static final String url = "git://asf920.gq1.ygridcore.net/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015  public static final String srcChecksum = "0bf0d03062d078ec9d8e0f407e783cfa";
+015  public static final String srcChecksum = "0347fbff381e8e942b1429ce992adda0";
 016}
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcConnection.html

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index f6074e2..cf7404f 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -1416,14 +1416,20 @@
 
 Add a column family to an existing table.
 
-addColumnFamily(TableName, HColumnDescriptor) - Method in interface org.apache.hadoop.hbase.client.AsyncAdmin
+addColumnFamily(TableName, ColumnFamilyDescriptor) - Method in interface org.apache.hadoop.hbase.client.AsyncAdmin
 
 Add a column family to an existing table.
 
-addColumnFamily(TableName, HColumnDescriptor) - Method in class org.apache.hadoop.hbase.client.AsyncHBaseAdmin
+addColumnFamily(TableName, ColumnFamilyDescriptor) - Method in class org.apache.hadoop.hbase.client.AsyncHBaseAdmin
 
 addColumnFamily(TableName, HColumnDescriptor) - Method in class org.apache.hadoop.hbase.client.HBaseAdmin
 
+addColumnFamily(ColumnFamilyDescriptor) - Method in class org.apache.hadoop.hbase.client.TableDescriptorBuilder
+
+addColumnFamily(ColumnFamilyDescriptor) - Method in class org.apache.hadoop.hbase.client.TableDescriptorBuilder.ModifyableTableDescriptor
+
+Adds a column family.
+
 addColumnFamily(ColumnSchemaModel) - Method in class org.apache.hadoop.hbase.rest.model.TableSchemaModel
 
 Add a column family to the table descriptor
@@ -1647,12 +1653,6 @@
 
 Get all columns from the specified family.
 
-addFamily(HColumnDescriptor) - Method in class org.apache.hadoop.hbase.client.TableDescriptorBuilder
-
-addFamily(HColumnDescriptor) - Method in class org.apache.hadoop.hbase.client.TableDescriptorBuilder.ModifyableTableDescriptor
-
-Adds a column family.
-
 addFamily(HColumnDescriptor) - Method in class org.apache.hadoop.hbase.HTableDescriptor
 
 Deprecated.
@@ -6050,10 +6050,14 @@
 
 blockBuffer - Variable in class org.apache.hadoop.hbase.io.hfile.HFileReaderImpl.HFileScannerImpl
 
-BLOCKCACHE - Static variable in class org.apache.hadoop.hbase.HColumnDescriptor
+BLOCKCACHE - Static variable in class org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder
 
 Key for the BLOCKCACHE attribute.
 
+BLOCKCACHE - Static variable in class org.apache.hadoop.hbase.HColumnDescriptor
+
+Deprecated.
+
 BlockCache - Interface in org.apache.hadoop.hbase.io.hfile
 
 Block cache interface.
@@ -6072,6 +6076,8 @@
 
 The target block size used by blockcache instances.
 
+BLOCKCACHE_BYTES - Static variable in class org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder
+
 BLOCKCACHE_SIZE_GAUGE_DESC - Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsHeapMemoryManagerSource
 
 BLOCKCACHE_SIZE_GAUGE_NAME - Static variable in interface org.apache.hadoop.hbase.regionserver.MetricsHeapMemoryManagerSource
@@ -6330,12 +6336,14 @@
 
 blockSignal - Variable in class org.apache.hadoop.hbase.regionserver.MemStoreFlusher
 
-BLOCKSIZE - Static variable in class org.apache.hadoop.hbase.HColumnDescriptor
+BLOCKSIZE - Static variable in class org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder
 
 Size of storefile/hfile 'blocks'.
 
-blocksize - Variable in class org.apache.hadoop.hbase.HColumnDescriptor
-
+BLOCKSIZE - Static variable in class org.apache.hadoop.hbase.HColumnDescriptor
+
+Deprecated.
+
 blockSize - Variable in class org.apache.hadoop.hbase.io.hfile.bucket.BucketCache
 
 Approximate block size
@@ -6356,6 +6364,8 @@
 
 BLOCKSIZE - Static variable in class org.apache.hadoop.hbase.rest.model.ColumnSchemaModel
 
+BLOCKSIZE_BYTES - Static variable in class org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder
+
 blockSizeMap - Variable in class org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.HFileRecordWriter
 
 blockSizeWritten() - Method in class org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer
@@ -6467,14 +6477,20 @@
 
 BloomContext(BloomFilterWriter, CellComparator) - Constructor for class org.apache.hadoop.hbase.util.BloomContext
 
-BLOOMFILTER - Static variable in class org.apache.hadoop.hbase.HColumnDescriptor
+BLOOMFILTER - Static variable in class org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder
 
+BLOOMFILTER - Static variable in class org.apache.hadoop.hbase.HColumnDescriptor
+
+Deprecated.
+
 BLOOMFILTER - Static variable in class org.apache.hadoop.hbase.rest.model.ColumnSchemaModel
 
 BloomFilter - Interface in org.apache.hadoop.hbase.util
 
 Implements a Bloom filter, as defined by Bloom in 1970.
 
+BLOOMFILTER_BYTES - Static variable in class org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder
+
 BloomFilterBase - Interface in org.apache.hadoop.hbase.util
 
 Common methods Bloom filter methods required at read and write time.
@@ -6910,6 +6926,8 @@
 
 Create the AsyncTable or RawAsyncTable instance.
 
+build() - Method in class org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder
+
 build() - Method in 
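
These index entries track the move from HColumnDescriptor/addFamily to the immutable ColumnFamilyDescriptorBuilder plus TableDescriptorBuilder.addColumnFamily. A minimal sketch of that builder flow, assuming the HBase 2.0-era builder API; the table and family names are hypothetical:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class DescriptorSketch {
      static TableDescriptor build() {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setBlocksize(64 * 1024)      // BLOCKSIZE attribute
            .setBlockCacheEnabled(true)   // BLOCKCACHE attribute
            .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("myTable"))
            .addColumnFamily(cf)          // replaces the removed addFamily(HColumnDescriptor)
            .build();
      }
    }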

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/apidocs/src-html/org/apache/hadoop/hbase/client/Result.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Result.html b/apidocs/src-html/org/apache/hadoop/hbase/client/Result.html
index 8f3eb65..da612e4 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Result.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Result.html
@@ -116,847 +116,892 @@
 108
 109  private final boolean readonly;
 110
-111  /**
-112   * Creates an empty Result w/ no KeyValue payload; returns null if you call {@link #rawCells()}.
-113   * Use this to represent no results if {@code null} won't do or in old 'mapred' as opposed
-114   * to 'mapreduce' package MapReduce where you need to overwrite a Result instance with a
-115   * {@link #copyFrom(Result)} call.
-116   */
-117  public Result() {
-118    this(false);
-119  }
-120
-121  /**
-122   * Allows to construct special purpose immutable Result objects,
-123   * such as EMPTY_RESULT.
-124   * @param readonly whether this Result instance is readonly
-125   */
-126  private Result(boolean readonly) {
-127    this.readonly = readonly;
-128  }
-129
-130  /**
-131   * Instantiate a Result with the specified List of KeyValues.
-132   * <br><strong>Note:</strong> You must ensure that the keyvalues are already sorted.
-133   * @param cells List of cells
-134   */
-135  public static Result create(List<Cell> cells) {
-136    return create(cells, null);
-137  }
-138
-139  public static Result create(List<Cell> cells, Boolean exists) {
-140    return create(cells, exists, false);
-141  }
-142
-143  public static Result create(List<Cell> cells, Boolean exists, boolean stale) {
-144    return create(cells, exists, stale, false);
-145  }
-146
-147  public static Result create(List<Cell> cells, Boolean exists, boolean stale,
-148      boolean mayHaveMoreCellsInRow) {
-149    if (exists != null){
-150      return new Result(null, exists, stale, mayHaveMoreCellsInRow);
-151    }
-152    return new Result(cells.toArray(new Cell[cells.size()]), null, stale, mayHaveMoreCellsInRow);
-153  }
-154
-155  /**
-156   * Instantiate a Result with the specified array of KeyValues.
-157   * <br><strong>Note:</strong> You must ensure that the keyvalues are already sorted.
-158   * @param cells array of cells
-159   */
-160  public static Result create(Cell[] cells) {
-161    return create(cells, null, false);
-162  }
-163
-164  public static Result create(Cell[] cells, Boolean exists, boolean stale) {
-165    return create(cells, exists, stale, false);
-166  }
-167
-168  public static Result create(Cell[] cells, Boolean exists, boolean stale,
-169      boolean mayHaveMoreCellsInRow) {
-170    if (exists != null) {
-171      return new Result(null, exists, stale, mayHaveMoreCellsInRow);
-172    }
-173    return new Result(cells, null, stale, mayHaveMoreCellsInRow);
-174  }
-175
-176  /** Private ctor. Use {@link #create(Cell[])}. */
-177  private Result(Cell[] cells, Boolean exists, boolean stale, boolean mayHaveMoreCellsInRow) {
-178    this.cells = cells;
-179    this.exists = exists;
-180    this.stale = stale;
-181    this.mayHaveMoreCellsInRow = mayHaveMoreCellsInRow;
-182    this.readonly = false;
-183  }
-184
-185  /**
-186   * Method for retrieving the row key that corresponds to
-187   * the row from which this Result was created.
-188   * @return row
-189   */
-190  public byte [] getRow() {
-191    if (this.row == null) {
-192      this.row = (this.cells == null || this.cells.length == 0) ?
-193          null :
-194          CellUtil.cloneRow(this.cells[0]);
-195    }
-196    return this.row;
-197  }
-198
-199  /**
-200   * Return the array of Cells backing this Result instance.
-201   *
-202   * The array is sorted from smallest -&gt; largest using the
-203   * {@link CellComparator#COMPARATOR}.
-204   *
-205   * The array only contains what your Get or Scan specifies and no more.
-206   * For example if you request column "A" 1 version you will have at most 1
-207   * Cell in the array. If you request column "A" with 2 version you will
-208   * have at most 2 Cells, with the first one being the newer timestamp and
-209   * the second being the older timestamp (this is the sort order defined by
-210   * {@link CellComparator#COMPARATOR}).  If columns don't exist, they won't be
-211   * present in the result. Therefore if you ask for 1 version all columns,
-212   * it is safe to iterate over this array and expect to see 1 Cell for
-213   * each column and no more.
-214   *
-215   * This API is faster than using getFamilyMap() and getMap()
-216   *
-217   * @return array of Cells; can be null if nothing in the result
-218   */
-219  public Cell[] rawCells() {
-220    return cells;
-221  }
-222
-223  /**
-224   * Create a sorted list of the Cell's in this result.
+111  private Cursor cursor = null;
+112
+113  /**
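
The rawCells() javadoc above defines the contract a caller can rely on: cells arrive sorted, with at most the requested number of versions per column, and the array is null for an empty Result. A small sketch leaning on that contract:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.client.Result;

    final class ResultSketch {
      static void print(Result result) {
        Cell[] cells = result.rawCells();
        if (cells == null) {          // empty Result: rawCells() returns null
          return;
        }
        for (Cell cell : cells) {     // already sorted; no re-sorting needed
          System.out.printf("%s:%s @%d = %s%n",
              new String(CellUtil.cloneFamily(cell)),
              new String(CellUtil.cloneQualifier(cell)),
              cell.getTimestamp(),
              new String(CellUtil.cloneValue(cell)));
        }
      }
    }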

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6ad4f21a/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.html
index 6f8ec43..cb51813 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.html
@@ -264,7 +264,7 @@ implements Procedure
-addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, doAcquireLock, doExecute, doReleaseLock, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcId, getProcIdHashCode, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, holdLock, incChildrenLatch, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, toString, toStringClass, toStringDetails, toStringSimpleSB, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted
+addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, doAcquireLock, doExecute, doReleaseLock, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, holdLock, incChildrenLatch, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, toString, toStringClass, toStringDetails, toStringSimpleSB, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6ad4f21a/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.html
index 90cdfad..3059dac 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.html
@@ -331,7 +331,7 @@ extends Procedure
-addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doAcquireLock,
 doExecute,
doReleaseLock, doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcId, getProcIdHashCode,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime,
 getTimeout,
getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasOwner,
 hasParent,
 hasTimeout,
 haveSameParent,
 incChildrenLatch,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 removeStackIndex,
 setAbortFailure,
setChildrenLatch, setFailure,
 setFailure,
 setLastUpdate,
 setNonceKey,
 setOwner,
 setOwner,
 setParentProcId,
 setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState,
 setSubmittedTime,
 setTimeout,
 setTimeoutFailure,
 shouldWaitClientAck,
 toString,
 toStringClass,
 toStringDetails,
 toStringSimpleSB,
 updateMetricsOnFinish,
 updateMetricsOnSubmit,
 updateTimestamp,
 
 wasExecuted
+addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doAcquireLock,
 doExecute,
doReleaseLock, doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.RetryingRPC.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.RetryingRPC.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.RetryingRPC.html
index 33d2175..f9ef80a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.RetryingRPC.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.RetryingRPC.html
@@ -43,394 +43,410 @@
 035import org.apache.commons.logging.Log;
 036import 
org.apache.commons.logging.LogFactory;
 037import 
org.apache.hadoop.conf.Configuration;
-038import 
org.apache.hadoop.hbase.HRegionInfo;
-039import 
org.apache.hadoop.hbase.RegionLocations;
-040import 
org.apache.hadoop.hbase.TableName;
-041import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-042import 
org.apache.hadoop.hbase.client.ScannerCallable.MoreResults;
-043import 
org.apache.hadoop.hbase.util.Pair;
-044
-045/**
-046 * This class has the logic for handling 
scanners for regions with and without replicas.
-047 * 1. A scan is attempted on the default 
(primary) region
-048 * 2. The scanner sends all the RPCs to 
the default region until it is done, or, there
-049 * is a timeout on the default (a timeout 
of zero is disallowed).
-050 * 3. If there is a timeout in (2) above, 
scanner(s) is opened on the non-default replica(s)
-051 * 4. The results from the first 
successful scanner are taken, and it is stored which server
-052 * returned the results.
-053 * 5. The next RPCs are done on the above 
stored server until it is done or there is a timeout,
-054 * in which case, the other replicas are 
queried (as in (3) above).
-055 *
-056 */
-057@InterfaceAudience.Private
-058class ScannerCallableWithReplicas 
implements RetryingCallable<Result[]> {
-059  private static final Log LOG = 
LogFactory.getLog(ScannerCallableWithReplicas.class);
-060  volatile ScannerCallable 
currentScannerCallable;
-061  AtomicBoolean replicaSwitched = new 
AtomicBoolean(false);
-062  final ClusterConnection cConnection;
-063  protected final ExecutorService pool;
-064  protected final int 
timeBeforeReplicas;
-065  private final Scan scan;
-066  private final int retries;
-067  private Result lastResult;
-068  private final 
RpcRetryingCaller<Result[]> caller;
-069  private final TableName tableName;
-070  private Configuration conf;
-071  private int scannerTimeout;
-072  private Set<ScannerCallable> outstandingCallables = new HashSet<>();
-073  private boolean someRPCcancelled = 
false; //required for testing purposes only
-074
-075  public 
ScannerCallableWithReplicas(TableName tableName, ClusterConnection 
cConnection,
-076  ScannerCallable baseCallable, 
ExecutorService pool, int timeBeforeReplicas, Scan scan,
-077  int retries, int scannerTimeout, 
int caching, Configuration conf,
-078  RpcRetryingCallerResult [] 
caller) {
-079this.currentScannerCallable = 
baseCallable;
-080this.cConnection = cConnection;
-081this.pool = pool;
-082    if (timeBeforeReplicas < 0) {
-083  throw new 
IllegalArgumentException("Invalid value of operation timeout on the 
primary");
-084}
-085this.timeBeforeReplicas = 
timeBeforeReplicas;
-086this.scan = scan;
-087this.retries = retries;
-088this.tableName = tableName;
-089this.conf = conf;
-090this.scannerTimeout = 
scannerTimeout;
-091this.caller = caller;
-092  }
-093
-094  public void setClose() {
-095currentScannerCallable.setClose();
-096  }
-097
-098  public void setRenew(boolean val) {
-099
currentScannerCallable.setRenew(val);
-100  }
-101
-102  public void setCaching(int caching) {
-103
currentScannerCallable.setCaching(caching);
-104  }
-105
-106  public int getCaching() {
-107return 
currentScannerCallable.getCaching();
-108  }
-109
-110  public HRegionInfo getHRegionInfo() {
-111return 
currentScannerCallable.getHRegionInfo();
-112  }
-113
-114  public MoreResults 
moreResultsInRegion() {
-115return 
currentScannerCallable.moreResultsInRegion();
-116  }
-117
-118  public MoreResults moreResultsForScan() 
{
-119return 
currentScannerCallable.moreResultsForScan();
-120  }
-121
-122  @Override
-123  public Result [] call(int timeout) 
throws IOException {
-124// If the active replica callable was 
closed somewhere, invoke the RPC to
-125// really close it. In the case of 
regular scanners, this applies. We make a couple
-126// of RPCs to a RegionServer, and 
when that region is exhausted, we set
-127// the closed flag. Then an RPC is 
required to actually close the scanner.
-128    if (currentScannerCallable != null && currentScannerCallable.closed) {
-129  // For closing we target that exact 
scanner (and not do replica fallback 

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6dd31117/devapidocs/org/apache/hadoop/hbase/class-use/YouAreDeadException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/YouAreDeadException.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/YouAreDeadException.html
index c38c776..d23e08f 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/YouAreDeadException.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/YouAreDeadException.html
@@ -110,7 +110,7 @@
 
 
 
-(package private) void
+void
 ServerManager.regionServerReport(ServerNamesn,
   ServerLoadsl)
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6dd31117/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
 
b/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
index 01fac22..eba119b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
@@ -302,216 +302,220 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 
-org.apache.hadoop.hbase.master.balancer
+org.apache.hadoop.hbase.master.assignment
 
 
 
-org.apache.hadoop.hbase.master.cleaner
+org.apache.hadoop.hbase.master.balancer
 
 
 
-org.apache.hadoop.hbase.master.locking
+org.apache.hadoop.hbase.master.cleaner
 
 
 
-org.apache.hadoop.hbase.master.normalizer
+org.apache.hadoop.hbase.master.locking
 
 
 
-org.apache.hadoop.hbase.master.procedure
+org.apache.hadoop.hbase.master.normalizer
 
 
 
-org.apache.hadoop.hbase.master.replication
+org.apache.hadoop.hbase.master.procedure
 
 
 
-org.apache.hadoop.hbase.master.snapshot
+org.apache.hadoop.hbase.master.replication
 
 
 
+org.apache.hadoop.hbase.master.snapshot
+
+
+
 org.apache.hadoop.hbase.metrics
 
 Metrics API for HBase.
 
 
-
+
 org.apache.hadoop.hbase.metrics.impl
 
 Implementation of the HBase Metrics framework.
 
 
-
+
 org.apache.hadoop.hbase.mob
 
 
-
+
 org.apache.hadoop.hbase.mob.compactions
 
 
-
+
 org.apache.hadoop.hbase.monitoring
 
 
-
+
 org.apache.hadoop.hbase.namespace
 
 
-
+
 org.apache.hadoop.hbase.nio
 
 
-
+
 org.apache.hadoop.hbase.procedure
 
 
-
+
 org.apache.hadoop.hbase.procedure.flush
 
 
-
+
 org.apache.hadoop.hbase.procedure2
 
 
-
+
 org.apache.hadoop.hbase.procedure2.store
 
 
-
+
 org.apache.hadoop.hbase.procedure2.store.wal
 
 
-
+
 org.apache.hadoop.hbase.procedure2.util
 
 
-
+
 org.apache.hadoop.hbase.quotas
 
 
-
+
 org.apache.hadoop.hbase.quotas.policies
 
 
-
+
 org.apache.hadoop.hbase.regionserver
 
 
-
+
 org.apache.hadoop.hbase.regionserver.compactions
 
 
-
+
 org.apache.hadoop.hbase.regionserver.handler
 
 
-
+
 org.apache.hadoop.hbase.regionserver.querymatcher
 
 
-
+
 org.apache.hadoop.hbase.regionserver.snapshot
 
 
-
+
 org.apache.hadoop.hbase.regionserver.throttle
 
 
-
+
 org.apache.hadoop.hbase.regionserver.wal
 
 
-
+
 org.apache.hadoop.hbase.replication
 
 Multi Cluster Replication
 
 
-
+
 org.apache.hadoop.hbase.replication.master
 
 
-
+
 org.apache.hadoop.hbase.replication.regionserver
 
 
-
+
 org.apache.hadoop.hbase.rest
 
 HBase REST
 
 
-
+
 org.apache.hadoop.hbase.rest.filter
 
 
-
+
 org.apache.hadoop.hbase.rest.model
 
 
-
+
 org.apache.hadoop.hbase.rest.provider
 
 
-
+
 org.apache.hadoop.hbase.rest.provider.consumer
 
 
-
+
 org.apache.hadoop.hbase.rest.provider.producer
 
 
-
+
 org.apache.hadoop.hbase.rsgroup
 
 
-
+
 org.apache.hadoop.hbase.security
 
 
-
+
 org.apache.hadoop.hbase.security.access
 
 
-
+
 org.apache.hadoop.hbase.security.token
 
 
-
+
 org.apache.hadoop.hbase.security.visibility
 
 
-
+
 org.apache.hadoop.hbase.security.visibility.expression
 
 
-
+
 org.apache.hadoop.hbase.snapshot
 
 
-
+
 org.apache.hadoop.hbase.thrift
 
 Provides an HBase http://incubator.apache.org/thrift/;>Thrift
 service.
 
 
-
+
 org.apache.hadoop.hbase.thrift2
 
 Provides an HBase http://thrift.apache.org/;>Thrift
 service.
 
 
-
+
 org.apache.hadoop.hbase.tool
 
 
-
+
 org.apache.hadoop.hbase.trace
 
 
-
+
 org.apache.hadoop.hbase.types
 
 
@@ -519,47 +523,47 @@ service.
  extensible data type API.
 
 
-
+
 org.apache.hadoop.hbase.util
 
 
-
+
 org.apache.hadoop.hbase.util.byterange
 
 
-
+
 org.apache.hadoop.hbase.util.byterange.impl
 
 
-
+
 org.apache.hadoop.hbase.util.hbck
 
 
-
+
 org.apache.hadoop.hbase.util.test
 
 
-
+
 org.apache.hadoop.hbase.util.vint
 
 
-
+
 org.apache.hadoop.hbase.wal
 
 
-
+
 org.apache.hadoop.hbase.zookeeper
 
 
-
+
 org.apache.hadoop.metrics2.impl
 
 
-
+
 org.apache.hadoop.metrics2.lib
 
 
-
+
 org.apache.hadoop.metrics2.util
 
 
@@ -1039,7 +1043,7 @@ service.
 intindex)
 
 

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
 
b/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
index ad1b5d9..9ffa364 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
@@ -390,124 +390,128 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 
-org.apache.hadoop.hbase.regionserver
+org.apache.hadoop.hbase.quotas.policies
 
 
 
-org.apache.hadoop.hbase.regionserver.compactions
+org.apache.hadoop.hbase.regionserver
 
 
 
-org.apache.hadoop.hbase.regionserver.handler
+org.apache.hadoop.hbase.regionserver.compactions
 
 
 
-org.apache.hadoop.hbase.regionserver.querymatcher
+org.apache.hadoop.hbase.regionserver.handler
 
 
 
-org.apache.hadoop.hbase.regionserver.snapshot
+org.apache.hadoop.hbase.regionserver.querymatcher
 
 
 
-org.apache.hadoop.hbase.regionserver.throttle
+org.apache.hadoop.hbase.regionserver.snapshot
 
 
 
-org.apache.hadoop.hbase.regionserver.wal
+org.apache.hadoop.hbase.regionserver.throttle
 
 
 
+org.apache.hadoop.hbase.regionserver.wal
+
+
+
 org.apache.hadoop.hbase.replication
 
 Multi Cluster Replication
 
 
-
+
 org.apache.hadoop.hbase.replication.master
 
 
-
+
 org.apache.hadoop.hbase.replication.regionserver
 
 
-
+
 org.apache.hadoop.hbase.rest
 
 HBase REST
 
 
-
+
 org.apache.hadoop.hbase.rest.filter
 
 
-
+
 org.apache.hadoop.hbase.rest.model
 
 
-
+
 org.apache.hadoop.hbase.rest.provider
 
 
-
+
 org.apache.hadoop.hbase.rest.provider.consumer
 
 
-
+
 org.apache.hadoop.hbase.rest.provider.producer
 
 
-
+
 org.apache.hadoop.hbase.rsgroup
 
 
-
+
 org.apache.hadoop.hbase.security
 
 
-
+
 org.apache.hadoop.hbase.security.access
 
 
-
+
 org.apache.hadoop.hbase.security.token
 
 
-
+
 org.apache.hadoop.hbase.security.visibility
 
 
-
+
 org.apache.hadoop.hbase.security.visibility.expression
 
 
-
+
 org.apache.hadoop.hbase.snapshot
 
 
-
+
 org.apache.hadoop.hbase.thrift
 
 Provides an HBase http://incubator.apache.org/thrift/;>Thrift
 service.
 
 
-
+
 org.apache.hadoop.hbase.thrift2
 
 Provides an HBase http://thrift.apache.org/;>Thrift
 service.
 
 
-
+
 org.apache.hadoop.hbase.tool
 
 
-
+
 org.apache.hadoop.hbase.trace
 
 
-
+
 org.apache.hadoop.hbase.types
 
 
@@ -515,47 +519,47 @@ service.
  extensible data type API.
 
 
-
+
 org.apache.hadoop.hbase.util
 
 
-
+
 org.apache.hadoop.hbase.util.byterange
 
 
-
+
 org.apache.hadoop.hbase.util.byterange.impl
 
 
-
+
 org.apache.hadoop.hbase.util.hbck
 
 
-
+
 org.apache.hadoop.hbase.util.test
 
 
-
+
 org.apache.hadoop.hbase.util.vint
 
 
-
+
 org.apache.hadoop.hbase.wal
 
 
-
+
 org.apache.hadoop.hbase.zookeeper
 
 
-
+
 org.apache.hadoop.metrics2.impl
 
 
-
+
 org.apache.hadoop.metrics2.lib
 
 
-
+
 org.apache.hadoop.metrics2.util
 
 
@@ -1959,18 +1963,24 @@ service.
 
 
 
+class
+QuotaStatusCalls
+Client class to wrap RPCs to HBase servers for space quota 
status information.
+
+
+
 (package private) class
 RawAsyncTableImpl
 The implementation of RawAsyncTable.
 
 
-
+
 class
 RegionAdminServiceCallableT
 Similar to RegionServerCallable but for the AdminService 
interface.
 
 
-
+
 (package private) class
 RegionCoprocessorRpcChannel
 Provides clients with an RPC connection to call Coprocessor 
Endpoint
@@ -1978,87 +1988,87 @@ service.
  against a given table region.
 
 
-
+
 (package private) class
 RegionCoprocessorRpcChannelImpl
 The implementation of a region based coprocessor rpc 
channel.
 
 
-
+
 class
 RegionCoprocessorServiceExec
 Represents a coprocessor service method execution against a 
single region.
 
 
-
+
 (package private) class
 RegionLocateType
 Indicate which row you want to locate.
 
 
-
+
 class
 RegionReplicaUtil
 Utility methods which contain the logic for regions and 
replicas.
 
 
-
+
 class
 RegionServerCallableT,S
 Implementations make a RPC call against a RegionService via 
a protobuf Service.
 
 
-
+
 (package private) interface
 Registry
 Cluster registry.
 
 
-
+
 (package private) class
 RegistryFactory
 Get instance of configured Registry.
 
 
-
+
 class
 ResultBoundedCompletionServiceV
 A completion service for the RpcRetryingCallerFactory.
 
 
-
+
 class
 ResultStatsUtil
 A Result with 
some statistics about the server/region status
 
 
-
+
 static class
 RetriesExhaustedException.ThrowableWithExtraContext
 Datastructure that allows adding more info around Throwable 
incident.
 
 
-
+
 interface
 RetryingCallableT
 A CallableT that will be retried.
 
 
-
+
 (package private) class
 RetryingCallerInterceptor
 This class is designed to fit into the RetryingCaller class 
which forms 

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
index be7f8e5..37574d7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
@@ -147,1885 +147,1897 @@
 139  private final boolean 
hostnamesCanChange;
 140  private final long pause;
 141  private final long pauseForCQTBE;// 
pause for CallQueueTooBigException, if specified
-142  private final boolean 
useMetaReplicas;
-143  private final int numTries;
-144  final int rpcTimeout;
-145
-146  /**
-147   * Global nonceGenerator shared per client. Currently there's no reason to limit its scope.
-148   * Once it's set under 
nonceGeneratorCreateLock, it is never unset or changed.
-149   */
-150  private static volatile NonceGenerator 
nonceGenerator = null;
-151  /** The nonce generator lock. Only 
taken when creating Connection, which gets a private copy. */
-152  private static final Object 
nonceGeneratorCreateLock = new Object();
-153
-154  private final AsyncProcess 
asyncProcess;
-155  // single tracker per connection
-156  private final ServerStatisticTracker 
stats;
-157
-158  private volatile boolean closed;
-159  private volatile boolean aborted;
-160
-161  // package protected for the tests
-162  ClusterStatusListener 
clusterStatusListener;
-163
-164  private final Object metaRegionLock = 
new Object();
-165
-166  // We have a single lock for master & zk to prevent deadlocks. Having
-167  //  one lock for ZK and one lock for 
master is not possible:
-168  //  When creating a connection to 
master, we need a connection to ZK to get
-169  //  its address. But another thread 
could have taken the ZK lock, and could
-170  //  be waiting for the master lock => deadlock.
-171  private final Object masterAndZKLock = 
new Object();
-172
-173  // thread executor shared by all Table 
instances created
-174  // by this connection
-175  private volatile ExecutorService 
batchPool = null;
-176  // meta thread executor shared by all 
Table instances created
-177  // by this connection
-178  private volatile ExecutorService 
metaLookupPool = null;
-179  private volatile boolean cleanupPool = 
false;
-180
-181  private final Configuration conf;
-182
-183  // cache the configuration value for 
tables so that we can avoid calling
-184  // the expensive Configuration to fetch 
the value multiple times.
-185  private final ConnectionConfiguration 
connectionConfig;
-186
-187  // Client rpc instance.
-188  private final RpcClient rpcClient;
-189
-190  private final MetaCache metaCache;
-191  private final MetricsConnection 
metrics;
-192
-193  protected User user;
-194
-195  private final RpcRetryingCallerFactory 
rpcCallerFactory;
-196
-197  private final RpcControllerFactory 
rpcControllerFactory;
-198
-199  private final RetryingCallerInterceptor 
interceptor;
-200
-201  /**
-202   * Cluster registry of basic info such 
as clusterid and meta region location.
-203   */
-204  Registry registry;
-205
-206  private final ClientBackoffPolicy 
backoffPolicy;
-207
-208  /**
-209   * Allow setting an alternate 
BufferedMutator implementation via
-210   * config. If null, use default.
-211   */
-212  private final String 
alternateBufferedMutatorClassName;
-213
-214  /**
-215   * constructor
-216   * @param conf Configuration object
-217   */
-218  ConnectionImplementation(Configuration 
conf,
-219   
ExecutorService pool, User user) throws IOException {
-220this.conf = conf;
-221this.user = user;
-222this.batchPool = pool;
-223this.connectionConfig = new 
ConnectionConfiguration(conf);
-224this.closed = false;
-225this.pause = 
conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
-226
HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
-227long configuredPauseForCQTBE = 
conf.getLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, pause);
-228    if (configuredPauseForCQTBE < pause) {
-229  LOG.warn("The " + 
HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE + " setting: "
-230  + configuredPauseForCQTBE + " 
is smaller than " + HConstants.HBASE_CLIENT_PAUSE
-231  + ", will use " + pause + " 
instead.");
-232  this.pauseForCQTBE = pause;
-233} else {
-234  this.pauseForCQTBE = 
configuredPauseForCQTBE;
-235}
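
(Aside, not part of the patch: a hedged sketch of setting the two pauses this
guard compares; the key strings follow the HConstants names used in the
quoted code.)

Configuration conf = HBaseConfiguration.create();
conf.setLong("hbase.client.pause", 100L);        // HConstants.HBASE_CLIENT_PAUSE
conf.setLong("hbase.client.pause.cqtbe", 1000L); // HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE
// Per the warning above, a CQTBE pause smaller than the base pause is
// ignored and the base pause is used instead.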
-236this.useMetaReplicas = 
conf.getBoolean(HConstants.USE_META_REPLICAS,
-237  
HConstants.DEFAULT_USE_META_REPLICAS);
-238// how many times to try, one more 
than max *retry* time
-239this.numTries = 

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b4cf63f/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
 
b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
index 86e6e7f..96a43aa 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
@@ -248,7 +248,7 @@ extends 
 
 Methods inherited from classorg.apache.hadoop.hbase.master.HMaster
-abort,
 abortProcedure,
 addColumn,
 addReplicationPeer,
 balance,
 balance,
 balanceSwitch,
 canCreateBaseZNode, canUpdateTableDescriptor,
 checkInitialized,
 checkServiceStarted,
 checkTableModifiable,
 configureInfoServer,
 constructMaster,
 createMetaBootstrap, createNamespace,
 createRpcServices,
 createServerManager,
 createSystemTable,
 createTable,
 deleteColumn,
 deleteNamespace,
 deleteTable,
 disableReplicationPeer,
 disableTable,
 drainRegionServer,
 enableReplicationPeer,
 enableTable,
 getAssignmentManager, getAverageLoad,
 getCatalogJanitor,
 getClientIdAuditPrefix,
 getClusterSchema,
 getClusterStatus,
 getDumpServlet,
 getFavoredNodesManager,
 getFsTableDescriptors,
 getHFileCleaner,
 getInitializedEvent,
 getLastMajorCompactionTimestamp,
 getLastMajorCompactionTimestampForRegion,
 getLoadBalancer,
 getLoadBalancerClassName,
 getLoadedCoprocessors,
getLockManager, getLogCleaner,
 getMasterActiveTime,
 getMasterCoprocessorHost,
 getMasterCoprocessors,
 getMasterFileSystem,
 getMasterMetrics,
 getMasterProcedureExecutor,
 getMasterProcedureManagerHost,
 getMasterQuotaManager,
 getMasterRpcServices,
 getMasterStartTime,
 getMasterWalManager,
 getMergePlanCount,
 getMobCompactionState,
 getNamespace,
 getNamespaces,
 getNumWALFiles, getProcessName,
 getRegionNormalizer,
 getRegionNormalizerTracker,
 getRegionServerFatalLogBuffer,
 getRegionServerInfoPort,
 getRegionServerVersion,
 getRemoteInetAddress,
 getReplicationPeerConfig,
 getServerCrashProcessingEnabledEvent,
 getServerManager,
 getServerName,
 getSnapshotManager,
 getSplitOrMergeTracker,
 getSplitPlanCount,
 getTableDescriptors,
 getTableRegionForRow,
 getTableStateManager,
 getWalProcedureStore,
 getZooKeeper,
 initClusterSchemaService,
 initializeZKBasedSystemTrackers,
 initQuotaManager,
 isActiveMaster,
 isBalancerOn, isCatalogJanitorEnabled,
 isCleanerChoreEnabled,
 isInitializationStartsMetaRegionAssignment,
 isInitialized,
 isInMaintenanceMode,
 isNormalizerOn,
 isServerCrashProcessingEnabled,
 isSplitOrMergeEnabled, listDrainingRegionServers,
 listLocks,
 listProcedures,
 listReplicationPeers,
 listTableDescriptors,
 listTableDescriptorsByNamespace,
 listTableNames
 , listTableNamesByNamespace,
 login,
 main,
 mergeRegions,
 modifyColumn,
 modifyNamespace,
 modifyTable,
 move,
 normalizeRegions,
 registerService,
 removeDrainFromRegionServer,
 removeReplicationPeer,
 reportMobCompactionEnd,
 reportMobCompactionStart, requestMobCompaction,
 restoreSnapshot,
 sendShutdownInterrupt,
 setCatalogJanitorEnabled,
 setInitialized,
 setServerCrashProcessingEnabled,
 shutdown, splitRegion,
 stopMaster,
 stopServiceThreads,
 truncateTable,
 updateReplicationPeerConfig,
 waitForMasterActive
+abort,
 abortProcedure,
 addColumn,
 addReplicationPeer,
 balance,
 balance,
 balanceSwitch,
 canCreateBaseZNode, canUpdateTableDescriptor,
 checkInitialized,
 checkServiceStarted,
 checkTableModifiable,
 configureInfoServer,
 constructMaster,
 createMetaBootstrap, createNamespace,
 createRpcServices,
 createServerManager,
 createSystemTable,
 createTable,
 deleteColumn,
 deleteNamespace,
 deleteTable,
 disableReplicationPeer,
 disableTable,
 drainRegionServer,
 enableReplicationPeer,
 enableTable,
 getAssignmentManager, getAverageLoad,
 getCatalogJanitor,
 getClientIdAuditPrefix,
 getClusterSchema,
 getClusterStatus,
 getDumpServlet,
 getFavoredNodesManager,
 getFsTableDescriptors,
 getHFileCleaner,
 getInitializedEvent,
 getLastMajorCompactionTimestamp,
 getLastMajorCompactionTimestampForRegion,
 getLoadBalancer,
 getLoadBalancerClassName,
 getLoadedCoprocessors,
getLockManager, getLogCleaner,
 getMasterActiveTime,
 getMasterCoprocessorHost,
 getMasterCoprocessors,
 getMasterFileSystem,
 getMasterMetrics,
 getMasterProcedureExecutor,
 getMasterProcedureManagerHost,
 getMasterQuotaManager,
 getMasterRpcServices,
 getMasterStartTime,
 getMasterWalManager,
 getMergePlanCount,
 getMobCompactionState,
 getNamespace,
 getNamespaces,
 getNumWALFiles, getProcessName,
 getRegionNormalizer,
 getRegionNormalizerTracker,
 getRegionServerFatalLogBuffer,
 getRegionServerInfoPort,
 getRegionServerVersion,
 getRemoteInetAddress,
 getReplicationPeerConfig,
 getServerCrashProcessingEnabledEvent,
 getServerManager,
 getServerName,
 getSnapshotManager,
 getSplitOrMergeTracker,
 

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f55ebeaa/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.ConnectionHeaderHandler.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.ConnectionHeaderHandler.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.ConnectionHeaderHandler.html
index 109b5f3..e484176 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.ConnectionHeaderHandler.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.ConnectionHeaderHandler.html
@@ -23,484 +23,310 @@
 015 * See the License for the specific 
language governing permissions and
 016 * limitations under the License.
 017 */
-018
-019package org.apache.hadoop.hbase.ipc;
-020
-021import 
io.netty.bootstrap.ServerBootstrap;
-022import io.netty.buffer.ByteBuf;
-023import 
io.netty.buffer.PooledByteBufAllocator;
-024import io.netty.buffer.Unpooled;
-025import io.netty.channel.Channel;
-026import io.netty.channel.ChannelFuture;
-027import 
io.netty.channel.ChannelFutureListener;
-028import 
io.netty.channel.ChannelHandlerContext;
-029import 
io.netty.channel.ChannelInboundHandlerAdapter;
-030import 
io.netty.channel.ChannelInitializer;
-031import io.netty.channel.ChannelOption;
-032import 
io.netty.channel.ChannelOutboundHandlerAdapter;
-033import 
io.netty.channel.ChannelPipeline;
-034import io.netty.channel.ChannelPromise;
-035import io.netty.channel.EventLoopGroup;
-036import 
io.netty.channel.epoll.EpollEventLoopGroup;
-037import 
io.netty.channel.epoll.EpollServerSocketChannel;
-038import 
io.netty.channel.group.ChannelGroup;
-039import 
io.netty.channel.group.DefaultChannelGroup;
-040import 
io.netty.channel.nio.NioEventLoopGroup;
-041import 
io.netty.channel.socket.SocketChannel;
-042import 
io.netty.channel.socket.nio.NioServerSocketChannel;
-043import 
io.netty.handler.codec.ByteToMessageDecoder;
-044import 
io.netty.handler.codec.LengthFieldBasedFrameDecoder;
-045import 
io.netty.util.concurrent.GlobalEventExecutor;
-046
-047import java.io.IOException;
-048import java.io.InterruptedIOException;
-049import java.net.InetAddress;
-050import java.net.InetSocketAddress;
-051import java.nio.ByteBuffer;
-052import java.util.Arrays;
-053import java.util.List;
-054import 
java.util.concurrent.CountDownLatch;
-055
-056import org.apache.commons.logging.Log;
-057import 
org.apache.commons.logging.LogFactory;
-058import 
org.apache.hadoop.conf.Configuration;
-059import 
org.apache.hadoop.hbase.CellScanner;
-060import 
org.apache.hadoop.hbase.HConstants;
-061import org.apache.hadoop.hbase.Server;
-062import 
org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
-063import 
org.apache.hadoop.hbase.nio.ByteBuff;
-064import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-065import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-066import 
org.apache.hadoop.hbase.security.AuthMethod;
-067import 
org.apache.hadoop.hbase.security.HBasePolicyProvider;
-068import 
org.apache.hadoop.hbase.security.SaslStatus;
-069import 
org.apache.hadoop.hbase.security.SaslUtil;
-070import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
-071import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
-072import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
-073import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-074import 
org.apache.hadoop.hbase.util.Bytes;
-075import 
org.apache.hadoop.hbase.util.JVM;
-076import 
org.apache.hadoop.hbase.util.Pair;
-077import 
org.apache.hadoop.io.IntWritable;
-078import 
org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
-079import org.apache.htrace.TraceInfo;
+018package org.apache.hadoop.hbase.ipc;
+019
+020import 
io.netty.bootstrap.ServerBootstrap;
+021import io.netty.buffer.ByteBuf;
+022import 
io.netty.buffer.PooledByteBufAllocator;
+023import io.netty.buffer.Unpooled;
+024import io.netty.channel.Channel;
+025import io.netty.channel.ChannelFuture;
+026import 
io.netty.channel.ChannelFutureListener;
+027import 
io.netty.channel.ChannelHandlerContext;
+028import 
io.netty.channel.ChannelInboundHandlerAdapter;
+029import 
io.netty.channel.ChannelInitializer;
+030import io.netty.channel.ChannelOption;
+031import 
io.netty.channel.ChannelOutboundHandlerAdapter;
+032import 
io.netty.channel.ChannelPipeline;
+033import io.netty.channel.ChannelPromise;
+034import io.netty.channel.EventLoopGroup;
+035import 
io.netty.channel.epoll.EpollEventLoopGroup;
+036import 
io.netty.channel.epoll.EpollServerSocketChannel;
+037import 
io.netty.channel.group.ChannelGroup;
+038import 
io.netty.channel.group.DefaultChannelGroup;
+039import 
io.netty.channel.nio.NioEventLoopGroup;
+040import 
io.netty.channel.socket.SocketChannel;
+041import 
io.netty.channel.socket.nio.NioServerSocketChannel;
+042import 

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e0a5167/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.HelpCommand.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.HelpCommand.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.HelpCommand.html
index cc40d21..2df0b04 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.HelpCommand.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.HelpCommand.html
@@ -61,8 +61,8 @@
 053import 
org.apache.hadoop.hbase.backup.BackupRestoreConstants;
 054import 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand;
 055import 
org.apache.hadoop.hbase.backup.BackupType;
-056import 
org.apache.hadoop.hbase.backup.util.BackupUtils;
-057import 
org.apache.hadoop.hbase.backup.util.BackupSet;
+056import 
org.apache.hadoop.hbase.backup.util.BackupSet;
+057import 
org.apache.hadoop.hbase.backup.util.BackupUtils;
 058import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 059import 
org.apache.hadoop.hbase.client.Connection;
 060import 
org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -122,670 +122,703 @@
 114
 115  public static abstract class Command 
extends Configured {
 116CommandLine cmdline;
-117
+117Connection conn;
 118Command(Configuration conf) {
-119  super(conf);
-120}
-121
-122public void execute() throws 
IOException {
-123  if (cmdline.hasOption("h") || 
cmdline.hasOption("help")) {
-124printUsage();
-125throw new 
IOException(INCORRECT_USAGE);
-126  }
-127}
-128
-129protected abstract void 
printUsage();
-130  }
-131
-132  private BackupCommands() {
-133throw new 
AssertionError("Instantiating utility class...");
-134  }
-135
-136  public static Command 
createCommand(Configuration conf, BackupCommand type, CommandLine cmdline) {
-137Command cmd = null;
-138switch (type) {
-139case CREATE:
-140  cmd = new CreateCommand(conf, 
cmdline);
-141  break;
-142case DESCRIBE:
-143  cmd = new DescribeCommand(conf, 
cmdline);
-144  break;
-145case PROGRESS:
-146  cmd = new ProgressCommand(conf, 
cmdline);
-147  break;
-148case DELETE:
-149  cmd = new DeleteCommand(conf, 
cmdline);
-150  break;
-151case CANCEL:
-152  cmd = new CancelCommand(conf, 
cmdline);
-153  break;
-154case HISTORY:
-155  cmd = new HistoryCommand(conf, 
cmdline);
-156  break;
-157case SET:
-158  cmd = new BackupSetCommand(conf, 
cmdline);
-159  break;
-160case HELP:
-161default:
-162  cmd = new HelpCommand(conf, 
cmdline);
-163  break;
-164}
-165return cmd;
-166  }
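
(Aside, not part of the patch: a hedged usage sketch of the factory above.
It assumes an already-parsed org.apache.commons.cli.CommandLine named
"cmdline"; Command and BackupCommand come from the quoted code.)

Configuration conf = HBaseConfiguration.create();
Command cmd = BackupCommands.createCommand(conf, BackupCommand.CREATE, cmdline);
cmd.execute(); // per Command.execute(), prints usage and throws
               // IOException(INCORRECT_USAGE) on -h/--help or bad arguments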
-167
-168  static int numOfArgs(String[] args) {
-169if (args == null) return 0;
-170return args.length;
-171  }
-172
-173  public static class CreateCommand 
extends Command {
-174
-175CreateCommand(Configuration conf, 
CommandLine cmdline) {
-176  super(conf);
-177  this.cmdline = cmdline;
-178}
-179
-180@Override
-181public void execute() throws 
IOException {
-182  super.execute();
-183  if (cmdline == null || 
cmdline.getArgs() == null) {
-184printUsage();
-185throw new 
IOException(INCORRECT_USAGE);
-186  }
-187  String[] args = 
cmdline.getArgs();
-188  if (args.length !=3) {
-189printUsage();
-190throw new 
IOException(INCORRECT_USAGE);
-191  }
-192
-193  if 
(!BackupType.FULL.toString().equalsIgnoreCase(args[1])
-194      && !BackupType.INCREMENTAL.toString().equalsIgnoreCase(args[1])) {
-195System.out.println("ERROR: 
invalid backup type: " + args[1]);
-196printUsage();
-197throw new 
IOException(INCORRECT_USAGE);
-198  }
-199  if (!verifyPath(args[2])) {
-200System.out.println("ERROR: 
invalid backup destination: " + args[2]);
-201printUsage();
-202throw new 
IOException(INCORRECT_USAGE);
-203  }
-204
-205  String tables = null;
-206  Configuration conf = getConf() != 
null ? getConf() : HBaseConfiguration.create();
-207
-208  // Check if we have both: backup 
set and list of tables
-209      if (cmdline.hasOption(OPTION_TABLE) && cmdline.hasOption(OPTION_SET)) {
-210System.out.println("ERROR: You 
can specify either backup set or list"
-211+ " of tables, but not 
both");
-212printUsage();
-213throw new 
IOException(INCORRECT_USAGE);
-214  }
-215
-216  // Check backup set
-217  String setName = null;
-218  if (cmdline.hasOption(OPTION_SET)) 
{
-219setName = 
cmdline.getOptionValue(OPTION_SET);
-220tables = getTablesForSet(setName, 
conf);
-221
-222if (tables == null) {
-223  System.out.println("ERROR: 
Backup set '" + setName
-224  + "' is 

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/709b8fcc/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html 
b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
index 715514f..d72927b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
@@ -393,166 +393,166 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 privateHMaster m_master
 
 
-
+
 
 
 
 
-m_catalogJanitorEnabled
-privateboolean m_catalogJanitorEnabled
+m_metaLocation
+privateServerName m_metaLocation
 
 
-
+
 
 
 
 
-m_catalogJanitorEnabled__IsNotDefault
-privateboolean m_catalogJanitorEnabled__IsNotDefault
+m_metaLocation__IsNotDefault
+privateboolean m_metaLocation__IsNotDefault
 
 
-
+
 
 
 
 
-m_filter
-privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String m_filter
+m_frags
+privatehttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integer m_frags
 
 
-
+
 
 
 
 
-m_filter__IsNotDefault
-privateboolean m_filter__IsNotDefault
+m_frags__IsNotDefault
+privateboolean m_frags__IsNotDefault
 
 
-
+
 
 
 
 
-m_serverManager
-privateServerManager m_serverManager
+m_filter
+privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String m_filter
 
 
-
+
 
 
 
 
-m_serverManager__IsNotDefault
-privateboolean m_serverManager__IsNotDefault
+m_filter__IsNotDefault
+privateboolean m_filter__IsNotDefault
 
 
-
+
 
 
 
 
-m_assignmentManager
-privateAssignmentManager m_assignmentManager
+m_serverManager
+privateServerManager m_serverManager
 
 
-
+
 
 
 
 
-m_assignmentManager__IsNotDefault
-privateboolean m_assignmentManager__IsNotDefault
+m_serverManager__IsNotDefault
+privateboolean m_serverManager__IsNotDefault
 
 
-
+
 
 
 
 
-m_deadServers
-privatehttp://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetServerName m_deadServers
+m_servers
+privatehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName m_servers
 
 
-
+
 
 
 
 
-m_deadServers__IsNotDefault
-privateboolean m_deadServers__IsNotDefault
+m_servers__IsNotDefault
+privateboolean m_servers__IsNotDefault
 
 
-
+
 
 
 
 
-m_servers
-privatehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName m_servers
+m_assignmentManager
+privateAssignmentManager m_assignmentManager
 
 
-
+
 
 
 
 
-m_servers__IsNotDefault
-privateboolean m_servers__IsNotDefault
+m_assignmentManager__IsNotDefault
+privateboolean m_assignmentManager__IsNotDefault
 
 
-
+
 
 
 
 
-m_format
-privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String m_format
+m_catalogJanitorEnabled
+privateboolean m_catalogJanitorEnabled
 
 
-
+
 
 
 
 
-m_format__IsNotDefault
-privateboolean m_format__IsNotDefault
+m_catalogJanitorEnabled__IsNotDefault
+privateboolean m_catalogJanitorEnabled__IsNotDefault
 
 
-
+
 
 
 
 
-m_frags
-privatehttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integer m_frags
+m_deadServers
+privatehttp://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetServerName m_deadServers
 
 
-
+
 
 
 
 
-m_frags__IsNotDefault
-privateboolean m_frags__IsNotDefault
+m_deadServers__IsNotDefault
+privateboolean m_deadServers__IsNotDefault
 
 
-
+
 
 
 
 
-m_metaLocation
-privateServerName m_metaLocation
+m_format
+privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String m_format
 
 
-
+
 
 
 
 
-m_metaLocation__IsNotDefault
-privateboolean m_metaLocation__IsNotDefault
+m_format__IsNotDefault
+privateboolean m_format__IsNotDefault
 
 
 
@@ -598,247 +598,247 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 publicHMastergetMaster()
 
 
-
+
 
 
 
 

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
index 3fedd0b..6414009 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
@@ -25,2075 +25,2063 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.hfile;
 019
-020import java.io.DataInputStream;
-021import java.io.DataOutput;
-022import java.io.DataOutputStream;
-023import java.io.IOException;
-024import java.io.InputStream;
-025import java.nio.ByteBuffer;
-026import 
java.util.concurrent.atomic.AtomicReference;
-027import java.util.concurrent.locks.Lock;
-028import 
java.util.concurrent.locks.ReentrantLock;
-029
-030import org.apache.commons.logging.Log;
-031import 
org.apache.commons.logging.LogFactory;
-032import 
org.apache.hadoop.fs.FSDataInputStream;
-033import 
org.apache.hadoop.fs.FSDataOutputStream;
-034import org.apache.hadoop.fs.Path;
-035import org.apache.hadoop.hbase.Cell;
-036import 
org.apache.hadoop.hbase.HConstants;
-037import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-038import 
org.apache.hadoop.hbase.fs.HFileSystem;
-039import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream;
-040import 
org.apache.hadoop.hbase.io.ByteBuffInputStream;
-041import 
org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
-042import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-043import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-044import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
-045import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
-046import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
-047import 
org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
-048import 
org.apache.hadoop.hbase.nio.ByteBuff;
-049import 
org.apache.hadoop.hbase.nio.MultiByteBuff;
-050import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-051import 
org.apache.hadoop.hbase.util.Bytes;
-052import 
org.apache.hadoop.hbase.util.ChecksumType;
-053import 
org.apache.hadoop.hbase.util.ClassSize;
-054import org.apache.hadoop.io.IOUtils;
-055
-056import 
com.google.common.annotations.VisibleForTesting;
-057import 
com.google.common.base.Preconditions;
-058
-059/**
-060 * Reads {@link HFile} version 2 blocks to HFiles and via {@link Cacheable} Interface to caches.
-061 * Version 2 was introduced in hbase-0.92.0. No longer has support for version 1 blocks since
-062 * hbase-1.3.0.
-063 *
-064 * <p>Version 1 was the original file block. Version 2 was introduced when we changed the hbase file
-065 * format to support multi-level block indexes and compound bloom filters (HBASE-3857).
-066 *
-067 * <h3>HFileBlock: Version 2</h3>
-068 * In version 2, a block is structured as follows:
-069 * <ul>
-070 * <li><b>Header:</b> See Writer#putHeader() for where header is written; header total size is
-071 * HFILEBLOCK_HEADER_SIZE
-072 * <ul>
-073 * <li>0. blockType: Magic record identifying the {@link BlockType} (8 bytes):
-074 * e.g. <code>DATABLK*</code>
-075 * <li>1. onDiskSizeWithoutHeader: Compressed -- a.k.a 'on disk' -- block size, excluding header,
-076 * but including tailing checksum bytes (4 bytes)
-077 * <li>2. uncompressedSizeWithoutHeader: Uncompressed block size, excluding header, and excluding
-078 * checksum bytes (4 bytes)
-079 * <li>3. prevBlockOffset: The offset of the previous block of the same type (8 bytes). This is
-080 * used to navigate to the previous block without having to go to the block index
-081 * <li>4: For minorVersions >=1, the ordinal describing checksum type (1 byte)
-082 * <li>5: For minorVersions >=1, the number of data bytes/checksum chunk (4 bytes)
-083 * <li>6: onDiskDataSizeWithHeader: For minorVersions >=1, the size of data 'on disk', including
-084 * header, excluding checksums (4 bytes)
-085 * </ul>
-086 * </li>
-087 * <li><b>Raw/Compressed/Encrypted/Encoded data:</b> The compression
-088 * algorithm is the same for all the blocks in an {@link HFile}. If compression is NONE, this is
-089 * just raw, serialized Cells.
-090 * <li><b>Tail:</b> For minorVersions >=1, a series of 4 byte checksums, one each for
-091 * the number of bytes specified by bytesPerChecksum.
-092 * </ul>
-093 *
-094 * <h3>Caching</h3>
-095 * Caches cache whole blocks with trailing checksums if any. We then tag on some metadata, the
-096 * content of BLOCK_METADATA_SPACE which will be flag on if we are doing 'hbase'
-097 * checksums and then the offset into the file which is needed when we re-make a cache key
-098 * when we return the block to the cache as 'done'. See {@link Cacheable#serialize(ByteBuffer)} and
-099 * {@link 
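
(Aside, not part of the patch: an illustrative decode of the seven version-2
header fields exactly as enumerated above, from a buffer positioned at a
block start. Field names mirror the javadoc; this is not the class's real
parser.)

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

static void readV2Header(ByteBuffer buf) {
  byte[] magic = new byte[8];
  buf.get(magic);                                   // 0: blockType magic, e.g. DATABLK*
  int onDiskSizeWithoutHeader = buf.getInt();       // 1: on-disk size, excl. header (4 bytes)
  int uncompressedSizeWithoutHeader = buf.getInt(); // 2: uncompressed size, excl. header (4 bytes)
  long prevBlockOffset = buf.getLong();             // 3: previous block of same type (8 bytes)
  byte checksumType = buf.get();                    // 4: checksum type ordinal (1 byte)
  int bytesPerChecksum = buf.getInt();              // 5: data bytes per checksum chunk (4 bytes)
  int onDiskDataSizeWithHeader = buf.getInt();      // 6: on-disk data size incl. header (4 bytes)
  System.out.printf("%s disk=%d mem=%d prev=%d type=%d chunk=%d withHeader=%d%n",
      new String(magic, StandardCharsets.US_ASCII), onDiskSizeWithoutHeader,
      uncompressedSizeWithoutHeader, prevBlockOffset, checksumType,
      bytesPerChecksum, onDiskDataSizeWithHeader);
}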

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dd7176bf/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
index 6e667ab..6f3e7f8 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
@@ -1866,7 +1866,7 @@ publicvoidReturns:
 scanner id to return to client if default operation should be
- bypassed, false otherwise
+ bypassed, null otherwise
 Throws:
 http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException - Exception
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dd7176bf/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.CoprocessOperationWithResult.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.CoprocessOperationWithResult.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.CoprocessOperationWithResult.html
index b118dde..9396586 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.CoprocessOperationWithResult.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.CoprocessOperationWithResult.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private abstract static class RegionServerCoprocessorHost.CoprocessOperationWithResult<T>
+private abstract static class RegionServerCoprocessorHost.CoprocessOperationWithResult<T>
 extends RegionServerCoprocessorHost.CoprocessorOperation
 
 
@@ -231,7 +231,7 @@ extends 
 
 result
-private T result
+private T result
 
 
 
@@ -248,7 +248,7 @@ extends 
 
 CoprocessOperationWithResult
-privateCoprocessOperationWithResult()
+privateCoprocessOperationWithResult()
 
 
 
@@ -267,7 +267,7 @@ extends 
 
 setResult
-public void setResult(T result)
+public void setResult(T result)
 
 
 
@@ -276,7 +276,7 @@ extends 
 
 getResult
-public T getResult()
+public T getResult()
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dd7176bf/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.CoprocessorOperation.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.CoprocessorOperation.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.CoprocessorOperation.html
index b1b1a23..45c9300 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.CoprocessorOperation.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.CoprocessorOperation.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private abstract static class RegionServerCoprocessorHost.CoprocessorOperation
+private abstract static class RegionServerCoprocessorHost.CoprocessorOperation
extends ObserverContext<RegionServerCoprocessorEnvironment>
 
 
@@ -206,7 +206,7 @@ extends 
 
 CoprocessorOperation
-publicCoprocessorOperation()
+publicCoprocessorOperation()
 
 
 
@@ -215,7 +215,7 @@ extends 
 
 CoprocessorOperation
-publicCoprocessorOperation(Useruser)
+publicCoprocessorOperation(Useruser)
 
 
 
@@ -232,7 +232,7 @@ extends 
 
 call
-public abstract void call(RegionServerObserver oserver,
+public abstract void call(RegionServerObserver oserver,
  ObserverContext<RegionServerCoprocessorEnvironment> ctx)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
@@ -247,7 +247,7 @@ extends 
 
 postEnvCall
-publicvoidpostEnvCall(RegionServerCoprocessorHost.RegionServerEnvironmentenv)
+publicvoidpostEnvCall(RegionServerCoprocessorHost.RegionServerEnvironmentenv)
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dd7176bf/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.EnvironmentPriorityComparator.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.EnvironmentPriorityComparator.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.EnvironmentPriorityComparator.html
index 05ddaea..98c1982 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.EnvironmentPriorityComparator.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.EnvironmentPriorityComparator.html
@@ -117,7 +117,7 @@ var 

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ef4c5a9/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.Call.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.Call.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.Call.html
deleted file mode 100644
index 27e0dee..000
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.Call.html
+++ /dev/null
@@ -1,612 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-Source code
-
-
-
-
-001/**
-002 * Licensed to the Apache Software 
Foundation (ASF) under one
-003 * or more contributor license 
agreements.  See the NOTICE file
-004 * distributed with this work for 
additional information
-005 * regarding copyright ownership.  The 
ASF licenses this file
-006 * to you under the Apache License, 
Version 2.0 (the
-007 * "License"); you may not use this file 
except in compliance
-008 * with the License.  You may obtain a 
copy of the License at
-009 *
-010 * 
http://www.apache.org/licenses/LICENSE-2.0
-011 *
-012 * Unless required by applicable law or 
agreed to in writing, software
-013 * distributed under the License is 
distributed on an "AS IS" BASIS,
-014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
-015 * See the License for the specific 
language governing permissions and
-016 * limitations under the License.
-017 */
-018
-019package org.apache.hadoop.hbase.ipc;
-020
-021import io.netty.bootstrap.ServerBootstrap;
-022import io.netty.buffer.ByteBuf;
-023import io.netty.buffer.PooledByteBufAllocator;
-024import io.netty.buffer.Unpooled;
-025import io.netty.channel.Channel;
-026import io.netty.channel.ChannelFuture;
-027import io.netty.channel.ChannelFutureListener;
-028import io.netty.channel.ChannelHandlerContext;
-029import io.netty.channel.ChannelInboundHandlerAdapter;
-030import io.netty.channel.ChannelInitializer;
-031import io.netty.channel.ChannelOption;
-032import io.netty.channel.ChannelOutboundHandlerAdapter;
-033import io.netty.channel.ChannelPipeline;
-034import io.netty.channel.ChannelPromise;
-035import io.netty.channel.EventLoopGroup;
-036import io.netty.channel.epoll.EpollEventLoopGroup;
-037import io.netty.channel.epoll.EpollServerSocketChannel;
-038import io.netty.channel.group.ChannelGroup;
-039import io.netty.channel.group.DefaultChannelGroup;
-040import io.netty.channel.nio.NioEventLoopGroup;
-041import io.netty.channel.socket.SocketChannel;
-042import io.netty.channel.socket.nio.NioServerSocketChannel;
-043import io.netty.handler.codec.ByteToMessageDecoder;
-044import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
-045import io.netty.util.concurrent.GlobalEventExecutor;
-046
-047import java.io.IOException;
-048import java.io.InterruptedIOException;
-049import java.net.InetAddress;
-050import java.net.InetSocketAddress;
-051import java.nio.ByteBuffer;
-052import java.util.Arrays;
-053import java.util.List;
-054import java.util.concurrent.CountDownLatch;
-055
-056import org.apache.commons.logging.Log;
-057import org.apache.commons.logging.LogFactory;
-058import org.apache.hadoop.conf.Configuration;
-059import org.apache.hadoop.hbase.CellScanner;
-060import org.apache.hadoop.hbase.HConstants;
-061import org.apache.hadoop.hbase.Server;
-062import org.apache.hadoop.hbase.classification.InterfaceStability;
-063import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
-064import org.apache.hadoop.hbase.nio.ByteBuff;
-065import org.apache.hadoop.hbase.nio.SingleByteBuff;
-066import org.apache.hadoop.hbase.security.AccessDeniedException;
-067import org.apache.hadoop.hbase.security.AuthMethod;
-068import org.apache.hadoop.hbase.security.HBasePolicyProvider;
-069import org.apache.hadoop.hbase.security.SaslStatus;
-070import org.apache.hadoop.hbase.security.SaslUtil;
-071import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
-072import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
-073import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
-074import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-075import org.apache.hadoop.hbase.util.Bytes;
-076import org.apache.hadoop.hbase.util.JVM;
-077import org.apache.hadoop.hbase.util.Pair;
-078import org.apache.hadoop.io.IntWritable;
-079import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
-080import org.apache.htrace.TraceInfo;
-081
-082/**
-083 * An RPC server with Netty4 implementation.
-084 *
-085 */
-086public class NettyRpcServer extends RpcServer {
-087
-088  public static final Log LOG = LogFactory.getLog(NettyRpcServer.class);
-089
-090  protected final InetSocketAddress bindAddress;
-091
-092  private final CountDownLatch closed = new CountDownLatch(1);
-093  private final Channel serverChannel;
-094  private final ChannelGroup

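The fields above sketch the usual Netty 4 server shape: a bound server Channel, a ChannelGroup tracking open connections, and a CountDownLatch released on close. What follows is a minimal, hedged sketch of that bootstrap pattern in plain Netty 4; the class name and handler wiring are invented for illustration, and this is not the NettyRpcServer implementation itself.

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.group.ChannelGroup;
import io.netty.channel.group.DefaultChannelGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
import io.netty.util.concurrent.GlobalEventExecutor;

import java.net.InetSocketAddress;

public class MiniNettyServer {                       // illustrative name
  private final ChannelGroup allChannels =
      new DefaultChannelGroup(GlobalEventExecutor.INSTANCE);
  private final EventLoopGroup bossGroup = new NioEventLoopGroup(1);
  private final EventLoopGroup workerGroup = new NioEventLoopGroup();
  private Channel serverChannel;

  public void start(InetSocketAddress bindAddress) throws InterruptedException {
    ServerBootstrap bootstrap = new ServerBootstrap()
        .group(bossGroup, workerGroup)
        .channel(NioServerSocketChannel.class)       // Epoll* variants exist on Linux
        .childOption(ChannelOption.TCP_NODELAY, true)
        .childHandler(new ChannelInitializer<SocketChannel>() {
          @Override
          protected void initChannel(SocketChannel ch) {
            // Split the byte stream into length-prefixed frames, the usual
            // first step for an RPC wire format.
            ch.pipeline().addLast(
                new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4));
          }
        });
    serverChannel = bootstrap.bind(bindAddress).sync().channel();
    allChannels.add(serverChannel);
  }

  public void stop() {
    allChannels.close().awaitUninterruptibly();      // close listener and connections
    bossGroup.shutdownGracefully();
    workerGroup.shutdownGracefully();
  }
}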
[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/31df4674/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcServer.CallCleanup.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcServer.CallCleanup.html b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcServer.CallCleanup.html
index db06fac..22e9c56 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcServer.CallCleanup.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcServer.CallCleanup.html
@@ -131,6 +131,60 @@
 
 
 
+
+Methods in org.apache.hadoop.hbase.ipc with parameters of type RpcServer.CallCleanup
+
+Modifier and Type
+Method and Description
+
+
+
+abstract RpcServer.Call
+RpcServer.Connection.createCall(int id,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService service,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor md,
+  org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader header,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.Message param,
+  CellScanner cellScanner,
+  RpcServer.Connection connection,
+  long size,
+  org.apache.htrace.TraceInfo tinfo,
+  InetAddress remoteAddress,
+  int timeout,
+  RpcServer.CallCleanup reqCleanup)
+
+
+RpcServer.Call
+SimpleRpcServer.Connection.createCall(int id,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService service,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor md,
+  org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader header,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.Message param,
+  CellScanner cellScanner,
+  RpcServer.Connection connection,
+  long size,
+  org.apache.htrace.TraceInfo tinfo,
+  InetAddress remoteAddress,
+  int timeout,
+  RpcServer.CallCleanup reqCleanup)
+
+
+RpcServer.Call
+NettyRpcServer.NettyConnection.createCall(int id,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService service,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor md,
+  org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader header,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.Message param,
+  CellScanner cellScanner,
+  RpcServer.Connection connection,
+  long size,
+  org.apache.htrace.TraceInfo tinfo,
+  InetAddress remoteAddress,
+  int timeout,
+  RpcServer.CallCleanup reqCleanup)
+
+
+
 
 Constructors in org.apache.hadoop.hbase.ipc with parameters of type RpcServer.CallCleanup
 
@@ -152,20 +206,34 @@
 RpcServer.CallCleanup reqCleanup)
 
 
-Call(int id,
+Call(int id,
 org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService service,
 org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor md,
 org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader header,
 org.apache.hadoop.hbase.shaded.com.google.protobuf.Message param,
 CellScanner cellScanner,
-SimpleRpcServer.Connection connection,
-SimpleRpcServer.Responder responder,
+RpcServer.Connection connection,
 long size,
 org.apache.htrace.TraceInfo tinfo,
 InetAddress remoteAddress,
 int timeout,
 RpcServer.CallCleanup reqCleanup)
 
+
+Call(int id,
+org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService service,
+org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor md,
+org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader header,
+org.apache.hadoop.hbase.shaded.com.google.protobuf.Message param,
+CellScanner cellScanner,
+RpcServer.Connection connection,
+long size,
+org.apache.htrace.TraceInfo tinfo,
+InetAddress remoteAddress,
+int timeout,
+RpcServer.CallCleanup reqCleanup,
+SimpleRpcServer.Responder responder)
+
 
 
 
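Both hunks above make the same point: every createCall variant and the shared Call constructor now thread a RpcServer.CallCleanup through to the call object, with the SimpleRpcServer.Responder demoted to a trailing, implementation-specific argument. A hedged sketch of the cleanup-hook idea follows; Runnable stands in for the real CallCleanup interface, and this Call skeleton is illustrative, not the HBase class.

// Sketch: the server attaches a cleanup hook when it creates the call, and
// runs it exactly once when the request bytes are no longer needed.
final class CallSketch {
  private Runnable reqCleanup;                 // stand-in for RpcServer.CallCleanup

  CallSketch(Runnable reqCleanup) {
    this.reqCleanup = reqCleanup;
  }

  void done() {
    if (reqCleanup != null) {
      reqCleanup.run();                        // e.g. return the request buffer to its pool
      reqCleanup = null;                       // guard against double release
    }
  }

  public static void main(String[] args) {
    CallSketch call = new CallSketch(() -> System.out.println("buffer released"));
    call.done();   // prints once
    call.done();   // no-op on the second invocation
  }
}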

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/31df4674/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcServer.Connection.html
--
diff --git 

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6f2e75f2/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
index 6c52543..f3f7a46 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
@@ -31,1797 +31,2040 @@
 023import java.util.ArrayList;
 024import java.util.Arrays;
 025import java.util.Collection;
-026import java.util.HashMap;
-027import java.util.LinkedList;
-028import java.util.List;
-029import java.util.Map;
-030import java.util.Optional;
-031import java.util.concurrent.CompletableFuture;
-032import java.util.concurrent.TimeUnit;
-033import java.util.concurrent.atomic.AtomicReference;
-034import java.util.function.BiConsumer;
-035import java.util.regex.Pattern;
-036import java.util.stream.Collectors;
-037
-038import com.google.common.annotations.VisibleForTesting;
-039
-040import io.netty.util.Timeout;
-041import io.netty.util.TimerTask;
-042import org.apache.commons.logging.Log;
-043import org.apache.commons.logging.LogFactory;
-044import org.apache.hadoop.hbase.HColumnDescriptor;
-045import org.apache.hadoop.hbase.HRegionInfo;
-046import org.apache.hadoop.hbase.HRegionLocation;
-047import org.apache.hadoop.hbase.MetaTableAccessor;
-048import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-049import org.apache.hadoop.hbase.NotServingRegionException;
-050import org.apache.hadoop.hbase.RegionLocations;
-051import org.apache.hadoop.hbase.ServerName;
-052import org.apache.hadoop.hbase.NamespaceDescriptor;
-053import org.apache.hadoop.hbase.HConstants;
-054import org.apache.hadoop.hbase.TableExistsException;
-055import org.apache.hadoop.hbase.TableName;
-056import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-057import org.apache.hadoop.hbase.TableNotFoundException;
-058import org.apache.hadoop.hbase.UnknownRegionException;
-059import org.apache.hadoop.hbase.classification.InterfaceAudience;
-060import org.apache.hadoop.hbase.classification.InterfaceStability;
-061import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-062import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-063import org.apache.hadoop.hbase.client.Scan.ReadType;
-064import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-065import org.apache.hadoop.hbase.client.replication.TableCFs;
-066import org.apache.hadoop.hbase.exceptions.DeserializationException;
-067import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-068import org.apache.hadoop.hbase.quotas.QuotaFilter;
-069import org.apache.hadoop.hbase.quotas.QuotaSettings;
-070import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-071import org.apache.hadoop.hbase.replication.ReplicationException;
-072import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-073import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-074import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-075import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-076import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-077import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-078import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-079import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-080import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-081import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-082import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-091import 

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/efd0601e/apidocs/src-html/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.html b/apidocs/src-html/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.html
deleted file mode 100644
index 779556b..000
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.html
+++ /dev/null
@@ -1,199 +0,0 @@
-Source code
-001/**
-002 *
-003 * Licensed to the Apache Software Foundation (ASF) under one
-004 * or more contributor license agreements.  See the NOTICE file
-005 * distributed with this work for additional information
-006 * regarding copyright ownership.  The ASF licenses this file
-007 * to you under the Apache License, Version 2.0 (the
-008 * "License"); you may not use this file except in compliance
-009 * with the License.  You may obtain a copy of the License at
-010 *
-011 *     http://www.apache.org/licenses/LICENSE-2.0
-012 *
-013 * Unless required by applicable law or agreed to in writing, software
-014 * distributed under the License is distributed on an "AS IS" BASIS,
-015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-016 * See the License for the specific language governing permissions and
-017 * limitations under the License.
-018 */
-019
-020package org.apache.hadoop.hbase.client;
-021
-022import org.apache.hadoop.hbase.HColumnDescriptor;
-023import org.apache.hadoop.hbase.HTableDescriptor;
-024import org.apache.hadoop.hbase.classification.InterfaceAudience;
-025
-026/**
-027 * Read-only table descriptor.
-028 */
-029@InterfaceAudience.Public
-030public class UnmodifyableHTableDescriptor extends HTableDescriptor {
-031  /**
-032   * Default constructor.
-033   * @deprecated  As of release 2.0.0. This will be removed in HBase 3.0.0.
-034   *  Use {@link #UnmodifyableHTableDescriptor(HTableDescriptor)}.
-035   */
-036  @Deprecated
-037  public UnmodifyableHTableDescriptor() {
-038    super();
-039  }
-040
-041  /*
-042   * Create an unmodifyable copy of an HTableDescriptor
-043   * @param desc
-044   */
-045  UnmodifyableHTableDescriptor(final HTableDescriptor desc) {
-046    super(desc.getTableName(), getUnmodifyableFamilies(desc), desc.getValues());
-047  }
-048
-049
-050  /*
-051   * @param desc
-052   * @return Families as unmodifiable array.
-053   */
-054  private static HColumnDescriptor[] getUnmodifyableFamilies(
-055      final HTableDescriptor desc) {
-056    HColumnDescriptor [] f = new HColumnDescriptor[desc.getFamilies().size()];
-057    int i = 0;
-058    for (HColumnDescriptor c: desc.getFamilies()) {
-059      f[i++] = c;
-060    }
-061    return f;
-062  }
-063
-064  /**
-065   * Does NOT add a column family. This object is immutable
-066   * @param family HColumnDescriptor of family to add.
-067   */
-068  @Override
-069  public UnmodifyableHTableDescriptor addFamily(final HColumnDescriptor family) {
-070    throw new UnsupportedOperationException("HTableDescriptor is read-only");
-071  }
-072
-073  @Override
-074  public UnmodifyableHTableDescriptor modifyFamily(HColumnDescriptor family) {
-075    throw new UnsupportedOperationException("HTableDescriptor is read-only");
-076  }
-077
-078  /**
-079   * @param column
-080   * @return Column descriptor for the passed family name or the family on
-081   * passed in column.
-082   */
-083  @Override
-084  public HColumnDescriptor removeFamily(final byte [] column) {
-085    throw new UnsupportedOperationException("HTableDescriptor is read-only");
-086  }
-087
-088  /**
-089   * @see org.apache.hadoop.hbase.HTableDescriptor#setReadOnly(boolean)
-090   */
-091  @Override
-092  public UnmodifyableHTableDescriptor setReadOnly(boolean readOnly) {
-093    throw new UnsupportedOperationException("HTableDescriptor is read-only");
-094  }
-095
-096  /**
-097   * @see org.apache.hadoop.hbase.HTableDescriptor#setValue(byte[], byte[])
-098   */
-099  @Override
-100  public UnmodifyableHTableDescriptor setValue(byte[] key, byte[] value) {
-101    throw new UnsupportedOperationException("HTableDescriptor is read-only");
-102  }
-103
-104  /**
-105   * @see org.apache.hadoop.hbase.HTableDescriptor#setValue(java.lang.String, java.lang.String)
-106   */
-107  @Override
-108  public UnmodifyableHTableDescriptor setValue(String key, String value) {
-109    throw new UnsupportedOperationException("HTableDescriptor is read-only");
-110  }
-111
-112  /**
-113   * @see org.apache.hadoop.hbase.HTableDescriptor#setMaxFileSize(long)
-114   */
-115  @Override
-116  public UnmodifyableHTableDescriptor setMaxFileSize(long maxFileSize) {
-117    throw new UnsupportedOperationException("HTableDescriptor is read-only");
-118  }
-119
-120  /**
-121   * @see 

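The deleted source above is a textbook read-only wrapper: the copy constructor snapshots the delegate's state once, and every mutator is overridden to throw UnsupportedOperationException. A self-contained sketch of the same pattern follows, with invented Descriptor classes standing in for the package-private HBase types:

class Descriptor {                                   // illustrative stand-in
  private long maxFileSize;
  public void setMaxFileSize(long v) { maxFileSize = v; }
  public long getMaxFileSize() { return maxFileSize; }
}

class UnmodifyableDescriptor extends Descriptor {
  UnmodifyableDescriptor(Descriptor d) {
    super.setMaxFileSize(d.getMaxFileSize());        // copy state once, up front
  }
  @Override
  public void setMaxFileSize(long v) {               // every mutator throws
    throw new UnsupportedOperationException("Descriptor is read-only");
  }
}

public class ReadOnlyDemo {
  public static void main(String[] args) {
    Descriptor d = new Descriptor();
    d.setMaxFileSize(128);
    Descriptor readOnly = new UnmodifyableDescriptor(d);
    System.out.println(readOnly.getMaxFileSize());   // 128: reads still work
    try {
      readOnly.setMaxFileSize(256);
    } catch (UnsupportedOperationException e) {
      System.out.println(e.getMessage());            // Descriptor is read-only
    }
  }
}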
[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/10601a30/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html b/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
index 302cfb3..26a93ce 100644
--- a/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
+++ b/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
@@ -293,22 +293,22 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 private HRegionServer m_regionServer
 
 
-
+
 
 
-m_filter
-private String m_filter
+m_bcn
+private String m_bcn
 
 
-
+
 
 
-m_filter__IsNotDefault
-private boolean m_filter__IsNotDefault
+m_bcn__IsNotDefault
+private boolean m_bcn__IsNotDefault
 
 
@@ -329,40 +329,40 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 private boolean m_bcv__IsNotDefault
 
 
-
+
 
 
-m_format
-private String m_format
+m_filter
+private String m_filter
 
 
-
+
 
 
-m_format__IsNotDefault
-private boolean m_format__IsNotDefault
+m_filter__IsNotDefault
+private boolean m_filter__IsNotDefault
 
 
-
+
 
 
-m_bcn
-private String m_bcn
+m_format
+private String m_format
 
 
-
+
 
 
-m_bcn__IsNotDefault
-private boolean m_bcn__IsNotDefault
+m_format__IsNotDefault
+private boolean m_format__IsNotDefault
 
 
@@ -408,31 +408,31 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 public HRegionServer getRegionServer()
 
 
-
+
 
 
-setFilter
-public void setFilter(String filter)
+setBcn
+public void setBcn(String bcn)
 
 
-
+
 
 
-getFilter
-public String getFilter()
+getBcn
+public String getBcn()
 
 
-
+
 
 
-getFilter__IsNotDefault
-public boolean getFilter__IsNotDefault()
+getBcn__IsNotDefault
+public boolean getBcn__IsNotDefault()
 
 
@@ -462,58 +462,58 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 public boolean getBcv__IsNotDefault()
 
 
-
+
 
 
-setFormat
-public void setFormat(String format)
+setFilter
+public void setFilter(String filter)
 
 
-
+
 
 
-getFormat
-public String getFormat()
+getFilter
+public String getFilter()
 
 
-
+
 
 
-getFormat__IsNotDefault
-public boolean getFormat__IsNotDefault()
+getFilter__IsNotDefault
+public boolean getFilter__IsNotDefault()
 
 
-
+
 
 
-setBcn
-public void setBcn(String bcn)
+setFormat
+public void setFormat(String format)
 
 
-
+
 
 
-getBcn
-public String getBcn()
+getFormat
+public String getFormat()
 
 
-
+
 
 
-getBcn__IsNotDefault
-public boolean getBcn__IsNotDefault()
+getFormat__IsNotDefault
+public boolean getFormat__IsNotDefault()
 
 

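The reshuffle above is mechanical, but it shows the triple that Jamon generates for each optional template argument: a setter, a getter, and a __IsNotDefault flag recording whether the caller overrode the default. A minimal sketch of that generated shape (illustrative only; the real ImplData is produced by the Jamon compiler):

public class ImplDataSketch {
  private String m_bcn;
  private boolean m_bcn__IsNotDefault;

  public void setBcn(String bcn) {
    m_bcn = bcn;
    m_bcn__IsNotDefault = true;        // the template should use this value,
  }                                    // not its declared default
  public String getBcn() { return m_bcn; }
  public boolean getBcn__IsNotDefault() { return m_bcn__IsNotDefault; }
}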

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/662ea7dc/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html
index ac4a9b3..be839b7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.Converter.html
@@ -30,212 +30,212 @@
 022import java.io.IOException;
 023import java.util.ArrayList;
 024import java.util.Arrays;
-025import java.util.LinkedList;
-026import java.util.List;
-027import java.util.Optional;
-028import java.util.concurrent.CompletableFuture;
-029import java.util.concurrent.TimeUnit;
-030import java.util.concurrent.atomic.AtomicReference;
-031import java.util.function.BiConsumer;
-032import java.util.regex.Pattern;
-033
-034import com.google.common.annotations.VisibleForTesting;
-035import org.apache.commons.logging.Log;
-036import org.apache.commons.logging.LogFactory;
-037import org.apache.hadoop.hbase.HColumnDescriptor;
-038import org.apache.hadoop.hbase.HRegionInfo;
-039import org.apache.hadoop.hbase.HRegionLocation;
-040import org.apache.hadoop.hbase.HTableDescriptor;
-041import org.apache.hadoop.hbase.MetaTableAccessor;
-042import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-043import org.apache.hadoop.hbase.NotServingRegionException;
-044import org.apache.hadoop.hbase.RegionLocations;
-045import org.apache.hadoop.hbase.ServerName;
-046import org.apache.hadoop.hbase.NamespaceDescriptor;
-047import org.apache.hadoop.hbase.HConstants;
-048import org.apache.hadoop.hbase.TableName;
-049import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-050import org.apache.hadoop.hbase.TableNotFoundException;
-051import org.apache.hadoop.hbase.UnknownRegionException;
-052import org.apache.hadoop.hbase.classification.InterfaceAudience;
-053import org.apache.hadoop.hbase.classification.InterfaceStability;
-054import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-055import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-056import org.apache.hadoop.hbase.client.Scan.ReadType;
-057import org.apache.hadoop.hbase.exceptions.DeserializationException;
-058import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-059import org.apache.hadoop.hbase.quotas.QuotaFilter;
-060import org.apache.hadoop.hbase.quotas.QuotaSettings;
-061import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-062import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-063import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-064import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-065import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-066import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-067import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-068import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-069import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-070import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-071import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-072import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-073import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-074import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-075import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-076import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-077import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-078import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-079import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-080import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-081import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-082import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-086import 

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b4bae59/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/CacheConfig.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/CacheConfig.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/CacheConfig.html
index 18e401a..4f40861 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/CacheConfig.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/CacheConfig.html
@@ -179,21 +179,27 @@
 
 
 
-HalfStoreFileReader(org.apache.hadoop.fs.FileSystem fs,
+HalfStoreFileReader(org.apache.hadoop.fs.FileSystem fs,
   org.apache.hadoop.fs.Path p,
   CacheConfig cacheConf,
   Reference r,
+  boolean isPrimaryReplicaStoreFile,
+  AtomicInteger refCount,
+  boolean shared,
   org.apache.hadoop.conf.Configuration conf)
 Creates a half file reader for a normal hfile.
 
 
 
-HalfStoreFileReader(org.apache.hadoop.fs.FileSystem fs,
+HalfStoreFileReader(org.apache.hadoop.fs.FileSystem fs,
   org.apache.hadoop.fs.Path p,
   FSDataInputStreamWrapper in,
   long size,
   CacheConfig cacheConf,
   Reference r,
+  boolean isPrimaryReplicaStoreFile,
+  AtomicInteger refCount,
+  boolean shared,
   org.apache.hadoop.conf.Configuration conf)
 Creates a half file reader for a hfile referred to by an hfilelink.
 
@@ -251,18 +257,20 @@
 
 
 static HFile.Reader
-HFile.createReader(org.apache.hadoop.fs.FileSystem fs,
+HFile.createReader(org.apache.hadoop.fs.FileSystem fs,
   org.apache.hadoop.fs.Path path,
   CacheConfig cacheConf,
+  boolean primaryReplicaReader,
   org.apache.hadoop.conf.Configuration conf)
 
 
 static HFile.Reader
-HFile.createReader(org.apache.hadoop.fs.FileSystem fs,
+HFile.createReader(org.apache.hadoop.fs.FileSystem fs,
   org.apache.hadoop.fs.Path path,
   FSDataInputStreamWrapper fsdis,
   long size,
   CacheConfig cacheConf,
+  boolean primaryReplicaReader,
   org.apache.hadoop.conf.Configuration conf)
 
 
@@ -290,11 +298,12 @@
 
 
 private static HFile.Reader
-HFile.pickReaderVersion(org.apache.hadoop.fs.Path path,
+HFile.pickReaderVersion(org.apache.hadoop.fs.Path path,
   FSDataInputStreamWrapper fsdis,
   long size,
   CacheConfig cacheConf,
   HFileSystem hfs,
+  boolean primaryReplicaReader,
   org.apache.hadoop.conf.Configuration conf)
 Method returns the reader given the specified arguments.
 
@@ -320,17 +329,29 @@
 
 
 
-HFileReaderImpl(org.apache.hadoop.fs.Path path,
+HFileReaderImpl(org.apache.hadoop.fs.Path path,
   FixedFileTrailer trailer,
   FSDataInputStreamWrapper fsdis,
   long fileSize,
   CacheConfig cacheConf,
   HFileSystem hfs,
+  boolean primaryReplicaReader,
   org.apache.hadoop.conf.Configuration conf)
 Opens a HFile.
 
 
+HFileReaderImpl(org.apache.hadoop.fs.Path path,
+  FixedFileTrailer trailer,
+  FSDataInputStreamWrapper fsdis,
+  long fileSize,
+  CacheConfig cacheConf,
+  HFileSystem hfs,
+  org.apache.hadoop.conf.Configuration conf)
+Deprecated.
+
+
+
 HFileWriterImpl(org.apache.hadoop.conf.Configuration conf,
   CacheConfig cacheConf,
   org.apache.hadoop.fs.Path path,
@@ -338,7 +359,7 @@
   CellComparator comparator,
   HFileContext fileContext)
 
-
+
 WriterFactory(org.apache.hadoop.conf.Configuration conf,
   CacheConfig cacheConf)
 
@@ -503,10 +524,11 @@
 
 
 private static void
-MobUtils.validateMobFile(org.apache.hadoop.conf.Configuration conf,
+MobUtils.validateMobFile(org.apache.hadoop.conf.Configuration conf,
   org.apache.hadoop.fs.FileSystem fs,
   org.apache.hadoop.fs.Path path,
-  CacheConfig cacheConfig)
+  CacheConfig cacheConfig,
+  boolean primaryReplica)
 Validates a mob file by opening and closing it.
 
 
@@ -596,9 +618,13 @@
 
 
 StoreFileReader
-StoreFileInfo.open(org.apache.hadoop.fs.FileSystem fs,
+StoreFileInfo.open(org.apache.hadoop.fs.FileSystem fs,
   CacheConfig cacheConf,
-  boolean canUseDropBehind)
+  boolean canUseDropBehind,
+  long readahead,
+

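The recurring change in this hunk is one new boolean, primaryReplicaReader, threaded from HFile.createReader down through pickReaderVersion and HFileReaderImpl (with the old HFileReaderImpl constructor kept but deprecated). Below is a hedged usage sketch against the five-argument createReader listed above; the file path is invented and error handling is elided.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;

public class HFileReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/hbase/example/hfile");    // hypothetical path
    CacheConfig cacheConf = new CacheConfig(conf);
    boolean primaryReplicaReader = true;             // false for secondary-replica reads
    HFile.Reader reader =
        HFile.createReader(fs, path, cacheConf, primaryReplicaReader, conf);
    try {
      System.out.println(reader.getEntries());       // number of cells in the file
    } finally {
      reader.close();
    }
  }
}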
[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2fcc2ae0/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html b/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
index 99f6464..6e4fbbb 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
@@ -903,16 +903,6 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 
-ChunkCreator
-Does the management of memstoreLAB chunk creations.
-
-
-
-ChunkCreator.MemStoreChunkPool
-A pool of Chunk instances.
-
-
-
 CompactedHFilesDischarger
 A chore service that periodically cleans up the compacted files when there are no active readers
 using those compacted files and also helps in clearing the block cache with these compacted
@@ -1172,17 +1162,22 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 
+MemStoreChunkPool
+A pool of Chunk instances.
+
+
+
 MemStoreCompactor
 The ongoing MemStore Compaction manager, dispatches a solo running compaction and interrupts
 the compaction if requested.
 
 
-
+
 MemStoreFlusher
 Thread that flushes cache on request
 
@@ -1191,23 +1186,23 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 sleep time which is invariant.
 
 
-
+
 MemStoreFlusher.FlushHandler
 
-
+
 MemStoreFlusher.FlushQueueEntry
 
-
+
 MemStoreFlusher.FlushRegionEntry
 Datastructure used in the flush queue.
 
 
-
+
 MemStoreLAB
 A memstore-local allocation buffer.
 
 
-
+
 MemStoreSegmentsIterator
 The MemStoreSegmentsIterator is designed to perform one iteration over given list of segments
 For another iteration new instance of MemStoreSegmentsIterator needs to be created
@@ -1215,529 +1210,529 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 in each period of time
 
 
-
+
 MemstoreSize
 Wraps the data size part and total heap space occupied by the memstore.
 
 
-
+
 MemStoreSnapshot
 Holds details of the snapshot taken on a MemStore.
 
 
-
+
 MetricsHeapMemoryManager
 This class is for maintaining the various regionserver's heap memory manager statistics and
 publishing them through the metrics interfaces.
 
 
-
+
 MetricsHeapMemoryManagerSource
 This interface will be implemented by a MetricsSource that will export metrics from
 HeapMemoryManager in RegionServer into the hadoop metrics system.
 
 
-
+
 MetricsHeapMemoryManagerSourceImpl
 Hadoop2 implementation of MetricsHeapMemoryManagerSource.
 
 
-
+
 MetricsRegion
 This is the glue between the HRegion and whatever hadoop shim layer
 is loaded (hbase-hadoop1-compat or hbase-hadoop2-compat).
 
 
-
+
 MetricsRegionAggregateSource
 This interface will be implemented by a MetricsSource that will export metrics from
 multiple regions into the hadoop metrics system.
 
 
-
+
 MetricsRegionAggregateSourceImpl
 
-
+
 MetricsRegionServer
 
 This class is for maintaining the various regionserver statistics
 and publishing them through the metrics interfaces.
 
 
-
+
 MetricsRegionServerSource
 Interface for classes that expose metrics about the regionserver.
 
 
-
+
 MetricsRegionServerSourceFactory
 Interface of a factory to create Metrics Sources used inside of regionservers.
 
 
-
+
 MetricsRegionServerSourceFactoryImpl.FactoryStorage
 
-
+
 MetricsRegionServerWrapper
 This is the interface that will expose RegionServer information to hadoop1/hadoop2
 implementations of the MetricsRegionServerSource.
 
 
-
+
 MetricsRegionSource
 This interface will be implemented to allow single regions to push metrics into
 MetricsRegionAggregateSource that will in turn push data to the Hadoop metrics system.
 
 
-
+
 MetricsRegionWrapper
 Interface of class that will wrap an HRegion and export numbers so they can be
 used in MetricsRegionSource
 
 
-
+
 MetricsRegionWrapperImpl
 
-
+
 MetricsTable
 
-
+
 MetricsTableAggregateSource
 This interface will be implemented by a MetricsSource that will export metrics from
 multiple regions of a table into the hadoop metrics system.
 
 
-
+
 MetricsTableAggregateSourceImpl
 
-
+
 MetricsTableSource
 This interface will be implemented to allow region server to push table metrics into
 MetricsRegionAggregateSource that will in turn push data to the Hadoop metrics system.
 
 
-
+
 MetricsTableWrapperAggregate
 Interface of class that will wrap a MetricsTableSource and export numbers so they can be
 used in MetricsTableSource
 
 
-
+
 MetricsTableWrapperAggregateImpl.MetricsTableValues
 
-
+
 MiniBatchOperationInProgress
 Wraps together the mutations which are applied as a batch to the region and their operation
 status and WALEdits.
 
 
-
+
 

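Two of the entries above, MemStoreLAB ("a memstore-local allocation buffer") and MemStoreChunkPool ("a pool of Chunk instances"), exist to keep memstore cell data in large reusable chunks instead of scattering small allocations across the heap. A generic sketch of that chunk-pooling idea follows; it is an illustration of the technique only, not the HBase implementation.

import java.util.concurrent.ConcurrentLinkedQueue;

final class ChunkPool {
  private final ConcurrentLinkedQueue<byte[]> pool = new ConcurrentLinkedQueue<>();
  private final int chunkSize;
  private final int maxPooled;

  ChunkPool(int chunkSize, int maxPooled) {
    this.chunkSize = chunkSize;
    this.maxPooled = maxPooled;
  }

  byte[] getChunk() {
    byte[] c = pool.poll();
    return c != null ? c : new byte[chunkSize];  // allocate only on a pool miss
  }

  void returnChunk(byte[] c) {
    if (c.length == chunkSize && pool.size() < maxPooled) {
      pool.offer(c);  // recycle instead of leaving garbage for the collector
    }
  }
}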
[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e4348f53/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
index 267ac80..a15fce9 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
@@ -255,7 +255,7 @@ extends 
 
 Methods inherited from class org.apache.hadoop.hbase.regionserver.HRegionServer
-abort, addToMovedRegions, addToOnlineRegions, checkFileSystem, cleanMovedRegions, closeAllRegions, closeAndOfflineRegionForSplitOrMerge, closeRegion, constructRegionServer, convertThrowableToIOE, createClusterConnection, createRegionLoad, createRegionServerStatusStub, createRegionServerStatusStub, execRegionServerService, getCacheConfig, getChoreService, getClusterConnection, getClusterId, getCompactionPressure, getCompactionRequester, getCompactSplitThread, getConfiguration, getConfigurationManager, getConnection, getCoordinatedStateManager, getExecutorService, getFavoredNodesForRegion, getFileSystem, getFlushPressure, getFlushRequester, getFlushThroughputController, getFromOnlineRegions, getHeapMemoryManager, getInfoServer, getLastSequenceId, getLeases, getMasterAddressTracker, getMetaTableLocator, getMetrics, getMostLoadedRegions, getNonceManager, getNumberOfOnlineRegions, getOnlineRegion, getOnlineRegions, getOnlineRegions, getOnlineRegionsLocalContext, getOnlineTables, getRecoveringRegions, getRegion, getRegionBlockLocations, getRegionByEncodedName, getRegionByEncodedName, getRegionServerAccounting, getRegionServerCoprocessorHost, getRegionServerCoprocessors, getRegionServerMetrics, getRegionServerQuotaManager, getRegionsInTransitionInRS, getReplicationSourceService, getRootDir, getRpcServer, getRSRpcServices, getSecureBulkLoadManager, getStartcode, getThreadWakeFrequency, getWAL, getWALFileSystem, getWalRoller, getWALRootDir, getWALs, handleReportForDutyResponse, isAborted, isOnline, isProcedureFinished, isStopped, isStopping, kill, movedRegionCleanerPeriod, onConfigurationChange, postOpenDeployTasks, postOpenDeployTasks, regionLock, removeFromOnlineRegions, reportRegionStateTransition, reportRegionStateTransition, reportRegionStateTransition, requestRegionSplit, setInitLatch, setupClusterConnection, shouldUseThisHostnameInstead, stop, toString, tryRegionServerReport, unassign, updateConfiguration, updateRegionFavoredNodesMapping, waitForServerOnline, walRollRequestFinished
+abort, addToMovedRegions, addToOnlineRegions, checkFileSystem, cleanMovedRegions, closeAllRegions, closeAndOfflineRegionForSplitOrMerge, closeRegion, constructRegionServer, convertThrowableToIOE, createClusterConnection, createRegionLoad, createRegionServerStatusStub, createRegionServerStatusStub, execRegionServerService, getCacheConfig, getChoreService, getClusterConnection, getClusterId, getCompactionPressure, getCompactionRequester, getCompactSplitThread, getConfiguration, getConfigurationManager, getConnection, getCoordinatedStateManager, getExecutorService, getFavoredNodesForRegion, getFileSystem, getFlushPressure, getFlushRequester, getFlushThroughputController, getFromOnlineRegions, getHeapMemoryManager, getInfoServer, getLastSequenceId, getLeases, getMasterAddressTracker, getMetaTableLocator, getMetrics, getMostLoadedRegions, getNonceManager, getNumberOfOnlineRegions, getOnlineRegion, getOnlineRegions, getOnlineRegions, getOnlineRegionsLocalContext, getOnlineTables, getRecoveringRegions, getRegion, getRegionBlockLocations, getRegionByEncodedName, getRegionByEncodedName, getRegionServerAccounting, getRegionServerCoprocessorHost, getRegionServerCoprocessors, getRegionServerMetrics, getRegionServerQuotaManager, getRegionsInTransitionInRS, getReplicationSourceService, getRootDir, getRpcServer, getRSRpcServices, getSecureBulkLoadManager, getStartcode, getThreadWakeFrequency, getWAL, getWALFileSystem, getWalRoller, getWALRootDir, getWALs, handleReportForDutyResponse, initializeMemStoreChunkCreator, isAborted, isOnline, isProcedureFinished, isStopped, isStopping, kill, movedRegionCleanerPeriod, onConfigurationChange, postOpenDeployTasks, postOpenDeployTasks, regionLock, removeFromOnlineRegions, reportRegionStateTransition, reportRegionStateTransition, reportRegionStateTransition, requestRegionSplit, setInitLatch, setupClusterConnection, shouldUseThisHostnameInstead, stop, toString, tryRegionServerReport, unassign, updateConfiguration, updateRegionFavoredNodesMapping, waitForServerOnline, walRollRequestFinished
 
 
 


[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e57d1b63/apidocs/org/apache/hadoop/hbase/ScheduledChore.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/ScheduledChore.html b/apidocs/org/apache/hadoop/hbase/ScheduledChore.html
deleted file mode 100644
index c54efc1..000
--- a/apidocs/org/apache/hadoop/hbase/ScheduledChore.html
+++ /dev/null
@@ -1,581 +0,0 @@
-ScheduledChore (Apache HBase 2.0.0-SNAPSHOT API)
-org.apache.hadoop.hbase
-Class ScheduledChore
-
-java.lang.Object
-  org.apache.hadoop.hbase.ScheduledChore
-
-All Implemented Interfaces:
-Runnable
-
-@InterfaceAudience.Public
-public abstract class ScheduledChore
-extends Object
-implements Runnable
-ScheduledChore is a task performed on a period in hbase. ScheduledChores become active once
- scheduled with a ChoreService via ChoreService.scheduleChore(ScheduledChore). The
- chore is run in a ScheduledThreadPoolExecutor and competes with other ScheduledChores for
- access to the threads in the core thread pool. If an unhandled exception occurs, the chore
- cancellation is logged. Implementers should consider whether or not the Chore will be able to
- execute within the defined period. It is bad practice to define a ScheduledChore whose execution
- time exceeds its period since it will try to hog one of the threads in the ChoreService's
- thread pool.
-
- Don't subclass ScheduledChore if the task relies on being woken up for something to do, such as
- an entry being added to a queue, etc.
-
-
-Constructor Summary
-
-Constructors
-
-Modifier
-Constructor and Description
-
-
-protected 
-ScheduledChore()
-This constructor is for test only.
-
-
-ScheduledChore(String name,
-  Stoppable stopper,
-  int period)
-
-
-ScheduledChore(String name,
-  Stoppable stopper,
-  int period,
-  long initialDelay)
-
-
-ScheduledChore(String name,
-  Stoppable stopper,
-  int period,
-  long initialDelay,
-  TimeUnit unit)
-
-
-Method Summary
-
-All Methods  Instance Methods  Abstract Methods  Concrete Methods
-
-Modifier and Type
-Method and Description
-
-
-void
-cancel()
-
-
-void
-cancel(boolean mayInterruptIfRunning)
-
-
-protected abstract void
-chore()
-The task to execute on each scheduled execution of the Chore
-
-
-protected void
-cleanup()
-Override to run cleanup tasks when the Chore encounters an error and must stop running
-
-
-long
-getInitialDelay()
-
-
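From the contract shown above, a concrete chore supplies a name, a Stoppable, and a period to the superclass constructor and implements the abstract chore() method. A hedged sketch follows; the chore name and body are invented, and the period is assumed to be in the constructor's default time unit (the longer constructors take an explicit initialDelay and TimeUnit).

import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class CacheEvictionChore extends ScheduledChore {   // illustrative name
  public CacheEvictionChore(Stoppable stopper) {
    super("CacheEvictionChore", stopper, 60000);           // name, stopper, period
  }

  @Override
  protected void chore() {
    // Runs once per period on a ChoreService thread. Keep this well under the
    // period: per the javadoc above, an overrunning chore hogs a pool thread.
    evictExpiredEntries();
  }

  private void evictExpiredEntries() {
    // application-specific work
  }
}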