[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html

diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 2b1a8b7..50028a7 100644

Regenerated Javadoc for HRegion.RegionScannerImpl; only the source-line anchors behind the member links changed. Reconstructed, the page documents:

  class HRegion.RegionScannerImpl
  extends Object
  implements RegionScanner, RpcCallback

  "RegionScannerImpl is used to combine scanners from multiple Stores (aka column families)."

Fields whose links were re-anchored:

  KeyValueHeap storeHeap
  KeyValueHeap joinedHeap
      Heap of key-values that are not essential for the provided filters and are
      thus read on demand, if on-demand column family loading is enabled.
  protected Cell joinedContinuationRow
      If the joined heap data gathering is interrupted due to scan limits, this
      will contain the row for which we are populating the values.
  private boolean filterClosed
  protected final byte[] stopRow
  protected final boolean includeStopRow
  protected final HRegion region
  protected final CellComparator comparator
  private final long readPt
  private final long maxResultSize
  private final ScannerContext defaultScannerContext
  private final FilterWrapper filter

Constructors and methods re-anchored:

  RegionScannerImpl(Scan scan, List<KeyValueScanner> additionalScanners, HRegion region)
      throws IOException
  RegionScannerImpl(Scan scan, List<KeyValueScanner> additionalScanners, HRegion region,
      long nonceGroup, ...) throws IOException
  public HRegionInfo getRegionInfo()
      Specified by: getRegionInfo in interface RegionScanner
  protected void initializeScanners(Scan scan, List<KeyValueScanner> additionalScanners)
      throws IOException
  protected void initializeKVHeap(List<KeyValueScanner> scanners,
      List<KeyValueScanner> joinedScanners, HRegion region) throws IOException
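The joinedHeap above only comes into play when a scan opts into on-demand column family loading. A minimal client-side sketch, assuming made-up table and family names ("meta", "payload", "status"); setLoadColumnFamiliesOnDemand is the real Scan switch:

  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
  import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
  import org.apache.hadoop.hbase.util.Bytes;

  public class OnDemandScanExample {
    public static Scan buildScan() {
      Scan scan = new Scan();
      scan.addFamily(Bytes.toBytes("meta"));    // essential: the filter reads it
      scan.addFamily(Bytes.toBytes("payload")); // non-essential: large values
      scan.setFilter(new SingleColumnValueFilter(Bytes.toBytes("meta"),
          Bytes.toBytes("status"), CompareOp.EQUAL, Bytes.toBytes("OPEN")));
      // Families the filter does not need are routed to the joinedHeap and
      // only fetched for rows the filter actually matches.
      scan.setLoadColumnFamiliesOnDemand(true);
      return scan;
    }
  }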
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html

diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html b/devapidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
index c91f40a..e65c773 100644

Regenerated "Use of HBaseIOException" page. The only change is row ordering: for each method pair, the BaseLoadBalancer entry now sorts ahead of the FavoredStochasticBalancer one. The affected declarations, reconstructed:

  void initialize()
  ServerName randomAssignment(HRegionInfo regionInfo, List<ServerName> servers)
      Used to assign a single region to a random server.
  Map<ServerName, List<HRegionInfo>> retainAssignment(
      Map<HRegionInfo, ServerName> regions, List<ServerName> servers)
      ... available/online servers available for assignment.
  Map<ServerName, List<HRegionInfo>> roundRobinAssignment(
      List<HRegionInfo> regions, List<ServerName> servers)
      ... simple round-robin assignment.
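For orientation, "simple round-robin assignment" here just means dealing regions out to servers in turn. A generic sketch of that idea, not BaseLoadBalancer's actual code; R and S stand in for HRegionInfo and ServerName:

  import java.util.ArrayList;
  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;

  public class RoundRobinSketch {
    public static <R, S> Map<S, List<R>> roundRobinAssignment(List<R> regions,
        List<S> servers) {
      Map<S, List<R>> plan = new HashMap<>();
      for (S server : servers) {
        plan.put(server, new ArrayList<>());
      }
      for (int i = 0; i < regions.size(); i++) {
        // region i goes to server i mod N, so per-server counts differ by at most one
        plan.get(servers.get(i % servers.size())).add(regions.get(i));
      }
      return plan;
    }
  }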
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html

diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
index 35d5549..7f42873 100644
@@ -115,2816 +115,2814 @@

Source view of RawAsyncHBaseAdmin (ModifyColumnFamilyProcedureBiConsumer anchor). The hunk shrank from 2,816 to 2,814 lines; the visible portion is the removal side of the shaded-protobuf import block (AdminProtos.SplitRegion*/StopServer*/UpdateConfiguration*, HBaseProtos.ProcedureDescription/TableSchema/RegionSpecifierType, and the long run of MasterProtos request/response pairs: AbortProcedure*, AddColumn*, AssignRegion*, Balance*, CreateNamespace*, DeleteNamespace*, DeleteSnapshot*, DisableTable*, EnableCatalogJanitor*, DrainRegionServers*, EnableTable*, DeleteColumn*, ExecProcedure*, GetClusterStatus*, GetCompletedSnapshots*, ...), re-emitted with shifted line numbers.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html

diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
index 5c95397..860416b 100644
@@ -293,7944 +293,7962 @@

Source view of HRegion (RowLockImpl anchor). The visible change in this hunk is two new metric fields added after the existing compaction counters; everything that follows is the same code shifted down two lines. Reconstructed:

   final AtomicLong compactionsFailed = new AtomicLong(0L);
   final AtomicLong compactionNumFilesCompacted = new AtomicLong(0L);
   final AtomicLong compactionNumBytesCompacted = new AtomicLong(0L);
+  final AtomicLong compactionsQueued = new AtomicLong(0L);
+  final AtomicLong flushesQueued = new AtomicLong(0L);

Shifted but unchanged context:

  private final WAL wal;
  private final HRegionFileSystem fs;
  protected final Configuration conf;
  private final Configuration baseConf;
  private final int rowLockWaitDuration;
  static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000;

  // The internal wait duration to acquire a lock before read/update
  // from the region. It is not per row. The purpose of this wait time
  // is to avoid waiting a long time while the region is busy, so that
  // we can release the IPC handler soon enough to improve the
  // availability of the region server. It can be adjusted by
  // tuning configuration "hbase.busy.wait.duration".
  final long busyWaitDuration;
  static final long DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;

  // If updating multiple rows in one call, wait longer,
  // i.e. waiting for busyWaitDuration * # of rows. However,
  // we can limit the max multiplier.
  final int maxBusyWaitMultiplier;

  // Max busy wait duration. There is no point to wait longer than the RPC
  // purge timeout, when a RPC call will be terminated by the RPC engine.
  final long maxBusyWaitDuration;

  // Max cell size. If nonzero, the maximum allowed size for any given cell
  // in bytes
  final long maxCellSize;

  // negative number indicates infinite timeout
  static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
  final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool();

  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;

  /**
   * The sequence ID that was encountered when this region was opened.
   */
  private long openSeqNum = HConstants.NO_SEQNUM;

  /**
   * The default setting for whether to enable on-demand CF loading for
   * scan requests to this region. Requests can override it.
   */
  private boolean isLoadingCfsOnDemandDefault = false;

  private final AtomicInteger majorInProgress = new AtomicInteger(0);
  private final AtomicInteger minorInProgress = new AtomicInteger(0);

  //
  // Context: During replay we want to ensure that we do not lose any data. So, we
  // have to be conservative in how we replay wals. For each store, we calculate
  // the maxSeqId up to which the store was ...
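The busy-wait comment above names the knob it refers to. A minimal sketch of tuning it from configuration code; the key comes straight from the comment, while the chosen value is an arbitrary example:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class BusyWaitConfigExample {
    public static Configuration tuned() {
      Configuration conf = HBaseConfiguration.create();
      // How long a region operation waits for the region lock before failing
      // with a busy signal, in milliseconds; defaults to the HBase RPC timeout.
      conf.setLong("hbase.busy.wait.duration", 10000L);
      return conf;
    }
  }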
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.TagInfo.html

diff --git a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.TagInfo.html b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.TagInfo.html
new file mode 100644
index 000..0b6b941

New Javadoc page (Apache HBase 3.0.0-SNAPSHOT API) for a new inner class. Stripped of the navigation chrome, it documents:

  package org.apache.hadoop.hbase.security.visibility

  private static class VisibilityNewVersionBehaivorTracker.TagInfo
  extends java.lang.Object

  Enclosing class: VisibilityNewVersionBehaivorTracker

  Field Summary
    (package private) Byte format
    (package private) List<Tag> tags

  Constructor Summary
    private TagInfo()
    private TagInfo(Cell c)

  Method Summary
    Methods inherited from class java.lang.Object: clone, equals, finalize,
    getClass, hashCode, notify, notifyAll, toString, wait
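Read as plain Java, the page describes a small struct-like holder. A hypothetical skeleton matching only the documented members; the constructor bodies are not shown in the Javadoc, so what they do is an assumption flagged in the comments:

  import java.util.ArrayList;
  import java.util.List;

  import org.apache.hadoop.hbase.Cell;
  import org.apache.hadoop.hbase.Tag;

  final class TagInfoSketch {
    List<Tag> tags;  // the cell's visibility tags
    Byte format;     // serialization format of those tags, if recorded

    private TagInfoSketch() {
      this.tags = new ArrayList<>();  // no-arg form: empty tag list
    }

    private TagInfoSketch(Cell c) {
      this();
      // assumption: the real TagInfo(Cell) extracts visibility tags from c
    }
  }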
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html

diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index 49ef112..b3d1843 100644
@@ -141,3316 +141,3314 @@

Source view of HMaster (InitializationMonitor anchor). The hunk shrank from 3,316 to 3,314 lines; the visible portion is the removal side of HMaster's import block (procedure2 ProcedureEvent/ProcedureExecutor/WALProcedureStore, quotas, regionserver, replication, security, shaded protobuf, util classes from Addressing through ZKDataMigrator, the zookeeper trackers, KeeperException, and the Jetty Server/ServerConnector/ServletHolder/WebAppContext imports), re-emitted with shifted line numbers.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/src-html/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.html

diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.html
index 9677c93..54e0624 100644
@@ -513,13 +513,7 @@

Source view of ShortCircuitMasterConnection: the dispatchMergingRegions override was removed, leaving splitRegion as the last method. Reconstructed:

       throws ServiceException {
     return stub.splitRegion(controller, request);
   }
-
-  @Override
-  public DispatchMergingRegionsResponse dispatchMergingRegions(RpcController controller,
-      DispatchMergingRegionsRequest request) throws ServiceException {
-    return stub.dispatchMergingRegions(controller, request);
-  }
 }
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/org/apache/hadoop/hbase/HColumnDescriptor.html

diff --git a/devapidocs/org/apache/hadoop/hbase/HColumnDescriptor.html b/devapidocs/org/apache/hadoop/hbase/HColumnDescriptor.html
index bdad4fe..8477715 100644

The deprecated HColumnDescriptor constructors now carry explicit replacement advice in their Javadoc:

  HColumnDescriptor(String familyName)
      Deprecated. use ColumnFamilyDescriptorBuilder.of(String)
      Construct a column descriptor specifying only the family name.
      The other attributes are defaulted.

  HColumnDescriptor(byte[] familyName)
      Deprecated. use ColumnFamilyDescriptorBuilder.of(byte[])
      Construct a column descriptor specifying only the family name.
      The other attributes are defaulted.

  HColumnDescriptor(HColumnDescriptor desc)
      Deprecated. use ColumnFamilyDescriptorBuilder.copy(ColumnFamilyDescriptor)
      Constructor. Makes a deep copy of the supplied descriptor. Can make a
      modifiable descriptor from an UnmodifyableHColumnDescriptor.

  protected HColumnDescriptor(HColumnDescriptor desc, boolean deepClone)
      Deprecated.
  protected HColumnDescriptor(ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor delegate)
      Deprecated.

Deprecated accessors whose anchors shifted but whose text is unchanged:

  @Deprecated
  public static byte[] isLegalFamilyName(byte[] b)
      Deprecated. Use ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(byte[]).
  public byte[] getName()
  public String getNameAsString()
  public byte[] getValue(byte[] key)
  public String getValue(String key)
  public Map<Bytes, Bytes> getValues() ...
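Migration is mechanical. A short before/after sketch using the replacements the Javadoc now names; the family name "f" is arbitrary, and the return type of copy(...) is assumed here:

  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  public class ColumnFamilyMigrationExample {
    public static void main(String[] args) {
      // Old: new HColumnDescriptor("f") / new HColumnDescriptor(familyBytes)
      ColumnFamilyDescriptor fromString = ColumnFamilyDescriptorBuilder.of("f");
      ColumnFamilyDescriptor fromBytes = ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("f"));
      // Old: new HColumnDescriptor(desc) deep copy; copy(...) is the documented
      // replacement (return type assumed to be ColumnFamilyDescriptor).
      ColumnFamilyDescriptor deepCopy = ColumnFamilyDescriptorBuilder.copy(fromString);
      System.out.println(deepCopy.getNameAsString());
    }
  }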
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html

diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
index 504e470..38667c0 100644
@@ -2866,5375 +2866,5371 @@

Source view of HRegion (ObservedExceptionsInBatch anchor). The hunk is the removal side of code that only shifted line numbers (5,375 lines became 5,371 in this hunk). Reconstructed, the shown code is:

    checkResources();
    startRegionOperation(Operation.DELETE);
    try {
      delete.getRow();
      // All edits for the given row (across all column families) must happen atomically.
      doBatchMutate(delete);
    } finally {
      closeRegionOperation(Operation.DELETE);
    }
  }

  /**
   * Row needed by below method.
   */
  private static final byte[] FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly");

  /**
   * This is used only by unit tests. Not required to be a public API.
   * @param familyMap map of family to edits for the given family.
   * @throws IOException
   */
  void delete(NavigableMap<byte[], List<Cell>> familyMap, Durability durability)
      throws IOException {
    Delete delete = new Delete(FOR_UNIT_TESTS_ONLY);
    delete.setFamilyCellMap(familyMap);
    delete.setDurability(durability);
    doBatchMutate(delete);
  }

  @Override
  public void prepareDeleteTimestamps(Mutation mutation, Map<byte[], List<Cell>> familyMap,
      byte[] byteNow) throws IOException {
    for (Map.Entry<byte[], List<Cell>> e : familyMap.entrySet()) {

      byte[] family = e.getKey();
      List<Cell> cells = e.getValue();
      assert cells instanceof RandomAccess;

      Map<byte[], Integer> kvCount = new TreeMap<>(Bytes.BYTES_COMPARATOR);
      int listSize = cells.size();
      for (int i = 0; i < listSize; i++) {
        Cell cell = cells.get(i);
        // Check if time is LATEST, change to time of most recent addition if so
        // This is expensive.
        if (cell.getTimestamp() == HConstants.LATEST_TIMESTAMP && CellUtil.isDeleteType(cell)) {
          byte[] qual = CellUtil.cloneQualifier(cell);
          if (qual == null) qual = HConstants.EMPTY_BYTE_ARRAY;

          Integer count = kvCount.get(qual);
          if (count == null) {
            kvCount.put(qual, 1);
          } else {
            kvCount.put(qual, count + 1);
          }
          count = kvCount.get(qual);

          Get get = new Get(CellUtil.cloneRow(cell));
          get.setMaxVersions(count);
          get.addColumn(family, qual);
          if (coprocessorHost != null) {
            if (!coprocessorHost.prePrepareTimeStampForDeleteVersion(mutation, cell,
                byteNow, get)) {
              updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
            }
          } else {
            updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
          }
        } else {
          CellUtil.updateLatestStamp(cell, byteNow, 0);
        }
      }
    }
  }

  void updateDeleteLatestVersionTimeStamp(Cell cell, Get get, int count, byte[] byteNow)
      throws IOException {
    List<Cell> result = get(get, false);

    if (result.size() < count) {
      // Nothing to delete
      CellUtil.updateLatestStamp(cell, byteNow, 0);
      return;
    }
    if (result.size() > count) {
      throw new RuntimeException("Unexpected size: " + result.size());
    }
    Cell getCell = result.get(count - 1);
    CellUtil.setTimestamp(cell, getCell.getTimestamp());
  }

  @Override
  public void put(Put put) throws IOException {
    checkReadOnly();

    // Do a rough check that we have resources to accept a write. The check is
    // 'rough' in that between the resource check and the call to obtain a
    // read lock, resources may run out. For now, the thought is that this
    // will be extremely rare; we'll deal with it when it happens.
    checkResources();
    startRegionOperation(Operation.PUT);
    try {
      // All edits for the given row (across all column families) must happen atomically.
      doBatchMutate(put);
    } finally {
      closeRegionOperation(Operation.PUT);
    }
  }

  /**
   * Struct-like class that tracks the ...
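The "This is expensive" branch above only fires when a version delete arrives carrying LATEST_TIMESTAMP, forcing a server-side read-back of the row. Clients can sidestep it by stamping the delete themselves. A sketch; row, family, qualifier, and timestamp values are made up:

  import org.apache.hadoop.hbase.client.Delete;
  import org.apache.hadoop.hbase.util.Bytes;

  public class DeleteTimestampExample {
    public static Delete[] build() {
      byte[] row = Bytes.toBytes("r1");
      byte[] family = Bytes.toBytes("f");
      byte[] qualifier = Bytes.toBytes("q");
      // No timestamp: the cell carries LATEST_TIMESTAMP, so the region must
      // issue a Get to resolve which version dies (the expensive path above).
      Delete implicit = new Delete(row).addColumn(family, qualifier);
      // Explicit timestamp: no read-back needed.
      Delete explicit = new Delete(row).addColumn(family, qualifier, 1500000000000L);
      return new Delete[] { implicit, explicit };
    }
  }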
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html

diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
index feb42ea..4bd98f4 100644
@@ -185,4189 +185,4266 @@

Source view of HBaseAdmin (AbortProcedureFuture anchor); the file grew from 4,189 to 4,266 lines in this hunk. Two imports were added:

+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;

The rest of the visible hunk is the unchanged import block and class Javadoc shifted down two lines. Reconstructed, that Javadoc reads:

  /**
   * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private
   * indicating that this is an HBase-internal class as defined in
   * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
   * There are no guarantees for backwards source / binary compatibility and
   * methods or class can change or go away without deprecation.
   * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin}
   * instead of constructing an HBaseAdmin directly.
   *
   * <p>Connection should be an <i>unmanaged</i> connection obtained via
   * {@link ConnectionFactory#createConnection(Configuration)}
   *
   * @see ConnectionFactory
   * @see Connection
   * @see Admin
   */
  @InterfaceAudience.Private
  @InterfaceStability.Evolving
  public class HBaseAdmin implements Admin {
    private static final Log LOG = LogFactory.getLog(HBaseAdmin.class);

    private static final String ZK_IDENTIFIER_PREFIX = "hbase-admin-on-";
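The Javadoc's advice in practice: obtain Admin from an unmanaged Connection rather than constructing HBaseAdmin. A minimal sketch; the listTableNames call is just a placeholder operation:

  import java.io.IOException;
  import java.util.Arrays;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class AdminUsageExample {
    public static void main(String[] args) throws IOException {
      Configuration conf = HBaseConfiguration.create();
      // try-with-resources closes both the Admin and the unmanaged Connection.
      try (Connection connection = ConnectionFactory.createConnection(conf);
           Admin admin = connection.getAdmin()) {
        // All administrative calls go through the public Admin interface.
        System.out.println(Arrays.toString(admin.listTableNames()));
      }
    }
  }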
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/devapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractRpcClient.RpcChannelImplementation.html

diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractRpcClient.RpcChannelImplementation.html b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractRpcClient.RpcChannelImplementation.html
index 30d5dd8..beacb44 100644
@@ -109,497 +109,499 @@

Source view of AbstractRpcClient (RpcChannelImplementation anchor): a FindBugs suppression was added on the shared token-handler map, shifting the rest of the field block down two lines. Reconstructed:

   private static final ScheduledExecutorService IDLE_CONN_SWEEPER = Executors
       .newScheduledThreadPool(1, Threads.newDaemonThreadFactory("Idle-Rpc-Conn-Sweeper"));

+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MS_MUTABLE_COLLECTION_PKGPROTECT",
+      justification="the rest of the system which live in the different package can use")
   protected final static Map<Kind, TokenSelector<? extends TokenIdentifier>> TOKEN_HANDLERS =
       new HashMap<>();

   static {
     TOKEN_HANDLERS.put(Kind.HBASE_AUTH_TOKEN, new AuthenticationTokenSelector());
   }

Shifted but unchanged context:

  protected boolean running = true; // if client runs

  protected final Configuration conf;
  protected final String clusterId;
  protected final SocketAddress localAddr;
  protected final MetricsConnection metrics;

  protected final UserProvider userProvider;
  protected final CellBlockBuilder cellBlockBuilder;

  protected final int minIdleTimeBeforeClose; // if the connection is idle for more than this
  // time (in ms), it will be closed at any moment.
  protected final int maxRetries; // the max. no. of retries for socket connections
  protected final long failureSleep; // Time to sleep before retry on failure.
  protected final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
  protected final boolean tcpKeepAlive; // if T then use keepalives
  protected final Codec codec;
  protected final CompressionCodec compressor;
  protected final boolean fallbackAllowed;

  protected final FailedServers failedServers;

  protected final int connectTO;
  protected final int readTO;
  protected final int writeTO;

  protected final PoolMap<ConnectionId, T> connections;

  private final AtomicInteger callIdCnt = new AtomicInteger(0);

  private final ScheduledFuture<?> cleanupIdleConnectionTask;

  private int maxConcurrentCallsPerServer;

  private static final LoadingCache<InetSocketAddress, AtomicInteger> concurrentCounterCache =
      CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.HOURS).
          build(new CacheLoader<InetSocketAddress, AtomicInteger>() {
            @Override public AtomicInteger load(InetSocketAddress key) throws Exception {
              return new AtomicInteger(0);
            }
          });

  /**
   * Construct an IPC client for the cluster <code>clusterId</code>
   * @param conf configuration
   * @param clusterId the cluster id
   * @param localAddr client socket bind address.
   * @param metrics the connection metrics
   */
  public ...
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html

diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
index 75db22d..99a09f9 100644
@@ -37,2710 +37,2816 @@

Source view of RawAsyncHBaseAdmin (CreateTableProcedureBiConsumer anchor). The file grew from 2,710 to 2,816 lines in this hunk; the visible portion is the removal side of the import block (java.util and java.util.concurrent, Guava VisibleForTesting, Netty Timeout/TimerTask, commons-io/commons-logging, the org.apache.hadoop.hbase core and client classes, quotas, replication, and the shaded protobuf AdminProtos request/response imports), re-emitted with new line numbers.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html

diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
index 71844ce..75db22d 100644
@@ -105,2564 +105,2642 @@

Source view of RawAsyncHBaseAdmin (DeleteTableProcedureBiConsumer anchor). Same pattern: the file grew from 2,564 to 2,642 lines in this hunk, and the visible portion is the removal side of the shaded HBaseProtos/MasterProtos import block (ProcedureDescription, TableSchema, RegionSpecifierType, and the AbortProcedure* through GetSchemaAlterStatus* request/response pairs), re-emitted with new line numbers.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html index f5bc73a..feb42ea 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html @@ -4044,345 +4044,330 @@ 4036 4037 @Override 4038 public void drainRegionServers(ListServerName servers) throws IOException { -4039final ListHBaseProtos.ServerName pbServers = new ArrayList(servers.size()); -4040for (ServerName server : servers) { -4041 // Parse to ServerName to do simple validation. -4042 ServerName.parseServerName(server.toString()); -4043 pbServers.add(ProtobufUtil.toServerName(server)); -4044} -4045 -4046executeCallable(new MasterCallableVoid(getConnection(), getRpcControllerFactory()) { -4047 @Override -4048 public Void rpcCall() throws ServiceException { -4049DrainRegionServersRequest req = -4050 DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build(); -4051 master.drainRegionServers(getRpcController(), req); -4052return null; -4053 } -4054}); -4055 } -4056 -4057 @Override -4058 public ListServerName listDrainingRegionServers() throws IOException { -4059return executeCallable(new MasterCallableListServerName(getConnection(), -4060 getRpcControllerFactory()) { -4061 @Override -4062 public ListServerName rpcCall() throws ServiceException { -4063ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build(); -4064ListServerName servers = new ArrayList(); -4065for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req) -4066.getServerNameList()) { -4067 servers.add(ProtobufUtil.toServerName(server)); -4068} -4069return servers; -4070 } -4071}); -4072 } -4073 -4074 @Override -4075 public void removeDrainFromRegionServers(ListServerName servers) throws IOException { -4076final ListHBaseProtos.ServerName pbServers = new ArrayList(servers.size()); -4077for (ServerName server : servers) { -4078 pbServers.add(ProtobufUtil.toServerName(server)); -4079} -4080 -4081executeCallable(new MasterCallableVoid(getConnection(), getRpcControllerFactory()) { -4082 @Override -4083 public Void rpcCall() throws ServiceException { -4084 RemoveDrainFromRegionServersRequest req = RemoveDrainFromRegionServersRequest.newBuilder() -4085 .addAllServerName(pbServers).build(); -4086 master.removeDrainFromRegionServers(getRpcController(), req); -4087return null; +4039executeCallable(new MasterCallableVoid(getConnection(), getRpcControllerFactory()) { +4040 @Override +4041 public Void rpcCall() throws ServiceException { +4042 master.drainRegionServers(getRpcController(), +4043 RequestConverter.buildDrainRegionServersRequest(servers)); +4044return null; +4045 } +4046}); +4047 } +4048 +4049 @Override +4050 public ListServerName listDrainingRegionServers() throws IOException { +4051return executeCallable(new MasterCallableListServerName(getConnection(), +4052 getRpcControllerFactory()) { +4053 @Override +4054 public ListServerName rpcCall() throws ServiceException { +4055ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build(); +4056ListServerName servers = new 
ArrayList(); +4057for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req) +4058.getServerNameList()) { +4059 servers.add(ProtobufUtil.toServerName(server)); +4060} +4061return servers; +4062 } +4063}); +4064 } +4065 +4066 @Override +4067 public void removeDrainFromRegionServers(ListServerName servers) throws IOException { +4068executeCallable(new MasterCallableVoid(getConnection(), getRpcControllerFactory()) { +4069 @Override +4070 public Void rpcCall() throws ServiceException { +4071 master.removeDrainFromRegionServers(getRpcController(), RequestConverter.buildRemoveDrainFromRegionServersRequest(servers)); +4072return null; +4073 } +4074}); +4075 } +4076 +4077 @Override +4078 public ListTableCFs listReplicatedTableCFs() throws IOException { +4079ListTableCFs replicatedTableCFs = new ArrayList(); +4080HTableDescriptor[] tables =
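For orientation, here is a hedged usage sketch of the drain API that the hunk above refactors. The Admin method names are taken from the hunk itself; the connection setup and the example ServerName are illustrative assumptions, not part of the patch.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DrainSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // ServerName is "host,port,startcode"; this one is purely illustrative.
      List<ServerName> servers =
          Arrays.asList(ServerName.valueOf("rs1.example.org,16020,1500000000000"));
      admin.drainRegionServers(servers);            // stop assigning new regions here
      System.out.println(admin.listDrainingRegionServers());
      admin.removeDrainFromRegionServers(servers);  // put the server back in rotation
    }
  }
}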
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/book.html -- diff --git a/book.html b/book.html index 9eeedb5..3cf6127 100644 --- a/book.html +++ b/book.html @@ -1381,11 +1381,10 @@ To check for well-formedness and only print output if errors exist, use the comm Keep Configuration In Sync Across the Cluster -When running in distributed mode, after you make an edit to an HBase configuration, make sure you copy the content of the conf/ directory to all nodes of the cluster. +When running in distributed mode, after you make an edit to an HBase configuration, make sure you copy the contents of the conf/ directory to all nodes of the cluster. HBase will not do this for you. Use rsync, scp, or another secure mechanism for copying the configuration files to your nodes. -For most configuration, a restart is needed for servers to pick up changes An exception is dynamic configuration. -to be described later below. +For most configurations, a restart is needed for servers to pick up changes. Dynamic configuration is an exception, described below. @@ -1473,12 +1472,12 @@ You must set JAVA_HOME on each node of your cluster. hbase-env. Loopback IP -Prior to hbase-0.96.0, HBase only used the IP address 127.0.0.1 to refer to localhost, and this could not be configured. +Prior to hbase-0.96.0, HBase only used the IP address 127.0.0.1 to refer to localhost, and this was not configurable. See Loopback IP for more details. NTP -The clocks on cluster nodes should be synchronized. A small amount of variation is acceptable, but larger amounts of skew can cause erratic and unexpected behavior. Time synchronization is one of the first things to check if you see unexplained problems in your cluster. It is recommended that you run a Network Time Protocol (NTP) service, or another time-synchronization mechanism, on your cluster, and that all nodes look to the same service for time synchronization. See the http://www.tldp.org/LDP/sag/html/basic-ntp-config.html;>Basic NTP Configuration at The Linux Documentation Project (TLDP) to set up NTP. +The clocks on cluster nodes should be synchronized. A small amount of variation is acceptable, but larger amounts of skew can cause erratic and unexpected behavior. Time synchronization is one of the first things to check if you see unexplained problems in your cluster. It is recommended that you run a Network Time Protocol (NTP) service, or another time-synchronization mechanism on your cluster and that all nodes look to the same service for time synchronization. See the http://www.tldp.org/LDP/sag/html/basic-ntp-config.html;>Basic NTP Configuration at The Linux Documentation Project (TLDP) to set up NTP. @@ -1540,8 +1539,8 @@ hadoop - nproc 32000 Windows -Prior to HBase 0.96, testing for running HBase on Microsoft Windows was limited. -Running a on Windows nodes is not recommended for production systems. +Prior to HBase 0.96, running HBase on Microsoft Windows was limited to testing purposes. +Running production systems on Windows machines is not recommended. @@ -1774,8 +1773,8 @@ data loss. This patch is present in Apache Hadoop releases 2.6.1+. The bundled jar is ONLY for use in standalone mode. In distributed mode, it is critical that the version of Hadoop that is out on your cluster match what is under HBase. Replace the hadoop jar found in the HBase lib directory with the hadoop jar you are running on your cluster to avoid version mismatch issues. -Make sure you replace the jar in HBase everywhere on your cluster. 
-Hadoop version mismatch issues have various manifestations but often all looks like its hung up. +Make sure you replace the jar in HBase across your whole cluster. +Hadoop version mismatch issues have various manifestations but often all look like it's hung. @@ -1860,7 +1859,7 @@ HDFS where data is replicated ensures the latter. To configure this standalone variant, edit your hbase-site.xml -setting the hbase.rootdir to point at a directory in your +setting hbase.rootdir to point at a directory in your HDFS instance but then set hbase.cluster.distributed to false. For example: @@ -1912,8 +1911,8 @@ Some of the information that was originally in this section has been moved there A pseudo-distributed mode is simply a fully-distributed mode run on a single host. -Use this configuration testing and prototyping on HBase. -Do not use this configuration for production nor for evaluating HBase performance. +Use this HBase configuration for testing and prototyping purposes only. +Do not use this configuration for production or for performance evaluation. @@ -1922,11 +1921,11 @@ Do not use this configuration for production nor for evaluating HBase performanc By default, HBase runs in standalone mode. Both standalone mode and pseudo-distributed mode are
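As a hedged Java sketch of the standalone-over-HDFS variant this hunk documents: the two property names come from the text itself, while the HDFS URI is an illustrative assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StandaloneOverHdfsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Point the root directory at HDFS but keep the cluster non-distributed,
    // mirroring the hbase-site.xml settings the text describes.
    conf.set("hbase.rootdir", "hdfs://namenode.example.org:8020/hbase");
    conf.setBoolean("hbase.cluster.distributed", false);
    System.out.println(conf.get("hbase.rootdir"));
  }
}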
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html index 249d4a0..7369fdf 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html @@ -65,12 +65,12 @@ 057import com.google.common.base.Preconditions; 058 059/** -060 * Reads {@link HFile} version 2 blocks to HFiles and via {@link Cacheable} Interface to caches. -061 * Version 2 was introduced in hbase-0.92.0. No longer has support for version 1 blocks since -062 * hbase-1.3.0. -063 * -064 * pVersion 1 was the original file block. Version 2 was introduced when we changed the hbase file -065 * format to support multi-level block indexes and compound bloom filters (HBASE-3857). +060 * Cacheable Blocks of an {@link HFile} version 2 file. +061 * Version 2 was introduced in hbase-0.92.0. +062 * +063 * pVersion 1 was the original file block. Version 2 was introduced when we changed the hbase file +064 * format to support multi-level block indexes and compound bloom filters (HBASE-3857). Support +065 * for Version 1 was removed in hbase-1.3.0. 066 * 067 * h3HFileBlock: Version 2/h3 068 * In version 2, a block is structured as follows: @@ -120,582 +120,582 @@ 112public class HFileBlock implements Cacheable { 113 private static final Log LOG = LogFactory.getLog(HFileBlock.class); 114 -115 /** Type of block. Header field 0. */ -116 private BlockType blockType; -117 -118 /** -119 * Size on disk excluding header, including checksum. Header field 1. -120 * @see Writer#putHeader(byte[], int, int, int, int) -121 */ -122 private int onDiskSizeWithoutHeader; -123 -124 /** -125 * Size of pure data. Does not include header or checksums. Header field 2. -126 * @see Writer#putHeader(byte[], int, int, int, int) -127 */ -128 private int uncompressedSizeWithoutHeader; -129 -130 /** -131 * The offset of the previous block on disk. Header field 3. -132 * @see Writer#putHeader(byte[], int, int, int, int) -133 */ -134 private long prevBlockOffset; -135 -136 /** -137 * Size on disk of header + data. Excludes checksum. Header field 6, -138 * OR calculated from {@link #onDiskSizeWithoutHeader} when using HDFS checksum. -139 * @see Writer#putHeader(byte[], int, int, int, int) -140 */ -141 private int onDiskDataSizeWithHeader; -142 -143 -144 /** -145 * The in-memory representation of the hfile block. Can be on or offheap. Can be backed by -146 * a single ByteBuffer or by many. Make no assumptions. -147 * -148 * pBe careful reading from this codebuf/code. Duplicate and work on the duplicate or if -149 * not, be sure to reset position and limit else trouble down the road. -150 * -151 * pTODO: Make this read-only once made. -152 * -153 * pWe are using the ByteBuff type. ByteBuffer is not extensible yet we need to be able to have -154 * a ByteBuffer-like API across multiple ByteBuffers reading from a cache such as BucketCache. -155 * So, we have this ByteBuff type. Unfortunately, it is spread all about HFileBlock. Would be -156 * good if could be confined to cache-use only but hard-to-do. -157 */ -158 private ByteBuff buf; -159 -160 /** Meta data that holds meta information on the hfileblock. -161 */ -162 private HFileContext fileContext; -163 -164 /** -165 * The offset of this block in the file. 
Populated by the reader for -166 * convenience of access. This offset is not part of the block header. -167 */ -168 private long offset = UNSET; -169 -170 private MemoryType memType = MemoryType.EXCLUSIVE; -171 -172 /** -173 * The on-disk size of the next block, including the header and checksums if present, obtained by -174 * peeking into the first {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the next block's -175 * header, or UNSET if unknown. -176 * -177 * Blocks try to carry the size of the next block to read in this data member. They will even have -178 * this value when served from cache. Could save a seek in the case where we are iterating through -179 * a file and some of the blocks come from cache. If from cache, then having this info to hand -180 * will save us doing a seek to read the header so we can read the body of a block. -181 * TODO: see how effective this is at saving seeks. -182 */ -183 private int nextBlockOnDiskSize = UNSET; -184 -185 /** -186 * On a checksum failure, do these many succeeding read requests using hdfs checksums before -187 * auto-reenabling hbase checksum verification. -188 */ -189 static final int CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD = 3; -190 -191 private
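The fields in the hunk above double as a description of the fixed portion of the version 2 block header. Below is a minimal decoding sketch, assuming the field order and widths implied by the Java field types shown (header fields 0 through 3); the 8-byte magic length is an assumption, and the checksum-related fields are omitted.

import java.nio.ByteBuffer;

final class HFileV2BlockHeader {
  final byte[] blockTypeMagic = new byte[8]; // header field 0: block type magic (assumed 8 bytes)
  final int onDiskSizeWithoutHeader;         // header field 1
  final int uncompressedSizeWithoutHeader;   // header field 2
  final long prevBlockOffset;                // header field 3

  HFileV2BlockHeader(ByteBuffer buf) {
    buf.get(blockTypeMagic);                 // read fields in on-disk order
    onDiskSizeWithoutHeader = buf.getInt();
    uncompressedSizeWithoutHeader = buf.getInt();
    prevBlockOffset = buf.getLong();
  }
}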
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html deleted file mode 100644 index 8f0eb1f..000 --- a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html +++ /dev/null @@ -1,339 +0,0 @@ -http://www.w3.org/TR/html4/loose.dtd;> - - - - - -AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer (Apache HBase 3.0.0-SNAPSHOT API) - - - - - -var methods = {"i0":10}; -var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; -var altColor = "altColor"; -var rowColor = "rowColor"; -var tableTab = "tableTab"; -var activeTableTab = "activeTableTab"; - - -JavaScript is disabled on your browser. - - - - - -Skip navigation links - - - - -Overview -Package -Class -Use -Tree -Deprecated -Index -Help - - - - -PrevClass -NextClass - - -Frames -NoFrames - - -AllClasses - - - - - - - -Summary: -Nested| -Field| -Constr| -Method - - -Detail: -Field| -Constr| -Method - - - - - - - - -org.apache.hadoop.hbase.client -Class AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer - - - -http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">java.lang.Object - - -org.apache.hadoop.hbase.client.AsyncHBaseAdmin.ProcedureBiConsumer - - -org.apache.hadoop.hbase.client.AsyncHBaseAdmin.TableProcedureBiConsumer - - -org.apache.hadoop.hbase.client.AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer - - - - - - - - - - - -All Implemented Interfaces: -http://docs.oracle.com/javase/8/docs/api/java/util/function/BiConsumer.html?is-external=true; title="class or interface in java.util.function">BiConsumerhttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Void,http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true; title="class or interface in java.lang">Throwable - - -Enclosing class: -AsyncHBaseAdmin - - - -private class AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer -extends AsyncHBaseAdmin.TableProcedureBiConsumer - - - - - - - - - - - -Field Summary - - - - -Fields inherited from classorg.apache.hadoop.hbase.client.AsyncHBaseAdmin.TableProcedureBiConsumer -tableName - - - - - -Fields inherited from classorg.apache.hadoop.hbase.client.AsyncHBaseAdmin.ProcedureBiConsumer -admin - - - - - - - - -Constructor Summary - -Constructors - -Constructor and Description - - -ModifyColumnFamilyProcedureBiConsumer(AsyncAdminadmin, - TableNametableName) - - - - - - - - - -Method Summary - -All MethodsInstance MethodsConcrete Methods - -Modifier and Type -Method and Description - - -(package private) http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String -getOperationType() - - - - - - -Methods inherited from classorg.apache.hadoop.hbase.client.AsyncHBaseAdmin.TableProcedureBiConsumer -getDescription, onError, onFinished - - - - - -Methods inherited from classorg.apache.hadoop.hbase.client.AsyncHBaseAdmin.ProcedureBiConsumer -accept - - - - - -Methods inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in 
java.lang">Object -http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--; title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-; title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--; title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--; title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--; title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--; title="class or
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html index dc12c09..82506d2 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html @@ -54,2261 +54,2259 @@ 046import org.apache.commons.io.IOUtils; 047import org.apache.commons.logging.Log; 048import org.apache.commons.logging.LogFactory; -049import org.apache.directory.api.util.OptionalComponentsMonitor; -050import org.apache.hadoop.hbase.HRegionInfo; -051import org.apache.hadoop.hbase.HRegionLocation; -052import org.apache.hadoop.hbase.MetaTableAccessor; -053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType; -054import org.apache.hadoop.hbase.NotServingRegionException; -055import org.apache.hadoop.hbase.ProcedureInfo; -056import org.apache.hadoop.hbase.RegionLocations; -057import org.apache.hadoop.hbase.ServerName; -058import org.apache.hadoop.hbase.NamespaceDescriptor; -059import org.apache.hadoop.hbase.HConstants; -060import org.apache.hadoop.hbase.TableExistsException; -061import org.apache.hadoop.hbase.TableName; -062import org.apache.hadoop.hbase.AsyncMetaTableAccessor; -063import org.apache.hadoop.hbase.TableNotDisabledException; -064import org.apache.hadoop.hbase.TableNotEnabledException; -065import org.apache.hadoop.hbase.TableNotFoundException; -066import org.apache.hadoop.hbase.UnknownRegionException; -067import org.apache.hadoop.hbase.classification.InterfaceAudience; -068import org.apache.hadoop.hbase.classification.InterfaceStability; -069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder; -070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder; -071import org.apache.hadoop.hbase.client.Scan.ReadType; -072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; -073import org.apache.hadoop.hbase.client.replication.TableCFs; -074import org.apache.hadoop.hbase.exceptions.DeserializationException; -075import org.apache.hadoop.hbase.ipc.HBaseRpcController; -076import org.apache.hadoop.hbase.quotas.QuotaFilter; -077import org.apache.hadoop.hbase.quotas.QuotaSettings; -078import org.apache.hadoop.hbase.quotas.QuotaTableUtil; -079import org.apache.hadoop.hbase.replication.ReplicationException; -080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; -082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback; -083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; -087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; -088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; -089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; 
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; -091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; -092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; -093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; -094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; -095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse; -096import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; -097import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; -098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest; -099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse; -100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; -101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; -102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; -103import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/src-html/org/apache/hadoop/hbase/AsyncMetaTableAccessor.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/AsyncMetaTableAccessor.html b/devapidocs/src-html/org/apache/hadoop/hbase/AsyncMetaTableAccessor.html index baee8c1..a0256ee 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/AsyncMetaTableAccessor.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/AsyncMetaTableAccessor.html @@ -30,551 +30,607 @@ 022import java.io.IOException; 023import java.util.ArrayList; 024import java.util.Arrays; -025import java.util.List; -026import java.util.Map; -027import java.util.NavigableMap; -028import java.util.Optional; -029import java.util.SortedMap; -030import java.util.concurrent.CompletableFuture; -031import java.util.regex.Matcher; -032import java.util.regex.Pattern; -033 -034import org.apache.commons.logging.Log; -035import org.apache.commons.logging.LogFactory; -036import org.apache.hadoop.hbase.MetaTableAccessor.CollectingVisitor; -037import org.apache.hadoop.hbase.MetaTableAccessor.QueryType; -038import org.apache.hadoop.hbase.MetaTableAccessor.Visitor; -039import org.apache.hadoop.hbase.classification.InterfaceAudience; -040import org.apache.hadoop.hbase.client.Consistency; -041import org.apache.hadoop.hbase.client.Get; -042import org.apache.hadoop.hbase.client.RawAsyncTable; -043import org.apache.hadoop.hbase.client.RawScanResultConsumer; -044import org.apache.hadoop.hbase.client.RegionReplicaUtil; -045import org.apache.hadoop.hbase.client.Result; -046import org.apache.hadoop.hbase.client.Scan; -047import org.apache.hadoop.hbase.client.TableState; -048import org.apache.hadoop.hbase.exceptions.DeserializationException; -049import org.apache.hadoop.hbase.util.Bytes; -050import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -051import org.apache.hadoop.hbase.util.Pair; -052 -053/** -054 * The asynchronous meta table accessor. Used to read/write region and assignment information store -055 * in codehbase:meta/code. 
-056 */ -057@InterfaceAudience.Private -058public class AsyncMetaTableAccessor { -059 -060 private static final Log LOG = LogFactory.getLog(AsyncMetaTableAccessor.class); -061 -062 -063 /** The delimiter for meta columns for replicaIds gt; 0 */ -064 private static final char META_REPLICA_ID_DELIMITER = '_'; +025import java.util.Collections; +026import java.util.List; +027import java.util.Map; +028import java.util.NavigableMap; +029import java.util.Optional; +030import java.util.SortedMap; +031import java.util.concurrent.CompletableFuture; +032import java.util.regex.Matcher; +033import java.util.regex.Pattern; +034import java.util.stream.Collectors; +035 +036import org.apache.commons.logging.Log; +037import org.apache.commons.logging.LogFactory; +038import org.apache.hadoop.hbase.MetaTableAccessor.CollectingVisitor; +039import org.apache.hadoop.hbase.MetaTableAccessor.QueryType; +040import org.apache.hadoop.hbase.MetaTableAccessor.Visitor; +041import org.apache.hadoop.hbase.classification.InterfaceAudience; +042import org.apache.hadoop.hbase.client.Connection; +043import org.apache.hadoop.hbase.client.Consistency; +044import org.apache.hadoop.hbase.client.Get; +045import org.apache.hadoop.hbase.client.RawAsyncTable; +046import org.apache.hadoop.hbase.client.RawScanResultConsumer; +047import org.apache.hadoop.hbase.client.RegionReplicaUtil; +048import org.apache.hadoop.hbase.client.Result; +049import org.apache.hadoop.hbase.client.Scan; +050import org.apache.hadoop.hbase.client.TableState; +051import org.apache.hadoop.hbase.client.Scan.ReadType; +052import org.apache.hadoop.hbase.exceptions.DeserializationException; +053import org.apache.hadoop.hbase.util.Bytes; +054import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +055import org.apache.hadoop.hbase.util.Pair; +056 +057/** +058 * The asynchronous meta table accessor. Used to read/write region and assignment information store +059 * in codehbase:meta/code. +060 */ +061@InterfaceAudience.Private +062public class AsyncMetaTableAccessor { +063 +064 private static final Log LOG = LogFactory.getLog(AsyncMetaTableAccessor.class); 065 -066 /** A regex for parsing server columns from meta. See above javadoc for meta layout */ -067 private static final Pattern SERVER_COLUMN_PATTERN = Pattern -068 .compile("^server(_[0-9a-fA-F]{4})?$"); +066 +067 /** The delimiter for meta columns for replicaIds gt; 0 */ +068 private static final char META_REPLICA_ID_DELIMITER = '_'; 069 -070 public static CompletableFutureBoolean tableExists(RawAsyncTable metaTable, TableName tableName) { -071if (tableName.equals(META_TABLE_NAME)) { -072 return CompletableFuture.completedFuture(true); -073} -074return getTableState(metaTable, tableName).thenApply(Optional::isPresent); -075 } -076 -077 public static
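The tableExists signature in this hunk returns a CompletableFuture, so callers compose callbacks instead of blocking. A hedged usage sketch, assuming metaTable and tableName are supplied by the caller:

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RawAsyncTable;

class MetaLookupSketch {
  // Non-blocking existence check composed in callback style, the way the
  // accessor's CompletableFuture return type is meant to be consumed.
  static void printExists(RawAsyncTable metaTable, TableName tableName) {
    CompletableFuture<Boolean> f = AsyncMetaTableAccessor.tableExists(metaTable, tableName);
    f.thenAccept(present ->
        System.out.println(tableName + (present ? " exists" : " is missing")));
  }
}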
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html index 6de986f..c895448 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html @@ -26,1592 +26,1693 @@ 018package org.apache.hadoop.hbase.master.balancer; 019 020import java.util.ArrayDeque; -021import java.util.Arrays; -022import java.util.Collection; -023import java.util.Deque; -024import java.util.HashMap; -025import java.util.LinkedList; -026import java.util.List; -027import java.util.Map; -028import java.util.Map.Entry; -029import java.util.Random; -030 -031import org.apache.commons.logging.Log; -032import org.apache.commons.logging.LogFactory; -033import org.apache.hadoop.conf.Configuration; -034import org.apache.hadoop.hbase.ClusterStatus; -035import org.apache.hadoop.hbase.HBaseInterfaceAudience; -036import org.apache.hadoop.hbase.HConstants; -037import org.apache.hadoop.hbase.HRegionInfo; -038import org.apache.hadoop.hbase.RegionLoad; -039import org.apache.hadoop.hbase.ServerLoad; -040import org.apache.hadoop.hbase.ServerName; -041import org.apache.hadoop.hbase.TableName; -042import org.apache.hadoop.hbase.classification.InterfaceAudience; -043import org.apache.hadoop.hbase.master.MasterServices; -044import org.apache.hadoop.hbase.master.RegionPlan; -045import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action; -046import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type; -047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction; -048import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction; -049import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction; -050import org.apache.hadoop.hbase.util.Bytes; -051import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -052 -053import com.google.common.collect.Lists; -054 -055/** -056 * pThis is a best effort load balancer. Given a Cost function F(C) =gt; x It will -057 * randomly try and mutate the cluster to Cprime. If F(Cprime) lt; F(C) then the -058 * new cluster state becomes the plan. It includes costs functions to compute the cost of:/p -059 * ul -060 * liRegion Load/li -061 * liTable Load/li -062 * liData Locality/li -063 * liMemstore Sizes/li -064 * liStorefile Sizes/li -065 * /ul -066 * -067 * -068 * pEvery cost function returns a number between 0 and 1 inclusive; where 0 is the lowest cost -069 * best solution, and 1 is the highest possible cost and the worst solution. 
The computed costs are -070 * scaled by their respective multipliers:/p +021import java.util.ArrayList; +022import java.util.Arrays; +023import java.util.Collection; +024import java.util.Collections; +025import java.util.Deque; +026import java.util.HashMap; +027import java.util.LinkedList; +028import java.util.List; +029import java.util.Map; +030import java.util.Map.Entry; +031import java.util.Random; +032 +033import org.apache.commons.logging.Log; +034import org.apache.commons.logging.LogFactory; +035import org.apache.hadoop.conf.Configuration; +036import org.apache.hadoop.hbase.ClusterStatus; +037import org.apache.hadoop.hbase.HBaseInterfaceAudience; +038import org.apache.hadoop.hbase.HConstants; +039import org.apache.hadoop.hbase.HRegionInfo; +040import org.apache.hadoop.hbase.RegionLoad; +041import org.apache.hadoop.hbase.ServerLoad; +042import org.apache.hadoop.hbase.ServerName; +043import org.apache.hadoop.hbase.TableName; +044import org.apache.hadoop.hbase.classification.InterfaceAudience; +045import org.apache.hadoop.hbase.master.MasterServices; +046import org.apache.hadoop.hbase.master.RegionPlan; +047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action; +048import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type; +049import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction; +050import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType; +051import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction; +052import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction; +053import org.apache.hadoop.hbase.util.Bytes; +054import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +055 +056import
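The balancer javadoc above describes a simple accept-if-cheaper search: randomly mutate the cluster to C' and keep the mutation when F(C') < F(C). The following is a self-contained toy illustration of that loop, not HBase code; the cost function here is just the normalized spread of region counts across servers, standing in for the weighted sum of [0,1] cost functions the javadoc lists.

import java.util.Arrays;
import java.util.Random;

public class CostDescentSketch {
  // Toy cost in [0,1]: spread of region counts across servers (0 = balanced).
  static double cost(int[] regionsPerServer, int totalRegions) {
    int max = Arrays.stream(regionsPerServer).max().getAsInt();
    int min = Arrays.stream(regionsPerServer).min().getAsInt();
    return (max - min) / (double) totalRegions;
  }

  public static void main(String[] args) {
    Random rnd = new Random(42);
    int[] servers = {9, 1, 2};                 // regions hosted per server
    int total = Arrays.stream(servers).sum();
    double best = cost(servers, total);
    for (int step = 0; step < 1000 && best > 0; step++) {
      int from = rnd.nextInt(servers.length), to = rnd.nextInt(servers.length);
      if (from == to || servers[from] == 0) continue;
      servers[from]--; servers[to]++;          // candidate mutation C'
      double c = cost(servers, total);
      if (c < best) { best = c; }              // accept: F(C') < F(C)
      else { servers[from]++; servers[to]--; } // reject: undo the move
    }
    System.out.println(Arrays.toString(servers) + " cost=" + best);
  }
}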
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html index 0c3fe3b..d262744 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html @@ -149,3339 +149,3348 @@ 141import org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver; 142import org.apache.hadoop.hbase.quotas.QuotaObserverChore; 143import org.apache.hadoop.hbase.quotas.QuotaUtil; -144import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier; -145import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory; -146import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; -147import org.apache.hadoop.hbase.regionserver.HRegionServer; -148import org.apache.hadoop.hbase.regionserver.HStore; -149import org.apache.hadoop.hbase.regionserver.RSRpcServices; -150import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; -151import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy; -152import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy; -153import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy; -154import org.apache.hadoop.hbase.replication.ReplicationException; -155import org.apache.hadoop.hbase.replication.ReplicationFactory; -156import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -157import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; -158import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl; -159import org.apache.hadoop.hbase.replication.master.TableCFsUpdater; -160import org.apache.hadoop.hbase.replication.regionserver.Replication; -161import org.apache.hadoop.hbase.security.AccessDeniedException; -162import org.apache.hadoop.hbase.security.UserProvider; -163import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -164import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; -165import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo; -166import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; -167import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy; -168import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; -169import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos; -170import org.apache.hadoop.hbase.util.Addressing; -171import org.apache.hadoop.hbase.util.Bytes; -172import org.apache.hadoop.hbase.util.CompressionTest; -173import org.apache.hadoop.hbase.util.EncryptionTest; -174import org.apache.hadoop.hbase.util.FSUtils; -175import org.apache.hadoop.hbase.util.HFileArchiveUtil; -176import org.apache.hadoop.hbase.util.HasThread; -177import org.apache.hadoop.hbase.util.IdLock; -178import org.apache.hadoop.hbase.util.ModifyRegionUtils; -179import org.apache.hadoop.hbase.util.Pair; -180import org.apache.hadoop.hbase.util.Threads; -181import org.apache.hadoop.hbase.util.VersionInfo; -182import org.apache.hadoop.hbase.util.ZKDataMigrator; -183import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker; -184import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker; -185import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; -186import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker; 
-187import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker; -188import org.apache.hadoop.hbase.zookeeper.RegionServerTracker; -189import org.apache.hadoop.hbase.zookeeper.SplitOrMergeTracker; -190import org.apache.hadoop.hbase.zookeeper.ZKClusterId; -191import org.apache.hadoop.hbase.zookeeper.ZKUtil; -192import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -193import org.apache.zookeeper.KeeperException; -194import org.eclipse.jetty.server.Server; -195import org.eclipse.jetty.server.ServerConnector; -196import org.eclipse.jetty.servlet.ServletHolder; -197import org.eclipse.jetty.webapp.WebAppContext; -198 -199import com.google.common.annotations.VisibleForTesting; -200import com.google.common.collect.Lists; -201import com.google.common.collect.Maps; -202import com.google.protobuf.Descriptors; -203import com.google.protobuf.Service; -204 -205/** -206 * HMaster is the "master server" for HBase. An HBase cluster has one active -207 * master. If many masters are started, all compete. Whichever wins goes on to -208 * run the cluster. All others park themselves in their constructor until -209 * master or cluster shutdown or until the active master loses its lease in -210 * zookeeper. Thereafter, all running master jostle to take over master role. -211 * -212 * pThe Master can be
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/KeepDeletedCells.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/KeepDeletedCells.html b/devapidocs/org/apache/hadoop/hbase/KeepDeletedCells.html index 8cfa92f..451f15f 100644 --- a/devapidocs/org/apache/hadoop/hbase/KeepDeletedCells.html +++ b/devapidocs/org/apache/hadoop/hbase/KeepDeletedCells.html @@ -262,7 +262,7 @@ the order they are declared. values -public staticKeepDeletedCells[]values() +public staticKeepDeletedCells[]values() Returns an array containing the constants of this enum type, in the order they are declared. This method may be used to iterate over the constants as follows: @@ -282,7 +282,7 @@ for (KeepDeletedCells c : KeepDeletedCells.values()) valueOf -public staticKeepDeletedCellsvalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringname) +public staticKeepDeletedCellsvalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringname) Returns the enum constant of this type with the specified name. The string must match exactly an identifier used to declare an enum constant in this type. (Extraneous whitespace characters are http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/MemoryCompactionPolicy.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/MemoryCompactionPolicy.html b/devapidocs/org/apache/hadoop/hbase/MemoryCompactionPolicy.html index faa94f1..992a314 100644 --- a/devapidocs/org/apache/hadoop/hbase/MemoryCompactionPolicy.html +++ b/devapidocs/org/apache/hadoop/hbase/MemoryCompactionPolicy.html @@ -263,7 +263,7 @@ the order they are declared. values -public staticMemoryCompactionPolicy[]values() +public staticMemoryCompactionPolicy[]values() Returns an array containing the constants of this enum type, in the order they are declared. This method may be used to iterate over the constants as follows: @@ -283,7 +283,7 @@ for (MemoryCompactionPolicy c : MemoryCompactionPolicy.values()) valueOf -public staticMemoryCompactionPolicyvalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringname) +public staticMemoryCompactionPolicyvalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringname) Returns the enum constant of this type with the specified name. The string must match exactly an identifier used to declare an enum constant in this type. 
(Extraneous whitespace characters are http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html index 6562834..746a472 100644 --- a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html @@ -166,10 +166,10 @@ java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true; title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true; title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true; title="class or interface in java.io">Serializable) -org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand -org.apache.hadoop.hbase.backup.BackupInfo.BackupState org.apache.hadoop.hbase.backup.BackupType org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase +org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand +org.apache.hadoop.hbase.backup.BackupInfo.BackupState http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html b/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html index ac70770..7c23eaf 100644 --- a/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html +++ b/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html @@ -99,65 +99,58 @@ -org.apache.hadoop.hbase.io.hfile - -Provides implementations of HFile and HFile - BlockCache. - - - org.apache.hadoop.hbase.mapreduce Provides HBase http://wiki.apache.org/hadoop/HadoopMapReduce;>MapReduce
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html -- diff --git a/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html b/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html index 038e1d3..0ed7e8b 100644 --- a/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html +++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html @@ -35,530 +35,610 @@ 027import org.apache.hadoop.hbase.Cell; 028import org.apache.hadoop.hbase.CellComparator; 029import org.apache.hadoop.hbase.CellUtil; -030import org.apache.hadoop.hbase.classification.InterfaceAudience; -031import org.apache.hadoop.hbase.exceptions.DeserializationException; -032import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -033import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; -034import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException; -035 -036/** -037 * Implementation of {@link Filter} that represents an ordered List of Filters -038 * which will be evaluated with a specified boolean operator {@link Operator#MUST_PASS_ALL} -039 * (codeAND/code) or {@link Operator#MUST_PASS_ONE} (codeOR/code). -040 * Since you can use Filter Lists as children of Filter Lists, you can create a -041 * hierarchy of filters to be evaluated. -042 * -043 * br -044 * {@link Operator#MUST_PASS_ALL} evaluates lazily: evaluation stops as soon as one filter does -045 * not include the KeyValue. -046 * -047 * br -048 * {@link Operator#MUST_PASS_ONE} evaluates non-lazily: all filters are always evaluated. -049 * -050 * br -051 * Defaults to {@link Operator#MUST_PASS_ALL}. -052 */ -053@InterfaceAudience.Public -054final public class FilterList extends FilterBase { -055 /** set operator */ -056 @InterfaceAudience.Public -057 public static enum Operator { -058/** !AND */ -059MUST_PASS_ALL, -060/** !OR */ -061MUST_PASS_ONE -062 } -063 -064 private static final int MAX_LOG_FILTERS = 5; -065 private Operator operator = Operator.MUST_PASS_ALL; -066 private final ListFilter filters; -067 private Filter seekHintFilter = null; -068 -069 /** Reference Cell used by {@link #transformCell(Cell)} for validation purpose. */ -070 private Cell referenceCell = null; -071 -072 /** -073 * When filtering a given Cell in {@link #filterKeyValue(Cell)}, -074 * this stores the transformed Cell to be returned by {@link #transformCell(Cell)}. -075 * -076 * Individual filters transformation are applied only when the filter includes the Cell. -077 * Transformations are composed in the order specified by {@link #filters}. -078 */ -079 private Cell transformedCell = null; +030import org.apache.hadoop.hbase.KeyValueUtil; +031import org.apache.hadoop.hbase.classification.InterfaceAudience; +032import org.apache.hadoop.hbase.exceptions.DeserializationException; +033import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +034import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos; +035import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException; +036 +037/** +038 * Implementation of {@link Filter} that represents an ordered List of Filters +039 * which will be evaluated with a specified boolean operator {@link Operator#MUST_PASS_ALL} +040 * (codeAND/code) or {@link Operator#MUST_PASS_ONE} (codeOR/code). +041 * Since you can use Filter Lists as children of Filter Lists, you can create a +042 * hierarchy of filters to be evaluated. 
+043 * +044 * br +045 * {@link Operator#MUST_PASS_ALL} evaluates lazily: evaluation stops as soon as one filter does +046 * not include the KeyValue. +047 * +048 * br +049 * {@link Operator#MUST_PASS_ONE} evaluates non-lazily: all filters are always evaluated. +050 * +051 * br +052 * Defaults to {@link Operator#MUST_PASS_ALL}. +053 */ +054@InterfaceAudience.Public +055final public class FilterList extends FilterBase { +056 /** set operator */ +057 @InterfaceAudience.Public +058 public static enum Operator { +059/** !AND */ +060MUST_PASS_ALL, +061/** !OR */ +062MUST_PASS_ONE +063 } +064 +065 private static final int MAX_LOG_FILTERS = 5; +066 private Operator operator = Operator.MUST_PASS_ALL; +067 private final ListFilter filters; +068 private Filter seekHintFilter = null; +069 +070 /** +071 * Save previous return code and previous cell for every filter in filter list. For MUST_PASS_ONE, +072 * we use the previous return code to decide whether we should pass current cell encountered to +073 * the filter. For MUST_PASS_ALL, the two list are meaningless. +074 */ +075 private ListReturnCode prevFilterRCList = null; +076 private ListCell prevCellList = null; +077 +078 /** Reference Cell used by {@link #transformCell(Cell)} for validation
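A hedged usage sketch of the operator semantics this javadoc describes: MUST_PASS_ONE gives OR behavior across child filters, MUST_PASS_ALL gives AND. FilterList and its Operator enum appear in the source above; SingleColumnValueFilter and CompareFilter.CompareOp are assumed to be available in this API generation.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

class FilterListSketch {
  static Scan buildScan() {
    byte[] fam = Bytes.toBytes("cf");
    // MUST_PASS_ONE: a row passes if either child filter passes (OR).
    FilterList list = new FilterList(FilterList.Operator.MUST_PASS_ONE);
    list.addFilter(new SingleColumnValueFilter(fam, Bytes.toBytes("a"),
        CompareOp.EQUAL, Bytes.toBytes("x")));
    list.addFilter(new SingleColumnValueFilter(fam, Bytes.toBytes("b"),
        CompareOp.EQUAL, Bytes.toBytes("y")));
    Scan scan = new Scan();
    scan.setFilter(list);
    return scan;
  }
}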
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6ad4f21a/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html index ca56dfc..ca03f9b 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html +++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html @@ -408,7 +408,7 @@ extends Procedure -addStackIndex, afterReplay, beforeReplay, compareTo, doAcquireLock, doExecute, doReleaseLock, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcId, getPr ocIdHashCode, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, holdLock, incChildrenLatch, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, toString, toStringClass, toStringDetails, toStringSimpleSB, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted +addStackIndex, afterReplay, beforeReplay, compareTo, doAcquireLock, doExecute, doReleaseLock, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, href="../../../../../../org/apache/hadoop/hbase/procedure2/Procedure.html#getTimeoutTimestamp--">getTimeoutTimestamp, > href="../../../../../../org/apache/hadoop/hbase/procedure2/Procedure.html#hasChildren--">hasChildren, > href="../../../../../../org/apache/hadoop/hbase/procedure2/Procedure.html#hasException--">hasException, > href="../../../../../../org/apache/hadoop/hbase/procedure2/Procedure.html#hasLock-TEnvironment-">hasLock, > href="../../../../../../org/apache/hadoop/hbase/procedure2/Procedure.html#hasOwner--">hasOwner, > href="../../../../../../org/apache/hadoop/hbase/procedure2/Procedure.html#hasParent--">hasParent, > href="../../../../../../org/apache/hadoop/hbase/procedure2/Procedure.html#hasTimeout--">hasTimeout, > href="../../../../../../org/apache/hadoop/hbase/procedure2/Procedure.html#haveSameParent-org.apache.hadoop.hbase.procedure2.Procedure-org.apache.hadoop.hbase.procedure2.Procedure-">haveSameParent, > holdLock, incChildrenLatch, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, toString, toStringClass, toStringDetails, toStringSimpleSB, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6ad4f21a/devapidocs/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html -- diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html index addfeeb..e029a4f 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html +++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html @@ -409,7 +409,7 @@ extends Procedure -addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, doAcquireLock, doExecute, d oReleaseLock, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcId, getProcIdHashCode, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, < a href="../../../../../../org/apache/hadoop/hbase/procedure2/Procedure.html#getTimeoutTimestamp--">getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, holdLock, incChildrenLatch, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey,
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html index b6c2fe3..1765903 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html @@ -60,892 +60,917 @@ 052import org.apache.hadoop.hbase.classification.InterfaceAudience; 053import org.apache.hadoop.hbase.fs.HFileSystem; 054import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; -055import org.apache.hadoop.hbase.io.compress.Compression; -056import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; -057import org.apache.hadoop.hbase.protobuf.ProtobufMagic; -058import org.apache.hadoop.hbase.regionserver.CellSink; -059import org.apache.hadoop.hbase.regionserver.ShipperListener; -060import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; -061import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -062import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; -063import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair; -064import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos; -065import org.apache.hadoop.hbase.util.BloomFilterWriter; -066import org.apache.hadoop.hbase.util.Bytes; -067import org.apache.hadoop.hbase.util.FSUtils; -068import org.apache.hadoop.io.Writable; -069 -070import com.google.common.annotations.VisibleForTesting; -071import com.google.common.base.Preconditions; -072 -073/** -074 * File format for hbase. -075 * A file of sorted key/value pairs. Both keys and values are byte arrays. -076 * <p> -077 * The memory footprint of a HFile includes the following (below is taken from the -078 * <a -079 * href=https://issues.apache.org/jira/browse/HADOOP-3315>TFile</a> documentation -080 * but applies also to HFile): -081 * <ul> -082 * <li>Some constant overhead of reading or writing a compressed block. +055import org.apache.hadoop.hbase.io.MetricsIO; +056import org.apache.hadoop.hbase.io.MetricsIOWrapperImpl; +057import org.apache.hadoop.hbase.io.compress.Compression; +058import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; +059import org.apache.hadoop.hbase.protobuf.ProtobufMagic; +060import org.apache.hadoop.hbase.regionserver.CellSink; +061import org.apache.hadoop.hbase.regionserver.ShipperListener; +062import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; +063import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +064import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; +065import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair; +066import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos; +067import org.apache.hadoop.hbase.util.BloomFilterWriter; +068import org.apache.hadoop.hbase.util.Bytes; +069import org.apache.hadoop.hbase.util.FSUtils; +070import org.apache.hadoop.io.Writable; +071 +072import com.google.common.annotations.VisibleForTesting; +073import com.google.common.base.Preconditions; +074 +075/** +076 * File format for hbase. +077 * A file of sorted key/value pairs. Both keys and values are byte arrays.
+078 * <p> +079 * The memory footprint of a HFile includes the following (below is taken from the +080 * <a +081 * href=https://issues.apache.org/jira/browse/HADOOP-3315>TFile</a> documentation +082 * but applies also to HFile): 083 * <ul> -084 * <li>Each compressed block requires one compression/decompression codec for -085 * I/O. -086 * <li>Temporary space to buffer the key. -087 * <li>Temporary space to buffer the value. -088 * </ul> -089 * <li>HFile index, which is proportional to the total number of Data Blocks. -090 * The total amount of memory needed to hold the index can be estimated as -091 * (56+AvgKeySize)*NumBlocks. -092 * </ul> -093 * Suggestions on performance optimization. -094 * <ul> -095 * <li>Minimum block size. We recommend a setting of minimum block size between -096 * 8KB to 1MB for general usage. Larger block size is preferred if files are -097 * primarily for sequential access. However, it would lead to inefficient random -098 * access (because there are more data to decompress). Smaller blocks are good -099 * for random access, but require more memory to hold the block index, and may -100 * be slower to create (because we must flush the compressor stream at the -101 * conclusion of each data block, which leads to an FS I/O flush). Further, due -102 * to the internal caching in Compression codec, the smallest possible block -103 * size would be around 20KB-30KB. -104 * <li>The current implementation does not offer true
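The index-sizing rule in the javadoc above, (56+AvgKeySize)*NumBlocks, is straightforward to turn into a capacity check. A minimal sketch follows; the class name and the 10 GB / 64 KB / 40-byte inputs are illustrative assumptions, not values from HFile itself:

/** Rough HFile index memory estimate per the javadoc rule of thumb:
 *  (56 + avgKeySize) * numBlocks, with numBlocks ~ totalDataSize / blockSize. */
public class HFileIndexEstimate {
  static long estimateIndexBytes(long totalDataSize, long blockSize, long avgKeySize) {
    long numBlocks = (totalDataSize + blockSize - 1) / blockSize; // ceiling division
    return (56 + avgKeySize) * numBlocks;
  }

  public static void main(String[] args) {
    // Example: a 10 GB file, 64 KB blocks, 40-byte average keys -> ~15 MB of index.
    long estimate = estimateIndexBytes(10L << 30, 64L << 10, 40);
    System.out.printf("Estimated index memory: %.1f MB%n", estimate / (1024.0 * 1024.0));
  }
}

Doubling the block size roughly halves the index memory, which is the trade-off the javadoc describes between sequential-scan efficiency and random-access cost.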
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6dd31117/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html index c2e9c5e..dc98b76 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html @@ -2498,14 +2498,16 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. booleantreatFailureAsError) -RegionMonitor(Connectionconnection, +RegionMonitor(Connectionconnection, http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String[]monitorTargets, booleanuseRegExp, - Canary.Sinksink, + Canary.RegionStdOutSinksink, http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true; title="class or interface in java.util.concurrent">ExecutorServiceexecutor, booleanwriteSniffing, TableNamewriteTableName, - booleantreatFailureAsError) + booleantreatFailureAsError, + http://docs.oracle.com/javase/8/docs/api/java/util/HashMap.html?is-external=true; title="class or interface in java.util">HashMaphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true; title="class or interface in java.lang">LongconfiguredReadTableTimeouts, + longconfiguredWriteTableTimeout) RegionServerMonitor(Connectionconnection, @@ -2524,12 +2526,13 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLongsuccesses) -RegionTask(Connectionconnection, +RegionTask(Connectionconnection, HRegionInforegion, ServerNameserverName, - Canary.Sinksink, + Canary.RegionStdOutSinksink, Canary.RegionTask.TaskTypetaskType, - booleanrawScanEnabled) + booleanrawScanEnabled, + http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLongrwLatency) ZookeeperMonitor(Connectionconnection, http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6dd31117/devapidocs/org/apache/hadoop/hbase/client/class-use/MasterSwitchType.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/MasterSwitchType.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/MasterSwitchType.html index d986927..8928861 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/MasterSwitchType.html +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/MasterSwitchType.html @@ -236,11 +236,15 @@ the order they are declared. 
+boolean +MasterServices.isSplitOrMergeEnabled(MasterSwitchTypeswitchType) + + void MasterCoprocessorHost.postSetSplitOrMergeEnabled(booleannewValue, MasterSwitchTypeswitchType) - + boolean MasterCoprocessorHost.preSetSplitOrMergeEnabled(booleannewValue, MasterSwitchTypeswitchType) http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6dd31117/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html index 275c9a6..acec8b0 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html @@ -452,7 +452,9 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. RegionObserver.preSplitBeforePONR(ObserverContextRegionCoprocessorEnvironmentctx, byte[]splitKey, http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListMutationmetaEntries) -Deprecated. +Deprecated. +No longer called in hbase2/AMv2 given the master runs splits now; + http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6dd31117/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
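The widened RegionMonitor constructor shown above adds a HashMap<String,Long> of per-table read timeouts (configuredReadTableTimeouts) plus a single write-table timeout. How that map gets populated is not part of this diff; the parser below is a hypothetical sketch assuming a comma-separated "table=millis" command-line option (the ReadTimeoutParser name and the option format are invented for illustration):

import java.util.HashMap;

/** Hypothetical parser for a per-table read-timeout option of the form
 *  "table1=60000,table2=30000", producing the HashMap<String,Long> shape
 *  expected by the new RegionMonitor constructor parameter. */
public class ReadTimeoutParser {
  static HashMap<String, Long> parse(String spec) {
    HashMap<String, Long> timeouts = new HashMap<>();
    for (String entry : spec.split(",")) {
      String[] kv = entry.split("=");
      timeouts.put(kv[0].trim(), Long.parseLong(kv[1].trim()));
    }
    return timeouts;
  }

  public static void main(String[] args) {
    System.out.println(parse("usertable=60000,meta=30000")); // {usertable=60000, meta=30000}
  }
}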
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/client/MasterKeepAliveConnection.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/MasterKeepAliveConnection.html b/devapidocs/org/apache/hadoop/hbase/client/MasterKeepAliveConnection.html index b53ee9d..061614e 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/MasterKeepAliveConnection.html +++ b/devapidocs/org/apache/hadoop/hbase/client/MasterKeepAliveConnection.html @@ -148,7 +148,7 @@ extends org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterSer Methods inherited from interfaceorg.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterface -abortProcedure, addColumn, addReplicationPeer, assignRegion, balance, createNamespace, createTable, deleteColumn, deleteNamespace, deleteSnapshot, deleteTable, disableReplicationPeer, disableTable, drainRegionServers, enableCatalogJanitor, enableReplicationPeer, enableTable, execMasterService, execProcedure, execProcedureWithRet, getClusterStatus, getCompletedSnapshots, getLastMajorCompactionTimestamp, getLastMajorCompactionTimestampForRegion, getNamespaceDescriptor, getProcedureResult, getReplicationPeerConfig, getSchemaAlterStatus, getSecurityCapabilities, getTableDescriptors, getTableNames, getTableState, isBalancerEnabled, isCatalogJanitorEnabled, isCleanerChoreEnabled, isMasterInMaintenanceMode, isMasterRunning, isNormalizerEnabled, isProcedureDone, isSnapshotDone, isSplitOrMergeEnabled, listDrainingRegionServers, listLocks, listNamespaceDescriptors, listProcedures, listReplicationPeers, listTableDescriptorsByNamespace, listTableNamesByNamespace, mergeTableRegions, modifyColumn, modifyNamespace, modifyTable, moveRegion, normalize, offlineRegion, removeDrainFromRegionServers, removeReplicationPeer, restoreSnapshot, runCatalogScan, runCleanerChore, setBalancerRunning, setCleanerChoreRunning, setNormalizerRunning, setQuota, setSplitOrMergeEnabled, shutdown, snapshot, stopMaster, truncateTable, unassignRegion, updateReplicationPeerConfig +abortProcedure, addColumn, addReplicationPeer, assignRegion, balance, createNamespace, createTable, deleteColumn, deleteNamespace, deleteSnapshot, deleteTable, disableReplicationPeer, disableTable, drainRegionServers, enableCatalogJanitor, enableReplicationPeer, enableTable, execMasterService, execProcedure, execProcedureWithRet, getClusterStatus, getCompletedSnapshots, getLastMajorCompactionTimestamp, getLastMajorCompactionTimestampForRegion, getNamespaceDescriptor, getProcedureResult, getQuotaStates, getReplicationPeerConfig, getSchemaAlterStatus, getSecurityCapabilities, getSpaceQuotaRegionSizes, getTableDescriptors, getTableNames, getTableState, isBalancerEnabled, isCatalogJanitorEnabled, isCleanerChoreEnabled, isMasterInMaintenanceMode, isMasterRunning, isNormalizerEnabled, isProcedureDone, isSnapshotDone, isSplitOrMergeEnabled, listDrainingRegionServers, listLocks, listNamespaceDescriptors, listProcedures, listReplicationPeers, listTableDescriptorsByNamespace, listTableNamesByNamespace, mergeTableRegions, modifyColumn, modifyNamespace, modifyTable, moveRegion, normalize, offlineRegion, removeDrainFromRegionServers, removeReplicationPeer, restoreSnapshot, runCatalogScan, runCleanerChore, setBalancerRunning, setCleanerChoreRunning, setNormalizerRunning, setQuota, setSplitOrMergeEnabled, shutdown, snapshot, stopMaster, truncateTable, unassignRegion, updateReplicationPeerConfig
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/client/Query.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/Query.html b/devapidocs/org/apache/hadoop/hbase/client/Query.html index 65752f6..6485695 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/Query.html +++ b/devapidocs/org/apache/hadoop/hbase/client/Query.html @@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab"; PrevClass -NextClass +NextClass Frames @@ -832,7 +832,7 @@ extends PrevClass -NextClass +NextClass Frames http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/client/QuotaStatusCalls.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/QuotaStatusCalls.html b/devapidocs/org/apache/hadoop/hbase/client/QuotaStatusCalls.html new file mode 100644 index 000..d0a444e --- /dev/null +++ b/devapidocs/org/apache/hadoop/hbase/client/QuotaStatusCalls.html @@ -0,0 +1,415 @@ +QuotaStatusCalls (Apache HBase 2.0.0-SNAPSHOT API) -var methods = {"i0":10,"i1":10,"i2":10,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":9,"i10":9}; var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -113,7 +113,8 @@ var activeTableTab = "activeTableTab"; -public final class Canary +@InterfaceAudience.Private +public final class Canary extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object implements org.apache.hadoop.util.Tool HBase Canary Tool that can be used to do
- - - -static void -sniff(Adminadmin, - TableNametableName, - Canary.RegionTask.TaskTypetaskType) -Canary entry point for specified table with task type(read/write) - Keeping this method backward compatible - - - -static void -sniff(Adminadmin, - TableNametableName, - Canary.RegionTask.TaskTypetaskType, - booleanrawScanEnabled) -Canary entry point for specified table with task type(read/write) - - @@ -467,7 +435,7 @@ implements org.apache.hadoop.util.Tool USAGE_EXIT_CODE -private static finalint USAGE_EXIT_CODE +private static finalint USAGE_EXIT_CODE See Also: Constant Field Values @@ -480,7 +448,7 @@ implements org.apache.hadoop.util.Tool INIT_ERROR_EXIT_CODE -private static finalint INIT_ERROR_EXIT_CODE +private static finalint INIT_ERROR_EXIT_CODE See Also: Constant Field Values @@ -493,7 +461,7 @@ implements org.apache.hadoop.util.Tool TIMEOUT_ERROR_EXIT_CODE -private static finalint TIMEOUT_ERROR_EXIT_CODE +private static finalint TIMEOUT_ERROR_EXIT_CODE See Also: Constant Field Values @@ -506,7 +474,7 @@ implements org.apache.hadoop.util.Tool ERROR_EXIT_CODE -private static finalint ERROR_EXIT_CODE +private static finalint ERROR_EXIT_CODE See Also: Constant Field Values @@ -519,7 +487,7 @@ implements org.apache.hadoop.util.Tool FAILURE_EXIT_CODE -private static finalint FAILURE_EXIT_CODE +private static finalint FAILURE_EXIT_CODE See Also: Constant Field Values @@ -532,7 +500,7 @@ implements org.apache.hadoop.util.Tool DEFAULT_INTERVAL -private static finallong DEFAULT_INTERVAL +private static finallong DEFAULT_INTERVAL See Also: Constant Field Values @@ -545,7 +513,7 @@ implements org.apache.hadoop.util.Tool DEFAULT_TIMEOUT -private static finallong DEFAULT_TIMEOUT +private static finallong DEFAULT_TIMEOUT See Also: Constant Field Values @@ -558,7 +526,7 @@ implements org.apache.hadoop.util.Tool MAX_THREADS_NUM -private static finalint MAX_THREADS_NUM +private static finalint MAX_THREADS_NUM See Also: Constant Field Values @@ -571,7 +539,7 @@ implements org.apache.hadoop.util.Tool LOG -private static finalorg.apache.commons.logging.Log LOG +private static finalorg.apache.commons.logging.Log LOG @@ -580,7 +548,7 @@ implements org.apache.hadoop.util.Tool DEFAULT_WRITE_TABLE_NAME -public static finalTableName DEFAULT_WRITE_TABLE_NAME +public static finalTableName DEFAULT_WRITE_TABLE_NAME @@ -589,7 +557,7 @@ implements org.apache.hadoop.util.Tool CANARY_TABLE_FAMILY_NAME -private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String CANARY_TABLE_FAMILY_NAME +private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String CANARY_TABLE_FAMILY_NAME See Also: Constant Field Values @@ -602,7 +570,7 @@ implements org.apache.hadoop.util.Tool conf -privateorg.apache.hadoop.conf.Configuration conf +privateorg.apache.hadoop.conf.Configuration conf @@ -611,7 +579,7 @@ implements org.apache.hadoop.util.Tool interval -privatelong interval +privatelong interval @@ -620,7 +588,7 @@ implements org.apache.hadoop.util.Tool sink -privateCanary.Sink sink +privateCanary.Sink sink
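With the static sniff entry points removed above, the supported path is the command line: Canary implements org.apache.hadoop.util.Tool (per the class declaration), so it can be driven through ToolRunner. A hedged usage sketch, assuming the org.apache.hadoop.hbase.tool.Canary class name and a no-argument constructor; "myTable" is a placeholder table name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.util.ToolRunner;

public class CanarySniff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Region mode against a single table; the argument list mirrors the CLI.
    // The class name and no-arg constructor are assumptions, not shown in this diff.
    int exitCode = ToolRunner.run(conf, new org.apache.hadoop.hbase.tool.Canary(),
        new String[] { "myTable" });
    System.exit(exitCode); // non-zero results map to the *_EXIT_CODE constants above
  }
}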
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e4348f53/devapidocs/org/apache/hadoop/hbase/regionserver/ChunkCreator.MemStoreChunkPool.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/ChunkCreator.MemStoreChunkPool.html b/devapidocs/org/apache/hadoop/hbase/regionserver/ChunkCreator.MemStoreChunkPool.html new file mode 100644 index 000..32185d0 --- /dev/null +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/ChunkCreator.MemStoreChunkPool.html @@ -0,0 +1,508 @@ +ChunkCreator.MemStoreChunkPool (Apache HBase 2.0.0-SNAPSHOT API) +org.apache.hadoop.hbase.regionserver +Class ChunkCreator.MemStoreChunkPool +java.lang.Object + org.apache.hadoop.hbase.regionserver.ChunkCreator.MemStoreChunkPool +All Implemented Interfaces: HeapMemoryManager.HeapMemoryTuneObserver +Enclosing class: ChunkCreator +private class ChunkCreator.MemStoreChunkPool +extends Object +implements HeapMemoryManager.HeapMemoryTuneObserver +A pool of Chunk instances. MemStoreChunkPool caches a number of retired chunks for reuse; this reduces allocation during writes and thereby eases garbage collection on the JVM.
+ + + + + + + + + + + +Nested Class Summary + +Nested Classes + +Modifier and Type +Class and Description + + +private class +ChunkCreator.MemStoreChunkPool.StatisticsThread + + + + + + + + + +Field Summary + +Fields + +Modifier and Type +Field and Description + + +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong +chunkCount + + +private int +maxCount + + +private float +poolSizePercentage + + +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/BlockingQueue.html?is-external=true; title="class or interface in java.util.concurrent">BlockingQueueChunk +reclaimedChunks + + +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong +reusedChunkCount + + +private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ScheduledExecutorService.html?is-external=true; title="class or interface in java.util.concurrent">ScheduledExecutorService +scheduleThreadPool +Statistics thread schedule pool + + + +private static int +statThreadPeriod +Statistics thread + + + + + + + + + + +Constructor Summary + +Constructors + +Constructor and Description + + +MemStoreChunkPool(intmaxCount, + intinitialCount, + floatpoolSizePercentage) + + + + + + + + + +Method Summary + +All MethodsInstance MethodsConcrete Methods + +Modifier and Type +Method and Description + + +(package private) Chunk +getChunk() +Poll a chunk from the pool, reset it if not null, else create a new chunk to return if we have + not yet created max allowed chunks count. + + + +private int +getMaxCount() + + +void +onHeapMemoryTune(longnewMemstoreSize, +longnewBlockCacheSize) +This method would be called by HeapMemoryManger when a heap memory tune action took place. + + + +private void +putbackChunks(http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true; title="class or interface in java.util">Sethttp://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true; title="class or interface in java.lang">Integerchunks) +Add the chunks to the pool, when the pool achieves the max size, it will skip the remaining + chunks + + + + + + +
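The getChunk()/putbackChunks() contract described above (reuse a reclaimed chunk when one is available, create new chunks only while under the max count, drop returns once the pool is full) is a standard bounded object pool. Below is a minimal self-contained sketch of that pattern, not the actual ChunkCreator code: chunks are modeled as plain byte[] instead of HBase Chunk objects, and the statistics thread is omitted:

import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicLong;

/** Minimal sketch of a bounded chunk pool: reuse retired chunks up to a cap. */
class SimpleChunkPool {
  private final int maxCount;                          // max chunks the pool may create
  private final int chunkSize;
  private final BlockingQueue<byte[]> reclaimedChunks; // retired chunks available for reuse
  private final AtomicLong chunkCount = new AtomicLong();
  private final AtomicLong reusedChunkCount = new AtomicLong();

  SimpleChunkPool(int maxCount, int initialCount, int chunkSize) {
    this.maxCount = maxCount;
    this.chunkSize = chunkSize;
    this.reclaimedChunks = new LinkedBlockingQueue<>(maxCount);
    for (int i = 0; i < initialCount; i++) {
      reclaimedChunks.offer(new byte[chunkSize]);
      chunkCount.incrementAndGet();
    }
  }

  /** Poll a chunk from the pool; else create a new one if still under the cap. */
  byte[] getChunk() {
    byte[] chunk = reclaimedChunks.poll();
    if (chunk != null) {
      reusedChunkCount.incrementAndGet();
      return chunk;
    }
    while (true) { // CAS loop so concurrent callers cannot exceed maxCount
      long created = chunkCount.get();
      if (created >= maxCount) {
        return null; // cap reached: caller must allocate outside the pool
      }
      if (chunkCount.compareAndSet(created, created + 1)) {
        return new byte[chunkSize];
      }
    }
  }

  /** Return chunks to the pool; once the queue is full, the rest are skipped. */
  void putbackChunks(Set<byte[]> chunks) {
    for (byte[] c : chunks) {
      if (!reclaimedChunks.offer(c)) {
        break; // pool is at max size; drop the remaining chunks
      }
    }
  }
}

Note that the real pool tracks chunks by integer id (putbackChunks takes a Set of Integer ids) and resizes with heap-memory tuning via onHeapMemoryTune; both are simplified away here.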
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e57d1b63/apidocs/org/apache/hadoop/hbase/TableNotEnabledException.html -- diff --git a/apidocs/org/apache/hadoop/hbase/TableNotEnabledException.html b/apidocs/org/apache/hadoop/hbase/TableNotEnabledException.html deleted file mode 100644 index 645a51b..000 --- a/apidocs/org/apache/hadoop/hbase/TableNotEnabledException.html +++ /dev/null @@ -1,334 +0,0 @@ -TableNotEnabledException (Apache HBase 2.0.0-SNAPSHOT API) -org.apache.hadoop.hbase -Class TableNotEnabledException -java.lang.Object - java.lang.Throwable - java.lang.Exception - java.io.IOException - org.apache.hadoop.hbase.HBaseIOException - org.apache.hadoop.hbase.DoNotRetryIOException - org.apache.hadoop.hbase.TableNotEnabledException -All Implemented Interfaces: Serializable -@InterfaceAudience.Public -public class TableNotEnabledException -extends DoNotRetryIOException -Thrown if a table should be enabled but is not -See Also: Serialized Form -Constructor Summary -Constructors -Constructor and Description -TableNotEnabledException() default constructor -TableNotEnabledException(byte[]tableName) -TableNotEnabledException(Strings) Constructor -TableNotEnabledException(TableNametableName) -Method Summary -Methods inherited from classjava.lang.Throwable -addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, setStackTrace, toString
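Because TableNotEnabledException extends DoNotRetryIOException, clients should handle it directly rather than retry the operation. A minimal sketch against the public Admin API ("myTable" is a placeholder table name):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableIfEnabled {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("myTable"); // placeholder
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      try {
        admin.disableTable(table); // throws TableNotEnabledException if not enabled
      } catch (TableNotEnabledException e) {
        // DoNotRetryIOException subtype: retrying cannot succeed, so handle it here.
        System.out.println(table + " was already disabled");
      }
    }
  }
}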