http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
b/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
index 8ea63cd..1957877 100644
--- a/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
+++ b/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
@@ -1,10 +1,10 @@
http://www.w3.org/TR/html4/loose.dtd;>
-
+
-类 org.apache.hadoop.hbase.HRegionInfo的使用 (Apache HBase
3.0.0-SNAPSHOT API)
+Uses of Class org.apache.hadoop.hbase.HRegionInfo (Apache HBase
3.0.0-SNAPSHOT API)
@@ -12,7 +12,7 @@
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
-跳过导航链接
+Skip navigation links
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
-上一个
-下一个
+Prev
+Next
-框架
-无框架
+Frames
+NoFrames
-所有类
+AllClasses
-
-
-使用HRegionInfo的程序包
+
+Packages that use HRegionInfo
-程序包
|
-说明 |
+Package |
+Description |
@@ -89,17 +89,7 @@
org.apache.hadoop.hbase.client |
-
+ Provides HBase Client
|
@@ -110,34 +100,34 @@ Table of Contents
-
-
-
-声明为HRegionInfo的org.apache.hadoop.hbase中的字段
+
+
+Fields in org.apache.hadoop.hbase
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
b/apidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
index 9b34e4d..03beb7e 100644
--- a/apidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
+++ b/apidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
@@ -1,10 +1,10 @@
http://www.w3.org/TR/html4/loose.dtd;>
-
+
-Uses of Class org.apache.hadoop.hbase.HColumnDescriptor (Apache HBase
3.0.0-SNAPSHOT API)
+类 org.apache.hadoop.hbase.HColumnDescriptor的使用 (Apache HBase
3.0.0-SNAPSHOT API)
@@ -12,7 +12,7 @@
-JavaScript is disabled on your browser.
+您的浏览器已禁用 JavaScript。
-Skip navigation links
+跳过导航链接
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
+
+概览
+程序包
+类
+使用
+树
+已过时
+索引
+帮助
-Prev
-Next
+上一个
+下一个
-Frames
-NoFrames
+框架
+无框架
-AllClasses
+所有类
-
-
-Packages that use HColumnDescriptor
+
+使用HColumnDescriptor的程序包
-Package |
-Description |
+程序包
|
+说明 |
@@ -94,70 +94,70 @@
-
-
-
-Methods in org.apache.hadoop.hbase
that return HColumnDescriptor
+
+
+返回HColumnDescriptor的org.apache.hadoop.hbase中的方法
-Modifier and Type |
-Method and Description |
+限定符和类型 |
+方法和说明 |
-HColumnDescriptor[] |
+
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.StatisticsThread.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.StatisticsThread.html
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.StatisticsThread.html
index 67f4551..017124c 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.StatisticsThread.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.StatisticsThread.html
@@ -387,817 +387,804 @@
379}
380
381LruCachedBlock cb =
map.get(cacheKey);
-382if (cb != null) {
-383 int comparison =
BlockCacheUtil.validateBlockAddition(cb.getBuffer(), buf, cacheKey);
-384 if (comparison != 0) {
-385if (comparison < 0) {
-386 LOG.warn("Cached block contents
differ by nextBlockOnDiskSize. Keeping cached block.");
-387 return;
-388} else {
-389 LOG.warn("Cached block contents
differ by nextBlockOnDiskSize. Caching new block.");
-390}
-391 } else {
-392String msg = "Cached an already
cached block: " + cacheKey + " cb:" + cb.getCacheKey();
-393msg += ". This is harmless and
can happen in rare cases (see HBASE-8547)";
-394LOG.debug(msg);
-395return;
-396 }
-397}
-398long currentSize = size.get();
-399long currentAcceptableSize =
acceptableSize();
-400long hardLimitSize = (long)
(hardCapacityLimitFactor * currentAcceptableSize);
-401if (currentSize >= hardLimitSize)
{
-402 stats.failInsert();
-403 if (LOG.isTraceEnabled()) {
-404LOG.trace("LruBlockCache current
size " + StringUtils.byteDesc(currentSize)
-405 + " has exceeded acceptable
size " + StringUtils.byteDesc(currentAcceptableSize) + "."
-406 + " The hard limit size is " +
StringUtils.byteDesc(hardLimitSize)
-407 + ", failed to put cacheKey:" +
cacheKey + " into LruBlockCache.");
-408 }
-409 if (!evictionInProgress) {
-410runEviction();
-411 }
-412 return;
-413}
-414cb = new LruCachedBlock(cacheKey,
buf, count.incrementAndGet(), inMemory);
-415long newSize = updateSizeMetrics(cb,
false);
-416map.put(cacheKey, cb);
-417long val =
elements.incrementAndGet();
-418if (buf.getBlockType().isData()) {
-419 dataBlockElements.increment();
-420}
-421if (LOG.isTraceEnabled()) {
-422 long size = map.size();
-423 assertCounterSanity(size, val);
-424}
-425if (newSize >
currentAcceptableSize && !evictionInProgress) {
-426 runEviction();
-427}
-428 }
-429
-430 /**
-431 * Sanity-checking for parity between
actual block cache content and metrics.
-432 * Intended only for use with TRACE
level logging and -ea JVM.
-433 */
-434 private static void
assertCounterSanity(long mapSize, long counterVal) {
-435if (counterVal < 0) {
-436 LOG.trace("counterVal overflow.
Assertions unreliable. counterVal=" + counterVal +
-437", mapSize=" + mapSize);
-438 return;
-439}
-440if (mapSize < Integer.MAX_VALUE)
{
-441 double pct_diff =
Math.abs((((double) counterVal) / ((double) mapSize)) - 1.);
-442 if (pct_diff > 0.05) {
-443LOG.trace("delta between reported
and actual size > 5%. counterVal=" + counterVal +
-444 ", mapSize=" + mapSize);
-445 }
-446}
-447 }
-448
-449 /**
-450 * Cache the block with the specified
name and buffer.
-451 * p
-452 *
-453 * @param cacheKey block's cache key
-454 * @param buf block buffer
-455 */
-456 @Override
-457 public void cacheBlock(BlockCacheKey
cacheKey, Cacheable buf) {
-458cacheBlock(cacheKey, buf, false);
-459 }
-460
-461 /**
-462 * Helper function that updates the
local size counter and also updates any
-463 * per-cf or per-blocktype metrics it
can discern from given
-464 * {@link LruCachedBlock}
-465 */
-466 private long
updateSizeMetrics(LruCachedBlock cb, boolean evict) {
-467long heapsize = cb.heapSize();
-468BlockType bt =
cb.getBuffer().getBlockType();
-469if (evict) {
-470 heapsize *= -1;
-471}
-472if (bt != null &&
bt.isData()) {
-473 dataBlockSize.add(heapsize);
-474}
-475return size.addAndGet(heapsize);
-476 }
-477
-478 /**
-479 * Get the buffer of the block with the
specified name.
-480 *
-481 * @param cacheKey block's
cache key
-482 * @param caching true if
the caller caches blocks on cache misses
-483 * @param repeat Whether
this is a repeat lookup for the same block
-484 * (used to
avoid double counting cache misses when doing double-check
-485 * locking)
-486 * @param updateCacheMetrics Whether to
update cache metrics or not
-487 *
-488 *
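
The cacheBlock hunk above refuses a new insert once the cache has grown past a hard multiple of its acceptable size and falls back to eviction instead. A minimal sketch of that guard, assuming a simplified cache; the names SimpleBlockCache and acceptableBytes, and the String/byte[] types, are illustrative stand-ins, not the real LruBlockCache internals:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

public class SimpleBlockCache {
  private final ConcurrentHashMap<String, byte[]> map = new ConcurrentHashMap<>();
  private final AtomicLong size = new AtomicLong();
  private final long acceptableBytes;          // soft target the cache evicts toward
  private final float hardCapacityLimitFactor; // e.g. 1.2f: 20% slack above the target

  public SimpleBlockCache(long acceptableBytes, float hardCapacityLimitFactor) {
    this.acceptableBytes = acceptableBytes;
    this.hardCapacityLimitFactor = hardCapacityLimitFactor;
  }

  /** Returns false when the insert is refused because the hard limit is reached. */
  public boolean cacheBlock(String key, byte[] block) {
    long hardLimitSize = (long) (hardCapacityLimitFactor * acceptableBytes);
    if (size.get() >= hardLimitSize) {
      return false; // mirrors the stats.failInsert() + early return in the hunk
    }
    if (map.putIfAbsent(key, block) == null) {
      size.addAndGet(block.length);
    }
    return true;
  }
}
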
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
index c10cfbf..a3e2f4a 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
@@ -3371,7 +3371,7 @@
3363private V result = null;
3364
3365private final HBaseAdmin admin;
-3366private final Long procId;
+3366protected final Long procId;
3367
3368public ProcedureFuture(final
HBaseAdmin admin, final Long procId) {
3369 this.admin = admin;
@@ -3653,653 +3653,651 @@
3645 * @return a description of the
operation
3646 */
3647protected String getDescription()
{
-3648 return "Operation: " +
getOperationType() + ", "
-3649 + "Table Name: " +
tableName.getNameWithNamespaceInclAsString();
-3650
-3651}
-3652
-3653protected abstract class
TableWaitForStateCallable implements WaitForStateCallable {
-3654 @Override
-3655 public void
throwInterruptedException() throws InterruptedIOException {
-3656throw new
InterruptedIOException("Interrupted while waiting for operation: "
-3657+ getOperationType() + " on
table: " + tableName.getNameWithNamespaceInclAsString());
-3658 }
-3659
-3660 @Override
-3661 public void
throwTimeoutException(long elapsedTime) throws TimeoutException {
-3662throw new TimeoutException("The
operation: " + getOperationType() + " on table: " +
-3663tableName.getNameAsString()
+ " has not completed after " + elapsedTime + "ms");
-3664 }
-3665}
-3666
-3667@Override
-3668protected V
postOperationResult(final V result, final long deadlineTs)
-3669throws IOException,
TimeoutException {
-3670 LOG.info(getDescription() + "
completed");
-3671 return
super.postOperationResult(result, deadlineTs);
-3672}
-3673
-3674@Override
-3675protected V
postOperationFailure(final IOException exception, final long deadlineTs)
-3676throws IOException,
TimeoutException {
-3677 LOG.info(getDescription() + "
failed with " + exception.getMessage());
-3678 return
super.postOperationFailure(exception, deadlineTs);
-3679}
-3680
-3681protected void
waitForTableEnabled(final long deadlineTs)
-3682throws IOException,
TimeoutException {
-3683 waitForState(deadlineTs, new
TableWaitForStateCallable() {
-3684@Override
-3685public boolean checkState(int
tries) throws IOException {
-3686 try {
-3687if
(getAdmin().isTableAvailable(tableName)) {
-3688 return true;
-3689}
-3690 } catch
(TableNotFoundException tnfe) {
-3691LOG.debug("Table " +
tableName.getNameWithNamespaceInclAsString()
-3692+ " was not enabled,
sleeping. tries=" + tries);
-3693 }
-3694 return false;
-3695}
-3696 });
-3697}
-3698
-3699protected void
waitForTableDisabled(final long deadlineTs)
-3700throws IOException,
TimeoutException {
-3701 waitForState(deadlineTs, new
TableWaitForStateCallable() {
-3702@Override
-3703public boolean checkState(int
tries) throws IOException {
-3704 return
getAdmin().isTableDisabled(tableName);
-3705}
-3706 });
-3707}
-3708
-3709protected void
waitTableNotFound(final long deadlineTs)
-3710throws IOException,
TimeoutException {
-3711 waitForState(deadlineTs, new
TableWaitForStateCallable() {
-3712@Override
-3713public boolean checkState(int
tries) throws IOException {
-3714 return
!getAdmin().tableExists(tableName);
-3715}
-3716 });
-3717}
-3718
-3719protected void
waitForSchemaUpdate(final long deadlineTs)
-3720throws IOException,
TimeoutException {
-3721 waitForState(deadlineTs, new
TableWaitForStateCallable() {
-3722@Override
-3723public boolean checkState(int
tries) throws IOException {
-3724 return
getAdmin().getAlterStatus(tableName).getFirst() == 0;
-3725}
-3726 });
-3727}
-3728
-3729protected void
waitForAllRegionsOnline(final long deadlineTs, final byte[][] splitKeys)
-3730throws IOException,
TimeoutException {
-3731 final TableDescriptor desc =
getTableDescriptor();
-3732 final AtomicInteger actualRegCount
= new AtomicInteger(0);
-3733 final MetaTableAccessor.Visitor
visitor = new MetaTableAccessor.Visitor() {
-3734@Override
-3735public boolean visit(Result
rowResult) throws
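
The waitForTableEnabled/Disabled/NotFound helpers above all wrap the same idea: poll a checkState predicate until it passes or a deadline expires. A rough, self-contained version of that loop; the WaitUtil class and its simple backoff are assumptions for illustration, not the actual HBaseAdmin code:

import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

public final class WaitUtil {
  private WaitUtil() {}

  /** Polls check until it returns true or deadlineTs (epoch millis) passes. */
  public static void waitForState(long deadlineTs, Supplier<Boolean> check)
      throws TimeoutException, InterruptedException {
    int tries = 0;
    while (System.currentTimeMillis() < deadlineTs) {
      if (check.get()) {
        return;
      }
      tries++;
      Thread.sleep(Math.min(1000L, 100L * tries)); // crude backoff between polls
    }
    throw new TimeoutException("condition not met before deadline");
  }
}

A caller supplies a predicate equivalent to the checkState(int tries) bodies shown in the hunk, such as a table-disabled or table-not-found check.
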
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALManager.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALManager.html
b/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALManager.html
new file mode 100644
index 000..c449893
--- /dev/null
+++
b/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALManager.html
@@ -0,0 +1,700 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+SyncReplicationReplayWALManager (Apache HBase 3.0.0-SNAPSHOT
API)
+
+
+
+
+
+var methods =
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.master.replication
+Class
SyncReplicationReplayWALManager
+
+
+
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager
+
+
+
+
+
+
+
+
+@InterfaceAudience.Private
+public class SyncReplicationReplayWALManager
+extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+private org.apache.hadoop.fs.FileSystem
+fs
+
+
+private static org.slf4j.Logger
+LOG
+
+
+private org.apache.hadoop.fs.Path
+remoteWALDir
+
+
+private MasterServices
+services
+
+
+private org.apache.hadoop.fs.Path
+walRootDir
+
+
+private https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
+workerLock
+
+
+private https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">String,https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
title="class or interface in java.util">SetServerName
+workers
+
+
+private ZKSyncReplicationReplayWALWorkerStorage
+workerStorage
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+SyncReplicationReplayWALManager(MasterServicesservices)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+private void
+checkReplayingWALDir()
+
+
+void
+createPeerRemoteWALDir(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in
java.lang">StringpeerId)
+
+
+private void
+deleteDir(org.apache.hadoop.fs.Pathdir,
+ https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in
java.lang">StringpeerId)
+
+
+void
+finishReplayWAL(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">Stringwal)
+
+
+ServerName
+getPeerWorker(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in
java.lang">StringpeerId)
+
+
+org.apache.hadoop.fs.Path
+getRemoteWALDir()
+
+
+https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in
java.util">Listorg.apache.hadoop.fs.Path
+getReplayWALsAndCleanUpUnusedFiles(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in
java.lang">StringpeerId)
+
+
+boolean
+isReplayWALFinished(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">Stringwal)
+
+
+void
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/src-html/org/apache/hadoop/hbase/RegionMetricsBuilder.RegionMetricsImpl.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/RegionMetricsBuilder.RegionMetricsImpl.html
b/devapidocs/src-html/org/apache/hadoop/hbase/RegionMetricsBuilder.RegionMetricsImpl.html
index 49e37f9..3d2c9cb 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/RegionMetricsBuilder.RegionMetricsImpl.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/RegionMetricsBuilder.RegionMetricsImpl.html
@@ -65,392 +65,410 @@
057
.setLastMajorCompactionTimestamp(regionLoadPB.getLastMajorCompactionTs())
058.setMemStoreSize(new
Size(regionLoadPB.getMemStoreSizeMB(), Size.Unit.MEGABYTE))
059
.setReadRequestCount(regionLoadPB.getReadRequestsCount())
-060
.setWriteRequestCount(regionLoadPB.getWriteRequestsCount())
-061.setStoreFileIndexSize(new
Size(regionLoadPB.getStorefileIndexSizeKB(),
-062 Size.Unit.KILOBYTE))
-063
.setStoreFileRootLevelIndexSize(new Size(regionLoadPB.getRootIndexSizeKB(),
-064 Size.Unit.KILOBYTE))
-065
.setStoreCount(regionLoadPB.getStores())
-066
.setStoreFileCount(regionLoadPB.getStorefiles())
-067.setStoreFileSize(new
Size(regionLoadPB.getStorefileSizeMB(), Size.Unit.MEGABYTE))
-068
.setStoreSequenceIds(regionLoadPB.getStoreCompleteSequenceIdList().stream()
-069 .collect(Collectors.toMap(
-070
(ClusterStatusProtos.StoreSequenceId s) ->
s.getFamilyName().toByteArray(),
-071
ClusterStatusProtos.StoreSequenceId::getSequenceId)))
-072.setUncompressedStoreFileSize(
-073 new
Size(regionLoadPB.getStoreUncompressedSizeMB(),Size.Unit.MEGABYTE))
-074.build();
-075 }
-076
-077 private static
List<ClusterStatusProtos.StoreSequenceId> toStoreSequenceId(
-078 Map<byte[], Long> ids) {
-079return ids.entrySet().stream()
-080.map(e ->
ClusterStatusProtos.StoreSequenceId.newBuilder()
-081
.setFamilyName(UnsafeByteOperations.unsafeWrap(e.getKey()))
-082 .setSequenceId(e.getValue())
-083 .build())
-084.collect(Collectors.toList());
-085 }
-086
-087 public static
ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMetrics) {
-088return
ClusterStatusProtos.RegionLoad.newBuilder()
-089
.setRegionSpecifier(HBaseProtos.RegionSpecifier
-090
.newBuilder().setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
-091
.setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName()))
-092 .build())
-093.setTotalStaticBloomSizeKB((int)
regionMetrics.getBloomFilterSize()
-094 .get(Size.Unit.KILOBYTE))
-095
.setCurrentCompactedKVs(regionMetrics.getCompactedCellCount())
-096
.setTotalCompactingKVs(regionMetrics.getCompactingCellCount())
-097
.setCompleteSequenceId(regionMetrics.getCompletedSequenceId())
-098
.setDataLocality(regionMetrics.getDataLocality())
-099
.setFilteredReadRequestsCount(regionMetrics.getFilteredReadRequestCount())
-100.setTotalStaticIndexSizeKB((int)
regionMetrics.getStoreFileUncompressedDataIndexSize()
-101 .get(Size.Unit.KILOBYTE))
-102
.setLastMajorCompactionTs(regionMetrics.getLastMajorCompactionTimestamp())
-103.setMemStoreSizeMB((int)
regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE))
-104
.setReadRequestsCount(regionMetrics.getReadRequestCount())
-105
.setWriteRequestsCount(regionMetrics.getWriteRequestCount())
-106.setStorefileIndexSizeKB((long)
regionMetrics.getStoreFileIndexSize()
-107 .get(Size.Unit.KILOBYTE))
-108.setRootIndexSizeKB((int)
regionMetrics.getStoreFileRootLevelIndexSize()
+060
.setCpRequestCount(regionLoadPB.getCpRequestsCount())
+061
.setWriteRequestCount(regionLoadPB.getWriteRequestsCount())
+062.setStoreFileIndexSize(new
Size(regionLoadPB.getStorefileIndexSizeKB(),
+063 Size.Unit.KILOBYTE))
+064
.setStoreFileRootLevelIndexSize(new Size(regionLoadPB.getRootIndexSizeKB(),
+065 Size.Unit.KILOBYTE))
+066
.setStoreCount(regionLoadPB.getStores())
+067
.setStoreFileCount(regionLoadPB.getStorefiles())
+068.setStoreFileSize(new
Size(regionLoadPB.getStorefileSizeMB(), Size.Unit.MEGABYTE))
+069
.setStoreSequenceIds(regionLoadPB.getStoreCompleteSequenceIdList().stream()
+070 .collect(Collectors.toMap(
+071
(ClusterStatusProtos.StoreSequenceId s) ->
s.getFamilyName().toByteArray(),
+072
ClusterStatusProtos.StoreSequenceId::getSequenceId)))
+073.setUncompressedStoreFileSize(
+074 new
Size(regionLoadPB.getStoreUncompressedSizeMB(),Size.Unit.MEGABYTE))
+075.build();
+076 }
+077
+078
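
Both directions of the conversion above move between the protobuf's plain KB/MB integer fields and the Size value type via Size.Unit, exactly as the setStorefileIndexSizeKB/setMemStoreSizeMB calls show. A small round-trip example; the 2048 KB figure is arbitrary, and the snippet assumes only the org.apache.hadoop.hbase.Size calls already visible in the hunk:

import org.apache.hadoop.hbase.Size;

public class SizeRoundTrip {
  public static void main(String[] args) {
    // The RegionLoad protobuf stores storefileIndexSizeKB as an integer in KB;
    // RegionMetrics exposes the same figure as a Size.
    Size indexSize = new Size(2048, Size.Unit.KILOBYTE);
    long backToKb = (long) indexSize.get(Size.Unit.KILOBYTE); // 2048
    int asMb = (int) indexSize.get(Size.Unit.MEGABYTE);       // 2
    System.out.println(backToKb + " KB == " + asMb + " MB");
  }
}
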
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
index 541beed..1100e95 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
@@ -42,1015 +42,1038 @@
034import
java.util.concurrent.ConcurrentHashMap;
035import
java.util.concurrent.ConcurrentSkipListMap;
036import
java.util.concurrent.atomic.AtomicInteger;
-037
-038import
org.apache.hadoop.hbase.HConstants;
-039import
org.apache.hadoop.hbase.ServerName;
-040import
org.apache.hadoop.hbase.TableName;
-041import
org.apache.hadoop.hbase.client.RegionInfo;
-042import
org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-043import
org.apache.hadoop.hbase.master.RegionState;
-044import
org.apache.hadoop.hbase.master.RegionState.State;
-045import
org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-046import
org.apache.hadoop.hbase.util.Bytes;
-047import
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-048import
org.apache.yetus.audience.InterfaceAudience;
-049import org.slf4j.Logger;
-050import org.slf4j.LoggerFactory;
-051import
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-052
-053/**
-054 * RegionStates contains a set of Maps
that describes the in-memory state of the AM, with
-055 * the regions available in the system,
the region in transition, the offline regions and
-056 * the servers holding regions.
-057 */
-058@InterfaceAudience.Private
-059public class RegionStates {
-060 private static final Logger LOG =
LoggerFactory.getLogger(RegionStates.class);
-061
-062 protected static final State[]
STATES_EXPECTED_ON_OPEN = new State[] {
-063State.OPEN, // State may already be
OPEN if we died after receiving the OPEN from regionserver
-064// but before complete
finish of AssignProcedure. HBASE-20100.
-065State.OFFLINE, State.CLOSED, //
disable/offline
-066State.SPLITTING, State.SPLIT, //
ServerCrashProcedure
-067State.OPENING, State.FAILED_OPEN, //
already in-progress (retrying)
-068 };
-069
-070 protected static final State[]
STATES_EXPECTED_ON_CLOSE = new State[] {
-071State.SPLITTING, State.SPLIT,
State.MERGING, // ServerCrashProcedure
-072State.OPEN, //
enabled/open
-073State.CLOSING //
already in-progress (retrying)
-074 };
-075
-076 private static class
AssignmentProcedureEvent extends ProcedureEvent<RegionInfo> {
-077public AssignmentProcedureEvent(final
RegionInfo regionInfo) {
-078 super(regionInfo);
-079}
-080 }
-081
-082 private static class ServerReportEvent
extends ProcedureEvent<ServerName> {
-083public ServerReportEvent(final
ServerName serverName) {
-084 super(serverName);
-085}
-086 }
-087
-088 /**
-089 * Current Region State.
-090 * In-memory only. Not persisted.
-091 */
-092 // Mutable/Immutable? Changes have to
be synchronized or not?
-093 // Data members are volatile which
seems to say multi-threaded access is fine.
-094 // In the below we do check and set but
the check state could change before
-095 // we do the set because no
synchronization which seems dodgy. Clear up
-096 // understanding here... how many
threads accessing? Do locks make it so one
-097 // thread at a time working on a single
Region's RegionStateNode? Lets presume
-098 // so for now. Odd is that elsewhere in
this RegionStates, we synchronize on
-099 // the RegionStateNode instance.
TODO.
-100 public static class RegionStateNode
implements Comparable<RegionStateNode> {
-101private final RegionInfo
regionInfo;
-102private final ProcedureEvent<?>
event;
-103
-104private volatile
RegionTransitionProcedure procedure = null;
-105private volatile ServerName
regionLocation = null;
-106private volatile ServerName lastHost
= null;
-107/**
-108 * A Region-in-Transition (RIT) moves
through states.
-109 * See {@link State} for complete
list. A Region that
-110 * is opened moves from OFFLINE =>
OPENING => OPENED.
-111 */
-112private volatile State state =
State.OFFLINE;
-113
-114/**
-115 * Updated whenever a call to {@link
#setRegionLocation(ServerName)}
-116 * or {@link #setState(State,
State...)}.
-117 */
-118private volatile long lastUpdate =
0;
-119
-120private volatile long openSeqNum =
HConstants.NO_SEQNUM;
-121
-122public RegionStateNode(final
RegionInfo regionInfo) {
-123 this.regionInfo = regionInfo;
-124 this.event = new
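
STATES_EXPECTED_ON_OPEN and STATES_EXPECTED_ON_CLOSE above act as whitelists: a transition is only legal if a node's current state is one of the expected ones. A toy version of that membership check; the State values mirror the hunk, but the StateCheck class itself is only illustrative:

import java.util.Arrays;

public class StateCheck {
  enum State { OFFLINE, OPENING, OPEN, CLOSING, CLOSED, SPLITTING, SPLIT, MERGING, FAILED_OPEN }

  static final State[] STATES_EXPECTED_ON_OPEN = {
    State.OPEN, State.OFFLINE, State.CLOSED,   // normal open and disable/offline cases
    State.SPLITTING, State.SPLIT,              // ServerCrashProcedure
    State.OPENING, State.FAILED_OPEN           // already in progress (retrying)
  };

  /** True if current is one of the expected states. */
  static boolean isInState(State current, State... expected) {
    return Arrays.asList(expected).contains(current);
  }

  public static void main(String[] args) {
    System.out.println(isInState(State.CLOSED, STATES_EXPECTED_ON_OPEN));  // true
    System.out.println(isInState(State.MERGING, STATES_EXPECTED_ON_OPEN)); // false
  }
}
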
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index 4b5d00c..96ecbf8 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -6,7 +6,7 @@
-001/*
+001/**
002 * Licensed to the Apache Software
Foundation (ASF) under one
003 * or more contributor license
agreements. See the NOTICE file
004 * distributed with this work for
additional information
@@ -23,1981 +23,1894 @@
015 * See the License for the specific
language governing permissions and
016 * limitations under the License.
017 */
-018
-019package
org.apache.hadoop.hbase.master.assignment;
-020
-021import java.io.IOException;
-022import java.util.ArrayList;
-023import java.util.Arrays;
-024import java.util.Collection;
-025import java.util.Collections;
-026import java.util.HashMap;
-027import java.util.HashSet;
-028import java.util.List;
-029import java.util.Map;
-030import java.util.Set;
-031import
java.util.concurrent.CopyOnWriteArrayList;
-032import java.util.concurrent.Future;
-033import java.util.concurrent.TimeUnit;
-034import
java.util.concurrent.atomic.AtomicBoolean;
-035import
java.util.concurrent.locks.Condition;
-036import
java.util.concurrent.locks.ReentrantLock;
-037import java.util.stream.Collectors;
-038import
org.apache.hadoop.conf.Configuration;
-039import
org.apache.hadoop.hbase.HBaseIOException;
-040import
org.apache.hadoop.hbase.HConstants;
-041import
org.apache.hadoop.hbase.PleaseHoldException;
-042import
org.apache.hadoop.hbase.RegionException;
-043import
org.apache.hadoop.hbase.RegionStateListener;
-044import
org.apache.hadoop.hbase.ServerName;
-045import
org.apache.hadoop.hbase.TableName;
-046import
org.apache.hadoop.hbase.YouAreDeadException;
-047import
org.apache.hadoop.hbase.client.RegionInfo;
-048import
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-049import
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-050import
org.apache.hadoop.hbase.client.Result;
-051import
org.apache.hadoop.hbase.client.TableState;
-052import
org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-053import
org.apache.hadoop.hbase.favored.FavoredNodesManager;
-054import
org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-055import
org.apache.hadoop.hbase.master.AssignmentListener;
-056import
org.apache.hadoop.hbase.master.LoadBalancer;
-057import
org.apache.hadoop.hbase.master.MasterServices;
-058import
org.apache.hadoop.hbase.master.MetricsAssignmentManager;
-059import
org.apache.hadoop.hbase.master.NoSuchProcedureException;
-060import
org.apache.hadoop.hbase.master.RegionPlan;
-061import
org.apache.hadoop.hbase.master.RegionState;
-062import
org.apache.hadoop.hbase.master.RegionState.State;
-063import
org.apache.hadoop.hbase.master.ServerListener;
-064import
org.apache.hadoop.hbase.master.TableStateManager;
-065import
org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
-066import
org.apache.hadoop.hbase.master.assignment.RegionStates.ServerState;
-067import
org.apache.hadoop.hbase.master.assignment.RegionStates.ServerStateNode;
-068import
org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer;
-069import
org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-070import
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-071import
org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
-072import
org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
-073import
org.apache.hadoop.hbase.master.procedure.ServerCrashException;
-074import
org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
-075import
org.apache.hadoop.hbase.procedure2.Procedure;
-076import
org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-077import
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-078import
org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore;
-079import
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-080import
org.apache.hadoop.hbase.regionserver.SequenceId;
-081import
org.apache.hadoop.hbase.util.Bytes;
-082import
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-083import
org.apache.hadoop.hbase.util.HasThread;
-084import
org.apache.hadoop.hbase.util.Pair;
-085import
org.apache.hadoop.hbase.util.Threads;
-086import
org.apache.hadoop.hbase.util.VersionInfo;
-087import
org.apache.yetus.audience.InterfaceAudience;
-088import org.slf4j.Logger;
-089import org.slf4j.LoggerFactory;
-090
-091import
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-092
-093import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.html
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.html
index e31f5c6..f4d1eb0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.html
@@ -31,277 +31,266 @@
023import java.util.Set;
024import
org.apache.hadoop.hbase.HConstants;
025import
org.apache.hadoop.hbase.client.RegionInfo;
-026import
org.apache.hadoop.hbase.regionserver.wal.CompressionContext;
-027import
org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-028import
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-029import
org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
-030import
org.apache.hadoop.hbase.replication.regionserver.WALFileLengthProvider;
-031import
org.apache.yetus.audience.InterfaceAudience;
-032import
org.apache.yetus.audience.InterfaceStability;
-033
-034import
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-035
-036/**
-037 * A Write Ahead Log (WAL) provides
service for reading, writing waledits. This interface provides
-038 * APIs for WAL users (such as
RegionServer) to use the WAL (do append, sync, etc).
-039 *
-040 * Note that some internals, such as log
rolling and performance evaluation tools, will use
-041 * WAL.equals to determine if they have
already seen a given WAL.
-042 */
-043@InterfaceAudience.Private
-044@InterfaceStability.Evolving
-045public interface WAL extends Closeable,
WALFileLengthProvider {
-046
-047 /**
-048 * Registers WALActionsListener
-049 */
-050 void registerWALActionsListener(final
WALActionsListener listener);
-051
-052 /**
-053 * Unregisters WALActionsListener
-054 */
-055 boolean
unregisterWALActionsListener(final WALActionsListener listener);
-056
-057 /**
-058 * Roll the log writer. That is, start
writing log messages to a new file.
-059 *
-060 * p
-061 * The implementation is synchronized
in order to make sure there's one rollWriter
-062 * running at any given time.
-063 *
-064 * @return If lots of logs, flush the
returned regions so next time through we
-065 * can clean logs. Returns null
if nothing to flush. Names are actual
-066 * region names as returned by
{@link RegionInfo#getEncodedName()}
-067 */
-068 byte[][] rollWriter() throws
FailedLogCloseException, IOException;
-069
-070 /**
-071 * Roll the log writer. That is, start
writing log messages to a new file.
-072 *
-073 * p
-074 * The implementation is synchronized
in order to make sure there's one rollWriter
-075 * running at any given time.
-076 *
-077 * @param force
-078 * If true, force creation of
a new writer even if no entries have
-079 * been written to the current
writer
-080 * @return If lots of logs, flush the
returned regions so next time through we
-081 * can clean logs. Returns null
if nothing to flush. Names are actual
-082 * region names as returned by
{@link RegionInfo#getEncodedName()}
-083 */
-084 byte[][] rollWriter(boolean force)
throws FailedLogCloseException, IOException;
-085
-086 /**
-087 * Stop accepting new writes. If we
have unsynced writes still in buffer, sync them.
-088 * Extant edits are left in place in
backing storage to be replayed later.
-089 */
-090 void shutdown() throws IOException;
-091
-092 /**
-093 * Caller no longer needs any edits
from this WAL. Implementers are free to reclaim
-094 * underlying resources after this
call; i.e. filesystem based WALs can archive or
-095 * delete files.
-096 */
-097 @Override
-098 void close() throws IOException;
-099
-100 /**
-101 * Append a set of edits to the WAL.
The WAL is not flushed/sync'd after this transaction
-102 * completes BUT on return this edit
must have its region edit/sequence id assigned
-103 * else it messes up our unification of
mvcc and sequenceid. On return codekey/code will
-104 * have the region edit/sequence id
filled in.
-105 * @param info the regioninfo
associated with append
-106 * @param key Modified by this call; we
add to it this edits region edit/sequence id.
-107 * @param edits Edits to append. MAY
CONTAIN NO EDITS for case where we want to get an edit
-108 * sequence id that is after all
currently appended edits.
-109 * @param inMemstore Always true except
for case where we are writing a compaction completion
-110 * record into the WAL; in this case
the entry is just so we can finish an unfinished compaction
-111 * -- it is not an edit for memstore.
-112 * @return Returns a 'transaction id'
and codekey/code will have the region edit/sequence id
-113 * in it.
-114 */
-115 long append(RegionInfo info, WALKeyImpl
key,
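
The javadoc above pins down the append contract: append assigns the edit's region sequence id and returns a transaction id, while durability comes from a separate sync, and rollWriter switches to a new file. A deliberately simplified sketch of that shape; ToyWal is a stand-in and not the org.apache.hadoop.hbase.wal.WAL API:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

/** Toy write-ahead log: append returns a txid, sync makes appended edits durable. */
public class ToyWal {
  private final AtomicLong sequenceId = new AtomicLong();
  private final StringBuilder unsynced = new StringBuilder();

  /** Appends an edit and returns its transaction/sequence id; not yet durable. */
  public synchronized long append(String edit) {
    long txid = sequenceId.incrementAndGet();
    unsynced.append(txid).append(':').append(edit).append('\n');
    return txid;
  }

  /** Flushes everything appended so far; a real WAL syncs up to a given txid. */
  public synchronized void sync() throws IOException {
    System.out.print(unsynced); // stand-in for a durable flush to the filesystem
    unsynced.setLength(0);
  }
}
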
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.TimeoutExecutorThread.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.TimeoutExecutorThread.html
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.TimeoutExecutorThread.html
index 594ef24..17d5c40 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.TimeoutExecutorThread.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.TimeoutExecutorThread.html
@@ -170,241 +170,242 @@
162 }
163
164 /**
-165 * Add a remote rpc. Be sure to check
result for successful add.
+165 * Add a remote rpc.
166 * @param key the node identifier
-167 * @return True if we successfully
added the operation.
-168 */
-169 public boolean addOperationToNode(final
TRemote key, RemoteProcedure rp) {
+167 */
+168 public void addOperationToNode(final
TRemote key, RemoteProcedure rp)
+169 throws
NullTargetServerDispatchException, NoServerDispatchException,
NoNodeDispatchException {
170if (key == null) {
-171 // Key is remote server name. Be
careful. It could have been nulled by a concurrent
-172 // ServerCrashProcedure shutting
down outstanding RPC requests. See remoteCallFailed.
-173 return false;
-174}
-175assert key != null : "found null key
for node";
-176BufferNode node = nodeMap.get(key);
-177if (node == null) {
-178 return false;
-179}
-180node.add(rp);
-181// Check our node still in the map;
could have been removed by #removeNode.
-182return nodeMap.containsValue(node);
-183 }
-184
-185 /**
-186 * Remove a remote node
-187 * @param key the node identifier
-188 */
-189 public boolean removeNode(final TRemote
key) {
-190final BufferNode node =
nodeMap.remove(key);
-191if (node == null) return false;
-192node.abortOperationsInQueue();
-193return true;
-194 }
-195
-196 //
-197 // Task Helpers
-198 //
-199 protected Future<Void>
submitTask(Callable<Void> task) {
-200return threadPool.submit(task);
-201 }
-202
-203 protected Future<Void>
submitTask(Callable<Void> task, long delay, TimeUnit unit) {
-204final FutureTask<Void>
futureTask = new FutureTask<>(task);
-205timeoutExecutor.add(new
DelayedTask(futureTask, delay, unit));
-206return futureTask;
-207 }
-208
-209 protected abstract void
remoteDispatch(TRemote key, Set<RemoteProcedure> operations);
-210 protected abstract void
abortPendingOperations(TRemote key, Set<RemoteProcedure> operations);
-211
-212 /**
-213 * Data structure with reference to
remote operation.
-214 */
-215 public static abstract class
RemoteOperation {
-216private final RemoteProcedure
remoteProcedure;
-217
-218protected RemoteOperation(final
RemoteProcedure remoteProcedure) {
-219 this.remoteProcedure =
remoteProcedure;
-220}
-221
-222public RemoteProcedure
getRemoteProcedure() {
-223 return remoteProcedure;
-224}
-225 }
-226
-227 /**
-228 * Remote procedure reference.
-229 */
-230 public interface
RemoteProcedure<TEnv, TRemote> {
-231/**
-232 * For building the remote
operation.
-233 */
-234RemoteOperation remoteCallBuild(TEnv
env, TRemote remote);
-235
-236/**
-237 * Called when the executeProcedure
call is failed.
-238 */
-239void remoteCallFailed(TEnv env,
TRemote remote, IOException exception);
-240
-241/**
-242 * Called when RS tells the remote
procedure is succeeded through the
-243 * {@code reportProcedureDone}
method.
-244 */
-245void remoteOperationCompleted(TEnv
env);
-246
-247/**
-248 * Called when RS tells the remote
procedure is failed through the {@code reportProcedureDone}
-249 * method.
-250 */
-251void remoteOperationFailed(TEnv env,
RemoteProcedureException error);
-252 }
-253
-254 /**
-255 * Account of what procedures are
running on remote node.
-256 * @param TEnv
-257 * @param TRemote
-258 */
-259 public interface RemoteNode<TEnv,
TRemote> {
-260TRemote getKey();
-261void add(RemoteProcedure<TEnv,
TRemote> operation);
-262void dispatch();
-263 }
-264
-265 protected
ArrayListMultimap<Class<?>, RemoteOperation>
buildAndGroupRequestByType(final TEnv env,
-266 final TRemote remote, final
Set<RemoteProcedure> remoteProcedures) {
-267final
ArrayListMultimap<Class<?>, RemoteOperation> requestByType =
ArrayListMultimap.create();
-268for (RemoteProcedure proc:
remoteProcedures) {
-269 RemoteOperation operation =
proc.remoteCallBuild(env, remote);
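
The hunk above replaces addOperationToNode's boolean result with three checked exceptions, so a caller learns which of the formerly silent failure cases it hit. A toy illustration of that style of API; the exception names follow the diff (only two of the three are modelled), but the classes and ToyDispatcher below are local stand-ins, not the HBase procedure2 types:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class ToyDispatcher {
  static class NullTargetServerDispatchException extends Exception {}
  static class NoServerDispatchException extends Exception {}

  private final Map<String, StringBuilder> nodes = new ConcurrentHashMap<>();

  void addNode(String server) {
    nodes.put(server, new StringBuilder());
  }

  /** The old style returned false on failure; this style names the reason instead. */
  void addOperationToNode(String server, String op)
      throws NullTargetServerDispatchException, NoServerDispatchException {
    if (server == null) {
      throw new NullTargetServerDispatchException(); // e.g. cleared by a crash procedure
    }
    StringBuilder node = nodes.get(server);
    if (node == null) {
      throw new NoServerDispatchException(); // no buffer registered for this server
    }
    node.append(op).append('\n');
  }
}
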
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
--
diff --git
a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
index 3b08b86..80483ee 100644
---
a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
+++
b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
@@ -598,7 +598,7 @@
590 * Start a minidfscluster.
591 * @param servers How many DNs to
start.
592 * @throws Exception
-593 * @see {@link
#shutdownMiniDFSCluster()}
+593 * @see #shutdownMiniDFSCluster()
594 * @return The mini dfs cluster
created.
595 */
596 public MiniDFSCluster
startMiniDFSCluster(int servers) throws Exception {
@@ -613,7 +613,7 @@
605 * datanodes will have the same host
name.
606 * @param hosts hostnames DNs to run
on.
607 * @throws Exception
-608 * @see {@link
#shutdownMiniDFSCluster()}
+608 * @see #shutdownMiniDFSCluster()
609 * @return The mini dfs cluster
created.
610 */
611 public MiniDFSCluster
startMiniDFSCluster(final String hosts[])
@@ -631,7 +631,7 @@
623 * @param servers How many DNs to
start.
624 * @param hosts hostnames DNs to run
on.
625 * @throws Exception
-626 * @see {@link
#shutdownMiniDFSCluster()}
+626 * @see #shutdownMiniDFSCluster()
627 * @return The mini dfs cluster
created.
628 */
629 public MiniDFSCluster
startMiniDFSCluster(int servers, final String hosts[])
@@ -775,7 +775,7 @@
767 * Start up a minicluster of hbase,
dfs, and zookeeper.
768 * @throws Exception
769 * @return Mini hbase cluster instance
created.
-770 * @see {@link
#shutdownMiniDFSCluster()}
+770 * @see #shutdownMiniDFSCluster()
771 */
772 public MiniHBaseCluster
startMiniCluster() throws Exception {
773return startMiniCluster(1, 1);
@@ -785,7 +785,7 @@
777 * Start up a minicluster of hbase,
dfs, and zookeeper where WAL's walDir is created separately.
778 * @throws Exception
779 * @return Mini hbase cluster instance
created.
-780 * @see {@link
#shutdownMiniDFSCluster()}
+780 * @see #shutdownMiniDFSCluster()
781 */
782 public MiniHBaseCluster
startMiniCluster(boolean withWALDir) throws Exception {
783return startMiniCluster(1, 1, 1,
null, null, null, false, withWALDir);
@@ -797,7 +797,7 @@
789 * (will overwrite if dir already
exists)
790 * @throws Exception
791 * @return Mini hbase cluster instance
created.
-792 * @see {@link
#shutdownMiniDFSCluster()}
+792 * @see #shutdownMiniDFSCluster()
793 */
794 public MiniHBaseCluster
startMiniCluster(final int numSlaves, boolean create)
795 throws Exception {
@@ -814,7 +814,7 @@
806 * hbase.regionserver.info.port is -1
(i.e. no ui per regionserver) otherwise
807 * bind errors.
808 * @throws Exception
-809 * @see {@link
#shutdownMiniCluster()}
+809 * @see #shutdownMiniCluster()
810 * @return Mini hbase cluster instance
created.
811 */
812 public MiniHBaseCluster
startMiniCluster(final int numSlaves)
@@ -831,7 +831,7 @@
823 * Start minicluster. Whether to create
a new root or data dir path even if such a path
824 * has been created earlier is decided
based on flag codecreate/code
825 * @throws Exception
-826 * @see {@link
#shutdownMiniCluster()}
+826 * @see #shutdownMiniCluster()
827 * @return Mini hbase cluster instance
created.
828 */
829 public MiniHBaseCluster
startMiniCluster(final int numMasters,
@@ -843,7 +843,7 @@
835 /**
836 * start minicluster
837 * @throws Exception
-838 * @see {@link
#shutdownMiniCluster()}
+838 * @see #shutdownMiniCluster()
839 * @return Mini hbase cluster instance
created.
840 */
841 public MiniHBaseCluster
startMiniCluster(final int numMasters,
@@ -880,7 +880,7 @@
872 * If you start MiniDFSCluster without
host names,
873 * all instances of the datanodes will
have the same host name.
874 * @throws Exception
-875 * @see {@link
#shutdownMiniCluster()}
+875 * @see #shutdownMiniCluster()
876 * @return Mini hbase cluster instance
created.
877 */
878 public MiniHBaseCluster
startMiniCluster(final int numMasters,
@@ -922,7 +922,7 @@
914 * @param regionserverClass The class
to use as HRegionServer, or null for
915 * default
916 * @throws Exception
-917 * @see {@link
#shutdownMiniCluster()}
+917 * @see #shutdownMiniCluster()
918 * @return Mini hbase cluster instance
created.
919 */
920 public MiniHBaseCluster
startMiniCluster(final int numMasters,
@@ -1011,7 +1011,7 @@
1003 * @return Reference to the hbase mini
hbase cluster.
1004 *
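
Every hunk in this file applies the same javadoc fix: @see already generates a link, so wrapping its target in {@link ...} is invalid; the bare reference belongs after @see, while {@link} is for running text. A short illustrative example (ClusterUtil is hypothetical, not the real HBaseTestingUtility):

/** Illustrative only. */
public class ClusterUtil {
  /**
   * Starts a toy cluster. Pair it with {@link #shutdownCluster()} when done.
   *
   * @see #shutdownCluster()
   */
  public void startCluster() {
  }

  public void shutdownCluster() {
  }
}
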
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
--
diff --git
a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
index fc0a9f1..458b775 100644
---
a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
+++
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
@@ -133,7 +133,7 @@ var activeTableTab = "activeTableTab";
-static class PerformanceEvaluation.AppendTest
+static class PerformanceEvaluation.AppendTest
extends PerformanceEvaluation.CASTableTest
@@ -202,8 +202,10 @@ extends Method and Description
-(package private) void
-testRow(int i)
+(package private) boolean
+testRow(int i)
+Test for individual row.
+
@@ -221,13 +223,6 @@ extends onStartup,
onTakedown
-
-
-
-Methods inherited from classorg.apache.hadoop.hbase.PerformanceEvaluation.Test
-closeConnection,
createConnection
-
-
@@ -261,7 +256,7 @@ extends
AppendTest
-AppendTest(org.apache.hadoop.hbase.client.Connection con,
+AppendTest(org.apache.hadoop.hbase.client.Connection con,
PerformanceEvaluation.TestOptions options,
PerformanceEvaluation.Status status)
@@ -280,11 +275,19 @@ extends
testRow
-void testRow(int i)
- throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
title="class or interface in java.io">IOException
+boolean testRow(int i)
+ throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
title="class or interface in java.io">IOException
+Description copied from
class:PerformanceEvaluation.TestBase
+Test for individual row.
Specified by:
testRow in
class PerformanceEvaluation.TestBase
+Parameters:
+i - Row index.
+Returns:
+true if the row was sent to server and need to record metrics.
+ False if not, multiGet and multiPut e.g., the rows are sent
+ to server only if enough gets/puts are gathered.
Throws:
https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
title="class or interface in java.io">IOException
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
--
diff --git
a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
index a6d7d96..02a13a6 100644
---
a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
+++
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
-static class PerformanceEvaluation.AsyncRandomReadTest
+static class PerformanceEvaluation.AsyncRandomReadTest
extends PerformanceEvaluation.AsyncTableTest
@@ -228,8 +228,10 @@ extends runtime(https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
title="class or interface in java.lang">Throwablee)
-(package private) void
-testRow(int i)
+(package private) boolean
+testRow(int i)
+Test for individual row.
+
protected void
@@ -244,13 +246,6 @@ extends onStartup,
onTakedown
-
-
-
-Methods inherited from classorg.apache.hadoop.hbase.PerformanceEvaluation.AsyncTest
-closeConnection,
createConnection
-
-
@@ -284,7 +279,7 @@ extends
consistency
-private finalorg.apache.hadoop.hbase.client.Consistency consistency
+private finalorg.apache.hadoop.hbase.client.Consistency consistency
@@ -293,7 +288,7 @@ extends
gets
-privatehttps://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true;
title="class or interface in
java.util">ArrayListorg.apache.hadoop.hbase.client.Get gets
+privatehttps://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true;
title="class or interface in
java.util">ArrayListorg.apache.hadoop.hbase.client.Get gets
@@ -302,7 +297,7 @@ extends
rd
-privatehttps://docs.oracle.com/javase/8/docs/api/java/util/Random.html?is-external=true;
title="class or interface in java.util">Random rd
+privatehttps://docs.oracle.com/javase/8/docs/api/java/util/Random.html?is-external=true;
title="class or interface in java.util">Random rd
@@ -319,7 +314,7 @@ extends
AsyncRandomReadTest
-AsyncRandomReadTest(org.apache.hadoop.hbase.client.AsyncConnection con,
+AsyncRandomReadTest(org.apache.hadoop.hbase.client.AsyncConnection con,
PerformanceEvaluation.TestOptions options,
PerformanceEvaluation.Status status)
@@ -338,12 +333,20 @@ extends
testRow
-void testRow(int i)
-
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
--
diff --git
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
index 3f8844b..cdb9398 100644
---
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
+++
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
@@ -140,2712 +140,2713 @@
132public class PerformanceEvaluation
extends Configured implements Tool {
133 static final String RANDOM_SEEK_SCAN =
"randomSeekScan";
134 static final String RANDOM_READ =
"randomRead";
-135 private static final Logger LOG =
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136 private static final ObjectMapper
MAPPER = new ObjectMapper();
-137 static {
-138
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139 }
-140
-141 public static final String TABLE_NAME =
"TestTable";
-142 public static final String
FAMILY_NAME_BASE = "info";
-143 public static final byte[] FAMILY_ZERO
= Bytes.toBytes("info0");
-144 public static final byte[] COLUMN_ZERO
= Bytes.toBytes("" + 0);
-145 public static final int
DEFAULT_VALUE_LENGTH = 1000;
-146 public static final int ROW_LENGTH =
26;
-147
-148 private static final int ONE_GB = 1024
* 1024 * 1000;
-149 private static final int
DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150 // TODO : should we make this
configurable
-151 private static final int TAG_LENGTH =
256;
-152 private static final DecimalFormat FMT
= new DecimalFormat("0.##");
-153 private static final MathContext CXT =
MathContext.DECIMAL64;
-154 private static final BigDecimal
MS_PER_SEC = BigDecimal.valueOf(1000);
-155 private static final BigDecimal
BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156 private static final TestOptions
DEFAULT_OPTS = new TestOptions();
-157
-158 private static Map<String,
CmdDescriptor> COMMANDS = new TreeMap<>();
-159 private static final Path PERF_EVAL_DIR
= new Path("performance_evaluation");
-160
-161 static {
-162
addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163"Run async random read test");
-164
addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165"Run async random write test");
-166
addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167"Run async sequential read
test");
-168
addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169"Run async sequential write
test");
-170
addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171"Run async scan test (read every
row)");
-172
addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173 "Run random read test");
-174
addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175 "Run random seek and scan 100
test");
-176
addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177 "Run random seek scan with both
start and stop row (max 10 rows)");
-178
addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179 "Run random seek scan with both
start and stop row (max 100 rows)");
-180
addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181 "Run random seek scan with both
start and stop row (max 1000 rows)");
-182
addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183 "Run random seek scan with both
start and stop row (max 1 rows)");
-184
addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185 "Run random write test");
-186
addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187 "Run sequential read test");
-188
addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189 "Run sequential write test");
-190addCommandDescriptor(ScanTest.class,
"scan",
-191 "Run scan test (read every
row)");
-192
addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193 "Run scan test using a filter to
find a specific row based on it's value " +
-194 "(make sure to use --rows=20)");
-195
addCommandDescriptor(IncrementTest.class, "increment",
-196 "Increment on each row; clients
overlap on keyspace so some concurrent operations");
-197
addCommandDescriptor(AppendTest.class, "append",
-198 "Append on each row; clients
overlap on keyspace so some concurrent operations");
-199
addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200 "CheckAndMutate on each row;
clients overlap on keyspace so some concurrent operations");
-201
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Segment.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Segment.html
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Segment.html
index 4d2c914..0b1cae9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Segment.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Segment.html
@@ -53,7 +53,7 @@
045 * segments from active set to snapshot
set in the default implementation.
046 */
047@InterfaceAudience.Private
-048public abstract class Segment {
+048public abstract class Segment implements
MemStoreSizing {
049
050 public final static long FIXED_OVERHEAD
= ClassSize.align(ClassSize.OBJECT
051 + 5 * ClassSize.REFERENCE //
cellSet, comparator, memStoreLAB, memStoreSizing,
@@ -67,9 +67,9 @@
059 private final CellComparator
comparator;
060 protected long minSequenceId;
061 private MemStoreLAB memStoreLAB;
-062 // Sum of sizes of all Cells added to
this Segment. Cell's heapSize is considered. This is not
+062 // Sum of sizes of all Cells added to
this Segment. Cell's HeapSize is considered. This is not
063 // including the heap overhead of this
class.
-064 protected final MemStoreSizing
segmentSize;
+064 protected final MemStoreSizing
memStoreSizing;
065 protected final TimeRangeTracker
timeRangeTracker;
066 protected volatile boolean
tagsPresent;
067
@@ -77,352 +77,348 @@
069 // and there is no need in true
Segments state
070 protected Segment(CellComparator
comparator, TimeRangeTracker trt) {
071this.comparator = comparator;
-072this.segmentSize = new
MemStoreSizing();
-073this.timeRangeTracker = trt;
-074 }
-075
-076 protected Segment(CellComparator
comparator, List<ImmutableSegment> segments,
-077 TimeRangeTracker trt) {
-078long dataSize = 0;
-079long heapSize = 0;
-080long OffHeapSize = 0;
-081for (Segment segment : segments) {
-082 MemStoreSize memStoreSize =
segment.getMemStoreSize();
-083 dataSize +=
memStoreSize.getDataSize();
-084 heapSize +=
memStoreSize.getHeapSize();
-085 OffHeapSize +=
memStoreSize.getOffHeapSize();
-086}
-087this.comparator = comparator;
-088this.segmentSize = new
MemStoreSizing(dataSize, heapSize, OffHeapSize);
-089this.timeRangeTracker = trt;
-090 }
-091
-092 // This constructor is used to create
empty Segments.
-093 protected Segment(CellSet cellSet,
CellComparator comparator, MemStoreLAB memStoreLAB, TimeRangeTracker trt) {
-094this.cellSet.set(cellSet);
-095this.comparator = comparator;
-096this.minSequenceId =
Long.MAX_VALUE;
-097this.memStoreLAB = memStoreLAB;
-098this.segmentSize = new
MemStoreSizing();
-099this.tagsPresent = false;
-100this.timeRangeTracker = trt;
-101 }
-102
-103 protected Segment(Segment segment) {
-104
this.cellSet.set(segment.getCellSet());
-105this.comparator =
segment.getComparator();
-106this.minSequenceId =
segment.getMinSequenceId();
-107this.memStoreLAB =
segment.getMemStoreLAB();
-108this.segmentSize = new
MemStoreSizing(segment.getMemStoreSize());
-109this.tagsPresent =
segment.isTagsPresent();
-110this.timeRangeTracker =
segment.getTimeRangeTracker();
-111 }
-112
-113 /**
-114 * Creates the scanner for the given
read point
-115 * @return a scanner for the given read
point
-116 */
-117 protected KeyValueScanner
getScanner(long readPoint) {
-118return new SegmentScanner(this,
readPoint);
-119 }
-120
-121 public ListKeyValueScanner
getScanners(long readPoint) {
-122return Collections.singletonList(new
SegmentScanner(this, readPoint));
-123 }
-124
-125 /**
-126 * @return whether the segment has any
cells
-127 */
-128 public boolean isEmpty() {
-129return getCellSet().isEmpty();
-130 }
-131
-132 /**
-133 * @return number of cells in segment
-134 */
-135 public int getCellsCount() {
-136return getCellSet().size();
-137 }
-138
-139 /**
-140 * Closing a segment before it is being
discarded
-141 */
-142 public void close() {
-143if (this.memStoreLAB != null) {
-144 this.memStoreLAB.close();
-145}
-146// do not set MSLab to null as
scanners may still be reading the data here and need to decrease
-147// the counter when they finish
-148 }
-149
-150 /**
-151 * If the segment has a memory
allocator the cell is being cloned to this space, and returned;
-152 * otherwise the given cell is
returned
-153 *
-154 * When a cell's size is too big
(bigger than maxAlloc), it is not allocated on MSLAB.
-155 * Since the process of flattening to
CellChunkMap assumes that all cells
-156 * are allocated on MSLAB, during this
process, the input parameter
-157 * forceCloneOfBigCell is set to 'true'
and the cell is copied
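The Javadoc above describes a three-way decision: with no allocator the caller's cell is returned as-is, oversized cells (bigger than maxAlloc) normally bypass the MSLAB, and only during flattening to CellChunkMap is a big cell force-cloned. A minimal Java sketch of that rule, under stated assumptions, follows; the method signature and the sizeOf() helper are hypothetical illustrations, not the actual Segment implementation.

    // Hypothetical sketch of the cloning rule described in the Javadoc above.
    // 'maxAlloc' and 'forceCloneOfBigCell' mirror the names used in the comment;
    // sizeOf() stands in for whatever size accounting the segment really uses.
    private Cell maybeCloneWithAllocator(Cell cell, MemStoreLAB lab,
        int maxAlloc, boolean forceCloneOfBigCell) {
      if (lab == null) {
        return cell;                         // no allocator: return the given cell
      }
      if (sizeOf(cell) > maxAlloc && !forceCloneOfBigCell) {
        return cell;                         // big cells normally stay off the MSLAB
      }
      Cell copied = lab.copyCellInto(cell);  // clone into MSLAB-backed memory
      return copied != null ? copied : cell; // fall back to the original if no space
    }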
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
--
diff --git
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
index 2510283..418c60c 100644
---
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
+++
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
@@ -77,77 +77,77 @@
069import
org.apache.hadoop.hbase.client.RowMutations;
070import
org.apache.hadoop.hbase.client.Scan;
071import
org.apache.hadoop.hbase.client.Table;
-072import
org.apache.hadoop.hbase.filter.BinaryComparator;
-073import
org.apache.hadoop.hbase.filter.Filter;
-074import
org.apache.hadoop.hbase.filter.FilterAllFilter;
-075import
org.apache.hadoop.hbase.filter.FilterList;
-076import
org.apache.hadoop.hbase.filter.PageFilter;
-077import
org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-078import
org.apache.hadoop.hbase.filter.WhileMatchFilter;
-079import
org.apache.hadoop.hbase.io.compress.Compression;
-080import
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-081import
org.apache.hadoop.hbase.io.hfile.RandomDistribution;
-082import
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-083import
org.apache.hadoop.hbase.regionserver.BloomType;
-084import
org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-085import
org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
-086import
org.apache.hadoop.hbase.trace.SpanReceiverHost;
-087import
org.apache.hadoop.hbase.trace.TraceUtil;
-088import
org.apache.hadoop.hbase.util.ByteArrayHashKey;
-089import
org.apache.hadoop.hbase.util.Bytes;
-090import
org.apache.hadoop.hbase.util.Hash;
-091import
org.apache.hadoop.hbase.util.MurmurHash;
-092import
org.apache.hadoop.hbase.util.Pair;
-093import
org.apache.hadoop.hbase.util.YammerHistogramUtils;
-094import
org.apache.hadoop.io.LongWritable;
-095import org.apache.hadoop.io.Text;
-096import org.apache.hadoop.mapreduce.Job;
-097import
org.apache.hadoop.mapreduce.Mapper;
-098import
org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-099import
org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-100import
org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
-101import org.apache.hadoop.util.Tool;
-102import
org.apache.hadoop.util.ToolRunner;
-103import
org.apache.htrace.core.ProbabilitySampler;
-104import org.apache.htrace.core.Sampler;
-105import
org.apache.htrace.core.TraceScope;
-106import
org.apache.yetus.audience.InterfaceAudience;
-107import org.slf4j.Logger;
-108import org.slf4j.LoggerFactory;
-109import
org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
-110import
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-111
-112/**
-113 * Script used evaluating HBase
performance and scalability. Runs a HBase
-114 * client that steps through one of a set
of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random
writes test, etc.). Pass on the
-116 * command-line which test to run and how
many clients are participating in
-117 * this experiment. Run {@code
PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * pThis class sets up and runs
the evaluation programs described in
-120 * Section 7, iPerformance
Evaluation/i, of the a
-121 *
href="http://labs.google.com/papers/bigtable.html"Bigtable/a;
-122 * paper, pages 8-10.
-123 *
-124 * pBy default, runs as a
mapreduce job where each mapper runs a single test
-125 * client. Can also run as a
non-mapreduce, multithreaded application by
-126 * specifying {@code --nomapred}. Each
client does about 1GB of data, unless
-127 * specified otherwise.
-128 */
-129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-130public class PerformanceEvaluation
extends Configured implements Tool {
-131 static final String RANDOM_SEEK_SCAN =
"randomSeekScan";
-132 static final String RANDOM_READ =
"randomRead";
-133 private static final Logger LOG =
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-134 private static final ObjectMapper
MAPPER = new ObjectMapper();
-135 static {
-136
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-137 }
-138
-139 public static final String TABLE_NAME =
"TestTable";
-140 public static final byte[] FAMILY_NAME
= Bytes.toBytes("info");
-141 public static final byte [] COLUMN_ZERO
= Bytes.toBytes("" + 0);
-142 public static final byte []
QUALIFIER_NAME = COLUMN_ZERO;
+072import
org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+073import
org.apache.hadoop.hbase.filter.BinaryComparator;
+074import
org.apache.hadoop.hbase.filter.Filter;
+075import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.DummyReplicator.html
--
diff --git
a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.DummyReplicator.html
b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.DummyReplicator.html
deleted file mode 100644
index 7a938de..000
---
a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.DummyReplicator.html
+++ /dev/null
@@ -1,632 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-Source code
-
-
-
-
-001/**
-002 * Licensed to the Apache Software
Foundation (ASF) under one
-003 * or more contributor license
agreements. See the NOTICE file
-004 * distributed with this work for
additional information
-005 * regarding copyright ownership. The
ASF licenses this file
-006 * to you under the Apache License,
Version 2.0 (the
-007 * "License"); you may not use this file
except in compliance
-008 * with the License. You may obtain a
copy of the License at
-009 *
-010 *
http://www.apache.org/licenses/LICENSE-2.0
-011 *
-012 * Unless required by applicable law or
agreed to in writing, software
-013 * distributed under the License is
distributed on an "AS IS" BASIS,
-014 * WITHOUT WARRANTIES OR CONDITIONS OF
ANY KIND, either express or implied.
-015 * See the License for the specific
language governing permissions and
-016 * limitations under the License.
-017 */
-018package
org.apache.hadoop.hbase.replication;
-019
-020import static org.mockito.Mockito.mock;
-021import static
org.mockito.Mockito.verify;
-022import static org.mockito.Mockito.when;
-023
-024import java.io.IOException;
-025import java.util.ArrayList;
-026import java.util.List;
-027import java.util.UUID;
-028import
java.util.concurrent.atomic.AtomicBoolean;
-029import
java.util.concurrent.atomic.AtomicInteger;
-030import
java.util.concurrent.atomic.AtomicReference;
-031import org.apache.hadoop.hbase.Cell;
-032import
org.apache.hadoop.hbase.HBaseClassTestRule;
-033import org.apache.hadoop.hbase.Waiter;
-034import
org.apache.hadoop.hbase.client.Connection;
-035import
org.apache.hadoop.hbase.client.ConnectionFactory;
-036import
org.apache.hadoop.hbase.client.Put;
-037import
org.apache.hadoop.hbase.client.RegionInfo;
-038import
org.apache.hadoop.hbase.client.Table;
-039import
org.apache.hadoop.hbase.regionserver.HRegion;
-040import
org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint;
-041import
org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationGlobalSourceSource;
-042import
org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl;
-043import
org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource;
-044import
org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSourceImpl;
-045import
org.apache.hadoop.hbase.replication.regionserver.MetricsSource;
-046import
org.apache.hadoop.hbase.testclassification.MediumTests;
-047import
org.apache.hadoop.hbase.testclassification.ReplicationTests;
-048import
org.apache.hadoop.hbase.util.Bytes;
-049import
org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-050import
org.apache.hadoop.hbase.util.Threads;
-051import
org.apache.hadoop.hbase.wal.WAL.Entry;
-052import
org.apache.hadoop.hbase.zookeeper.ZKConfig;
-053import
org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
-054import org.junit.AfterClass;
-055import org.junit.Assert;
-056import org.junit.Before;
-057import org.junit.BeforeClass;
-058import org.junit.ClassRule;
-059import org.junit.Test;
-060import
org.junit.experimental.categories.Category;
-061import org.slf4j.Logger;
-062import org.slf4j.LoggerFactory;
-063
-064/**
-065 * Tests ReplicationSource and
ReplicationEndpoint interactions
-066 */
-067@Category({ ReplicationTests.class,
MediumTests.class })
-068public class TestReplicationEndpoint
extends TestReplicationBase {
-069
-070 @ClassRule
-071 public static final HBaseClassTestRule
CLASS_RULE =
-072
HBaseClassTestRule.forClass(TestReplicationEndpoint.class);
-073
-074 private static final Logger LOG =
LoggerFactory.getLogger(TestReplicationEndpoint.class);
-075
-076 static int numRegionServers;
-077
-078 @BeforeClass
-079 public static void setUpBeforeClass()
throws Exception {
-080
TestReplicationBase.setUpBeforeClass();
-081numRegionServers =
utility1.getHBaseCluster().getRegionServerThreads().size();
-082 }
-083
-084 @AfterClass
-085 public static void tearDownAfterClass()
throws Exception {
-086
TestReplicationBase.tearDownAfterClass();
-087// check stop is called
-088
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
index 8302e28..c370eb9 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
@@ -2113,3031 +2113,3033 @@
2105
errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
2106tableName + " unable to
delete dangling table state " + tableState);
2107 }
-2108} else {
-2109
errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
-2110 tableName + " has dangling
table state " + tableState);
-2111}
-2112 }
-2113}
-2114// check that all tables have
states
-2115for (TableName tableName :
tablesInfo.keySet()) {
-2116 if (isTableIncluded(tableName)
&& !tableStates.containsKey(tableName)) {
-2117if (fixMeta) {
-2118
MetaTableAccessor.updateTableState(connection, tableName,
TableState.State.ENABLED);
-2119 TableState newState =
MetaTableAccessor.getTableState(connection, tableName);
-2120 if (newState == null) {
-2121
errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2122"Unable to change state
for table " + tableName + " in meta ");
-2123 }
-2124} else {
-2125
errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2126 tableName + " has no state
in meta ");
-2127}
-2128 }
-2129}
-2130 }
-2131
-2132 private void preCheckPermission()
throws IOException, AccessDeniedException {
-2133if
(shouldIgnorePreCheckPermission()) {
-2134 return;
-2135}
-2136
-2137Path hbaseDir =
FSUtils.getRootDir(getConf());
-2138FileSystem fs =
hbaseDir.getFileSystem(getConf());
-2139UserProvider userProvider =
UserProvider.instantiate(getConf());
-2140UserGroupInformation ugi =
userProvider.getCurrent().getUGI();
-2141FileStatus[] files =
fs.listStatus(hbaseDir);
-2142for (FileStatus file : files) {
-2143 try {
-2144FSUtils.checkAccess(ugi, file,
FsAction.WRITE);
-2145 } catch (AccessDeniedException
ace) {
-2146LOG.warn("Got
AccessDeniedException when preCheckPermission ", ace);
-2147
errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " +
ugi.getUserName()
-2148 + " does not have write perms
to " + file.getPath()
-2149 + ". Please rerun hbck as hdfs
user " + file.getOwner());
-2150throw ace;
-2151 }
-2152}
-2153 }
-2154
-2155 /**
-2156 * Deletes region from meta table
-2157 */
-2158 private void deleteMetaRegion(HbckInfo
hi) throws IOException {
-2159
deleteMetaRegion(hi.metaEntry.getRegionName());
-2160 }
-2161
-2162 /**
-2163 * Deletes region from meta table
-2164 */
-2165 private void deleteMetaRegion(byte[]
metaKey) throws IOException {
-2166Delete d = new Delete(metaKey);
-2167meta.delete(d);
-2168LOG.info("Deleted " +
Bytes.toString(metaKey) + " from META" );
-2169 }
-2170
-2171 /**
-2172 * Reset the split parent region info
in meta table
-2173 */
-2174 private void resetSplitParent(HbckInfo
hi) throws IOException {
-2175RowMutations mutations = new
RowMutations(hi.metaEntry.getRegionName());
-2176Delete d = new
Delete(hi.metaEntry.getRegionName());
-2177
d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
-2178
d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
-2179mutations.add(d);
-2180
-2181RegionInfo hri =
RegionInfoBuilder.newBuilder(hi.metaEntry)
-2182.setOffline(false)
-2183.setSplit(false)
-2184.build();
-2185Put p =
MetaTableAccessor.makePutFromRegionInfo(hri,
EnvironmentEdgeManager.currentTime());
-2186mutations.add(p);
-2187
-2188meta.mutateRow(mutations);
-2189LOG.info("Reset split parent " +
hi.metaEntry.getRegionNameAsString() + " in META" );
-2190 }
-2191
-2192 /**
-2193 * This backwards-compatibility
wrapper for permanently offlining a region
-2194 * that should not be alive. If the
region server does not support the
-2195 * "offline" method, it will use the
closest unassign method instead. This
-2196 * will basically work until one
attempts to disable or delete the affected
-2197 * table. The problem has to do with
in-memory only master state, so
-2198 * restarting the HMaster or failing
over to another should fix this.
-2199 */
-2200 private void offline(byte[]
regionName) throws IOException {
-2201String regionString =
Bytes.toStringBinary(regionName);
-2202if
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.RandomizedMatrix.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.RandomizedMatrix.html
b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.RandomizedMatrix.html
index bcb65f1..a9d5986 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.RandomizedMatrix.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.RandomizedMatrix.html
@@ -39,1086 +39,1087 @@
031import java.util.Scanner;
032import java.util.Set;
033import java.util.TreeMap;
-034import
org.apache.commons.cli.CommandLine;
-035import
org.apache.commons.cli.GnuParser;
-036import
org.apache.commons.cli.HelpFormatter;
-037import org.apache.commons.cli.Options;
-038import
org.apache.commons.cli.ParseException;
-039import
org.apache.commons.lang3.StringUtils;
-040import
org.apache.hadoop.conf.Configuration;
-041import org.apache.hadoop.fs.FileSystem;
-042import
org.apache.hadoop.hbase.ClusterMetrics.Option;
-043import
org.apache.hadoop.hbase.HBaseConfiguration;
-044import
org.apache.hadoop.hbase.HConstants;
-045import
org.apache.hadoop.hbase.ServerName;
-046import
org.apache.hadoop.hbase.TableName;
-047import
org.apache.hadoop.hbase.client.Admin;
-048import
org.apache.hadoop.hbase.client.ClusterConnection;
-049import
org.apache.hadoop.hbase.client.Connection;
-050import
org.apache.hadoop.hbase.client.ConnectionFactory;
-051import
org.apache.hadoop.hbase.client.RegionInfo;
-052import
org.apache.hadoop.hbase.favored.FavoredNodeAssignmentHelper;
-053import
org.apache.hadoop.hbase.favored.FavoredNodesPlan;
-054import
org.apache.hadoop.hbase.util.FSUtils;
-055import
org.apache.hadoop.hbase.util.MunkresAssignment;
-056import
org.apache.hadoop.hbase.util.Pair;
-057import
org.apache.yetus.audience.InterfaceAudience;
-058import org.slf4j.Logger;
-059import org.slf4j.LoggerFactory;
-060
-061import
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-062import
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-063import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-064import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
-065import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
-066
-067/**
-068 * A tool that is used for manipulating
and viewing favored nodes information
-069 * for regions. Run with -h to get a list
of the options
-070 */
-071@InterfaceAudience.Private
-072// TODO: Remove? Unused. Partially
implemented only.
-073public class RegionPlacementMaintainer
{
-074 private static final Logger LOG =
LoggerFactory.getLogger(RegionPlacementMaintainer.class
-075 .getName());
-076 //The cost of a placement that should
never be assigned.
-077 private static final float MAX_COST =
Float.POSITIVE_INFINITY;
-078
-079 // The cost of a placement that is
undesirable but acceptable.
-080 private static final float AVOID_COST =
10f;
-081
-082 // The amount by which the cost of a
placement is increased if it is the
-083 // last slot of the server. This is
done to more evenly distribute the slop
-084 // amongst servers.
-085 private static final float
LAST_SLOT_COST_PENALTY = 0.5f;
-086
-087 // The amount by which the cost of a
primary placement is penalized if it is
-088 // not the host currently serving the
region. This is done to minimize moves.
-089 private static final float
NOT_CURRENT_HOST_PENALTY = 0.1f;
-090
-091 private static boolean
USE_MUNKRES_FOR_PLACING_SECONDARY_AND_TERTIARY = false;
-092
-093 private Configuration conf;
-094 private final boolean
enforceLocality;
-095 private final boolean
enforceMinAssignmentMove;
-096 private RackManager rackManager;
-097 private Set<TableName>
targetTableSet;
-098 private final Connection connection;
-099
-100 public
RegionPlacementMaintainer(Configuration conf) {
-101this(conf, true, true);
-102 }
-103
-104 public
RegionPlacementMaintainer(Configuration conf, boolean enforceLocality,
-105 boolean enforceMinAssignmentMove)
{
-106this.conf = conf;
-107this.enforceLocality =
enforceLocality;
-108this.enforceMinAssignmentMove =
enforceMinAssignmentMove;
-109this.targetTableSet = new
HashSet<>();
-110this.rackManager = new
RackManager(conf);
-111try {
-112 this.connection =
ConnectionFactory.createConnection(this.conf);
-113} catch (IOException e) {
-114 throw new RuntimeException(e);
-115}
-116 }
-117
-118 private static void printHelp(Options
opt) {
-119new HelpFormatter().printHelp(
-120"RegionPlacement -w | -u |
-n | -v | -t | -h | -overwrite -r regionName -f favoredNodes "
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/org/apache/hadoop/hbase/client/Scan.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Scan.html
b/devapidocs/org/apache/hadoop/hbase/client/Scan.html
index c1f1d24..55bfe4f 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Scan.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Scan.html
@@ -18,7 +18,7 @@
catch(err) {
}
//-->
-var methods =
{"i0":10,"i1":10,"i2":10,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":42,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":42,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":42,"i58":42,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":42,"i71":42,"i72":42,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10};
+var methods =
{"i0":10,"i1":10,"i2":10,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":42,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":42,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":42,"i58":42,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":42,"i71":42,"i72":42,"i73":10,"i74":10,"i75":42,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10};
var tabs = {65535:["t0","All Methods"],1:["t1","Static
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete
Methods"],32:["t6","Deprecated Methods"]};
var altColor = "altColor";
var rowColor = "rowColor";
@@ -151,7 +151,7 @@ extends setTimeRange.
- To only retrieve columns with a specific timestamp, call setTimestamp
+ To only retrieve columns with a specific timestamp, call setTimestamp
.
To limit the number of versions of each column to be returned, call setMaxVersions.
@@ -861,37 +861,46 @@ extends
Scan
-setTimeStamp(long timestamp)
+setTimestamp(long timestamp)
Get versions of columns with the specified timestamp.
+Scan
+setTimeStamp(long timestamp)
+Deprecated.
+As of release 2.0.0, this
will be removed in HBase 3.0.0.
+ Use setTimestamp(long)
instead
+
+
+
+
https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">String,https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
toMap(intmaxCols)
Compile the details beyond the scope of getFingerprint
(row, columns,
timestamps, etc.) into a Map along with the fingerprinted information.
-
+
Scan
withStartRow(byte[]startRow)
Set the start row of the scan.
-
+
Scan
withStartRow(byte[]startRow,
booleaninclusive)
Set the start row of the scan.
-
+
Scan
withStopRow(byte[]stopRow)
Set the stop row of the scan.
-
+
Scan
withStopRow(byte[]stopRow,
booleaninclusive)
@@ -1436,8 +1445,11 @@ public
setTimeStamp
-public Scan setTimeStamp(long timestamp)
-                  throws IOException
+@Deprecated
+public Scan setTimeStamp(long timestamp)
+                  throws IOException
+Deprecated. As of release 2.0.0, this will be removed in HBase
3.0.0.
+ Use setTimestamp(long)
instead
Get versions of columns with the specified timestamp. Note,
default maximum
versions to return is 1. If your time range spans more than one version
and you want all versions returned, up the number of versions beyond the
@@ -1455,13 +1467,35 @@ public
+
+
+
+
+setTimestamp
+public Scan setTimestamp(long timestamp)
+Get versions of columns with the specified timestamp. Note,
default maximum
+ versions to return is 1. If your time range spans more than one version
+ and you want all versions returned, up the number of versions beyond the
+ default.
+
+Parameters:
+timestamp - version timestamp
+Returns:
+this
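A short client-side example contrasting the deprecated call with its replacement; withStartRow, withStopRow and setTimestamp are the non-deprecated methods listed in the summary table above, while the row keys and timestamp value below are placeholders only.

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    long ts = 1526000000000L;                  // example version timestamp
    // Before (deprecated since 2.0.0, removed in 3.0.0): scan.setTimeStamp(ts);
    Scan scan = new Scan()
        .withStartRow(Bytes.toBytes("row-0000"))
        .withStopRow(Bytes.toBytes("row-9999"))
        .setTimestamp(ts);                     // only cells with exactly this timestamp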
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/master/class-use/SplitLogManager.Task.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/class-use/SplitLogManager.Task.html
b/devapidocs/org/apache/hadoop/hbase/master/class-use/SplitLogManager.Task.html
index d9798c9..5e23e90 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/class-use/SplitLogManager.Task.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/class-use/SplitLogManager.Task.html
@@ -147,17 +147,17 @@
boolean
-SplitLogManagerCoordination.resubmitTask(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">StringtaskName,
+ZKSplitLogManagerCoordination.resubmitTask(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">Stringpath,
SplitLogManager.Tasktask,
-SplitLogManager.ResubmitDirective force)
-Resubmit the task if it is found unassigned or
failed
-
+SplitLogManager.ResubmitDirectivedirective)
boolean
-ZKSplitLogManagerCoordination.resubmitTask(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">Stringpath,
+SplitLogManagerCoordination.resubmitTask(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">StringtaskName,
SplitLogManager.Tasktask,
-SplitLogManager.ResubmitDirectivedirective)
+SplitLogManager.ResubmitDirective force)
+Resubmit the task if it is found unassigned or
failed
+
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html
b/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html
index a154e61..650fbb7 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html
@@ -138,11 +138,11 @@
TableStateManager
-MasterServices.getTableStateManager()
+HMaster.getTableStateManager()
TableStateManager
-HMaster.getTableStateManager()
+MasterServices.getTableStateManager()
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html
b/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html
index b5b7703..a444123 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html
@@ -117,11 +117,11 @@
LockManager
-MasterServices.getLockManager()
+HMaster.getLockManager()
LockManager
-HMaster.getLockManager()
+MasterServices.getLockManager()
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html
b/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html
index e7cc074..029d065 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html
@@ -104,15 +104,15 @@
NormalizationPlan.PlanType
-NormalizationPlan.getType()
+MergeNormalizationPlan.getType()
NormalizationPlan.PlanType
-SplitNormalizationPlan.getType()
+NormalizationPlan.getType()
NormalizationPlan.PlanType
-MergeNormalizationPlan.getType()
+SplitNormalizationPlan.getType()
NormalizationPlan.PlanType
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html
b/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html
index d8fb2f6..ad4e9b4 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html
@@ -125,11 +125,11 @@
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
index d30ee5e..b58c054 100644
---
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
+++
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
@@ -166,27 +166,27 @@
DataBlockEncoder.EncodedSeeker
-CopyKeyDataBlockEncoder.createSeeker(CellComparatorcomparator,
+RowIndexCodecV1.createSeeker(CellComparatorcomparator,
HFileBlockDecodingContextdecodingCtx)
DataBlockEncoder.EncodedSeeker
-PrefixKeyDeltaEncoder.createSeeker(CellComparatorcomparator,
+CopyKeyDataBlockEncoder.createSeeker(CellComparatorcomparator,
HFileBlockDecodingContextdecodingCtx)
DataBlockEncoder.EncodedSeeker
-FastDiffDeltaEncoder.createSeeker(CellComparatorcomparator,
+DiffKeyDeltaEncoder.createSeeker(CellComparatorcomparator,
HFileBlockDecodingContextdecodingCtx)
DataBlockEncoder.EncodedSeeker
-DiffKeyDeltaEncoder.createSeeker(CellComparatorcomparator,
+FastDiffDeltaEncoder.createSeeker(CellComparatorcomparator,
HFileBlockDecodingContextdecodingCtx)
DataBlockEncoder.EncodedSeeker
-RowIndexCodecV1.createSeeker(CellComparatorcomparator,
+PrefixKeyDeltaEncoder.createSeeker(CellComparatorcomparator,
HFileBlockDecodingContextdecodingCtx)
@@ -198,13 +198,13 @@
https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
title="class or interface in java.nio">ByteBuffer
-BufferedDataBlockEncoder.decodeKeyValues(https://docs.oracle.com/javase/8/docs/api/java/io/DataInputStream.html?is-external=true;
title="class or interface in java.io">DataInputStreamsource,
- HFileBlockDecodingContextblkDecodingCtx)
+RowIndexCodecV1.decodeKeyValues(https://docs.oracle.com/javase/8/docs/api/java/io/DataInputStream.html?is-external=true;
title="class or interface in java.io">DataInputStreamsource,
+ HFileBlockDecodingContextdecodingCtx)
https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
title="class or interface in java.nio">ByteBuffer
-RowIndexCodecV1.decodeKeyValues(https://docs.oracle.com/javase/8/docs/api/java/io/DataInputStream.html?is-external=true;
title="class or interface in java.io">DataInputStreamsource,
- HFileBlockDecodingContextdecodingCtx)
+BufferedDataBlockEncoder.decodeKeyValues(https://docs.oracle.com/javase/8/docs/api/java/io/DataInputStream.html?is-external=true;
title="class or interface in java.io">DataInputStreamsource,
+ HFileBlockDecodingContextblkDecodingCtx)
@@ -279,17 +279,17 @@
HFileBlockDecodingContext
-NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContextmeta)
+HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContextfileContext)
HFileBlockDecodingContext
-HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContextfileContext)
-create an encoder-specific decoding context for
reading.
-
+NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContextmeta)
HFileBlockDecodingContext
-HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContextfileContext)
+HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContextfileContext)
+create an encoder-specific decoding context for
reading.
+
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
index cbdb3c8..468913a 100644
---
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
+++
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
@@ -116,36 +116,36 @@
HFileBlockDefaultDecodingContextdecodingCtx)
-protected abstract https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
title="class or interface in java.nio">ByteBuffer
-BufferedDataBlockEncoder.internalDecodeKeyValues(https://docs.oracle.com/javase/8/docs/api/java/io/DataInputStream.html?is-external=true;
title="class or interface in java.io">DataInputStreamsource,
+protected https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
title="class or interface in java.nio">ByteBuffer
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
--
diff --git
a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
index f0d831a..81af282 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
@@ -3557,22 +3557,26 @@
static HBaseClassTestRule
-TestDeleteNamespaceProcedure.CLASS_RULE
+TestRecoverMetaProcedure.CLASS_RULE
static HBaseClassTestRule
-TestServerCrashProcedure.CLASS_RULE
+TestDeleteNamespaceProcedure.CLASS_RULE
static HBaseClassTestRule
-TestEnableTableProcedure.CLASS_RULE
+TestServerCrashProcedure.CLASS_RULE
static HBaseClassTestRule
-TestFastFailOnProcedureNotRegistered.CLASS_RULE
+TestEnableTableProcedure.CLASS_RULE
static HBaseClassTestRule
+TestFastFailOnProcedureNotRegistered.CLASS_RULE
+
+
+static HBaseClassTestRule
TestDeleteTableProcedure.CLASS_RULE
@@ -4927,86 +4931,90 @@
static HBaseClassTestRule
-TestSecureWALReplay.CLASS_RULE
+TestWALDurability.CLASS_RULE
static HBaseClassTestRule
-TestLogRollingNoCluster.CLASS_RULE
+TestSecureWALReplay.CLASS_RULE
static HBaseClassTestRule
-TestSequenceIdAccounting.CLASS_RULE
+TestLogRollingNoCluster.CLASS_RULE
static HBaseClassTestRule
-TestLogRollPeriod.CLASS_RULE
+TestSequenceIdAccounting.CLASS_RULE
static HBaseClassTestRule
-TestAsyncLogRolling.CLASS_RULE
+TestLogRollPeriod.CLASS_RULE
static HBaseClassTestRule
-TestWALCellCodecWithCompression.CLASS_RULE
+TestAsyncLogRolling.CLASS_RULE
static HBaseClassTestRule
-TestWALActionsListener.CLASS_RULE
+TestWALCellCodecWithCompression.CLASS_RULE
static HBaseClassTestRule
-TestAsyncWALReplay.CLASS_RULE
+TestWALActionsListener.CLASS_RULE
static HBaseClassTestRule
-TestFSHLog.CLASS_RULE
+TestAsyncWALReplay.CLASS_RULE
static HBaseClassTestRule
-TestProtobufLog.CLASS_RULE
+TestFSHLog.CLASS_RULE
static HBaseClassTestRule
-TestWALReplay.CLASS_RULE
+TestProtobufLog.CLASS_RULE
static HBaseClassTestRule
-TestWALReplayBoundedLogWriterCreation.CLASS_RULE
+TestWALReplay.CLASS_RULE
static HBaseClassTestRule
-TestWALReplayCompressed.CLASS_RULE
+TestWALReplayBoundedLogWriterCreation.CLASS_RULE
static HBaseClassTestRule
-TestAsyncFSWAL.CLASS_RULE
+TestWALReplayCompressed.CLASS_RULE
static HBaseClassTestRule
-TestCompressor.CLASS_RULE
+TestAsyncFSWAL.CLASS_RULE
static HBaseClassTestRule
-TestAsyncLogRollPeriod.CLASS_RULE
+TestCompressor.CLASS_RULE
static HBaseClassTestRule
-TestLogRollAbort.CLASS_RULE
+TestAsyncLogRollPeriod.CLASS_RULE
static HBaseClassTestRule
-TestCustomWALCellCodec.CLASS_RULE
+TestLogRollAbort.CLASS_RULE
static HBaseClassTestRule
-TestDurability.CLASS_RULE
+TestCustomWALCellCodec.CLASS_RULE
static HBaseClassTestRule
-TestLogRolling.CLASS_RULE
+TestDurability.CLASS_RULE
static HBaseClassTestRule
+TestLogRolling.CLASS_RULE
+
+
+static HBaseClassTestRule
TestAsyncProtobufLog.CLASS_RULE
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
--
diff --git
a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
index e9edcac..53a9af9 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
@@ -2336,6 +2336,10 @@
MasterProcedureSchedulerPerformanceEvaluation.UTIL
+private static HBaseTestingUtility
+TestRecoverMetaProcedure.UTIL
+
+
protected static HBaseTestingUtility
TestDeleteNamespaceProcedure.UTIL
@@ -3119,37 +3123,41 @@
private static HBaseTestingUtility
-TestLogRollingNoCluster.TEST_UTIL
+TestWALDurability.TEST_UTIL
+private static HBaseTestingUtility
+TestLogRollingNoCluster.TEST_UTIL
+
+
(package private) static HBaseTestingUtility
AbstractTestWALReplay.TEST_UTIL
-
+
private static HBaseTestingUtility
TestWALActionsListener.TEST_UTIL
-
+
protected static HBaseTestingUtility
AbstractTestProtobufLog.TEST_UTIL
-
+
protected static HBaseTestingUtility
AbstractTestLogRolling.TEST_UTIL
-
+
protected static HBaseTestingUtility
AbstractTestLogRollPeriod.TEST_UTIL
-
+
protected static HBaseTestingUtility
TestLogRollAbort.TEST_UTIL
-
+
protected static HBaseTestingUtility
AbstractTestFSWAL.TEST_UTIL
-
+
private static HBaseTestingUtility
TestDurability.TEST_UTIL
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
index 3bc66bb..97aa79c 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
@@ -1435,459 +1435,460 @@
1427 */
1428 private void execProcedure(final
RootProcedureState procStack,
1429 final
Procedure<TEnvironment> procedure) {
-1430
Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE);
-1431
-1432// Procedures can suspend
themselves. They skip out by throwing a ProcedureSuspendedException.
-1433// The exception is caught below and
then we hurry to the exit without disturbing state. The
-1434// idea is that the processing of
this procedure will be unsuspended later by an external event
-1435// such as the report of a region open.
TODO: Currently, it's possible for two worker threads
-1436// to be working on the same
procedure concurrently (locking in procedures is NOT about
-1437// concurrency but about tying an
entity to a procedure; i.e. a region to a particular
-1438// procedure instance). This can
make for issues if both threads are changing state.
-1439// See
env.getProcedureScheduler().wakeEvent(regionNode.getProcedureEvent());
-1440// in
RegionTransitionProcedure#reportTransition for example of Procedure putting
-1441// itself back on the scheduler
making it possible for two threads running against
-1442// the one Procedure. Might be ok if
they are both doing different, idempotent sections.
-1443boolean suspended = false;
-1444
-1445// Whether to 're-' -execute; run
through the loop again.
-1446boolean reExecute = false;
-1447
-1448Procedure<TEnvironment>[]
subprocs = null;
-1449do {
-1450 reExecute = false;
-1451 try {
-1452subprocs =
procedure.doExecute(getEnvironment());
-1453if (subprocs != null
&& subprocs.length == 0) {
-1454 subprocs = null;
-1455}
-1456 } catch
(ProcedureSuspendedException e) {
-1457if (LOG.isTraceEnabled()) {
-1458 LOG.trace("Suspend " +
procedure);
-1459}
-1460suspended = true;
-1461 } catch (ProcedureYieldException
e) {
-1462if (LOG.isTraceEnabled()) {
-1463 LOG.trace("Yield " + procedure
+ ": " + e.getMessage(), e);
-1464}
-1465scheduler.yield(procedure);
-1466return;
-1467 } catch (InterruptedException e)
{
-1468if (LOG.isTraceEnabled()) {
-1469 LOG.trace("Yield interrupt " +
procedure + ": " + e.getMessage(), e);
-1470}
-1471
handleInterruptedException(procedure, e);
-1472scheduler.yield(procedure);
-1473return;
-1474 } catch (Throwable e) {
-1475// Catch NullPointerExceptions
or similar errors...
-1476String msg = "CODE-BUG: Uncaught
runtime exception: " + procedure;
-1477LOG.error(msg, e);
-1478procedure.setFailure(new
RemoteProcedureException(msg, e));
-1479 }
-1480
-1481 if (!procedure.isFailed()) {
-1482if (subprocs != null) {
-1483 if (subprocs.length == 1
&& subprocs[0] == procedure) {
-1484// Procedure returned
itself. Quick-shortcut for a state machine-like procedure;
-1485// i.e. we go around this
loop again rather than go back out on the scheduler queue.
-1486subprocs = null;
-1487reExecute = true;
-1488if (LOG.isTraceEnabled())
{
-1489 LOG.trace("Short-circuit
to next step on pid=" + procedure.getProcId());
-1490}
-1491 } else {
-1492// Yield the current
procedure, and make the subprocedure runnable
-1493// subprocs may come back
'null'.
-1494subprocs =
initializeChildren(procStack, procedure, subprocs);
-1495LOG.info("Initialized
subprocedures=" +
-1496 (subprocs == null? null:
-1497
Stream.of(subprocs).map(e -> "{" + e.toString() + "}").
-1498
collect(Collectors.toList()).toString()));
-1499 }
-1500} else if (procedure.getState()
== ProcedureState.WAITING_TIMEOUT) {
-1501 if (LOG.isTraceEnabled()) {
-1502LOG.trace("Added to
timeoutExecutor " + procedure);
-1503 }
-1504
timeoutExecutor.add(procedure);
-1505} else if (!suspended) {
-1506 // No subtask, so we are
done
-1507
procedure.setState(ProcedureState.SUCCESS);
-1508}
-1509 }
-1510
-1511
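The comments above describe how a procedure "skips out" of this loop: it throws ProcedureSuspendedException, the executor catches it without touching the procedure's state, and an external event (such as a region-open report) later wakes it. Below is a hedged sketch of that contract from the procedure's side; the method name and readiness check are assumptions, only the exception-based control flow is taken from the text.

    // Hypothetical procedure step: suspend until an external event fires.
    // externalEventHasFired() is an assumed helper, not a real API.
    protected Procedure<TEnvironment>[] executeStep(TEnvironment env)
        throws ProcedureSuspendedException {
      if (!externalEventHasFired(env)) {       // e.g. the region has not reported open yet
        // Register interest in the event elsewhere, then bail out. execProcedure
        // catches this exception and leaves the procedure's state untouched until
        // something wakes it again.
        throw new ProcedureSuspendedException();
      }
      return null;                             // ready: nothing left to do, no children
    }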
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
--
diff --git
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
index 13d376b..249cd71 100644
---
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
+++
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
@@ -55,389 +55,388 @@
047import
org.apache.hadoop.hbase.util.JVMClusterUtil;
048import
org.apache.hadoop.hbase.util.Threads;
049import org.junit.ClassRule;
-050import org.junit.Ignore;
-051import org.junit.Test;
-052import
org.junit.experimental.categories.Category;
-053import org.junit.runner.RunWith;
-054import org.junit.runners.Parameterized;
-055
-056/**
-057 * Class to test asynchronous region
admin operations.
-058 * @see TestAsyncRegionAdminApi2 This
test and TestAsyncRegionAdminApi2 used to be joined; together they were taking longer than our
-059 * ten minute timeout, so they were
split.
-060 */
-061@RunWith(Parameterized.class)
-062@Category({ LargeTests.class,
ClientTests.class })
-063public class TestAsyncRegionAdminApi
extends TestAsyncAdminBase {
-064 @ClassRule
-065 public static final HBaseClassTestRule
CLASS_RULE =
-066
HBaseClassTestRule.forClass(TestAsyncRegionAdminApi.class);
-067
-068 @Test
-069 public void
testAssignRegionAndUnassignRegion() throws Exception {
-070
createTableWithDefaultConf(tableName);
-071
-072// assign region.
-073HMaster master =
TEST_UTIL.getHBaseCluster().getMaster();
-074AssignmentManager am =
master.getAssignmentManager();
-075RegionInfo hri =
am.getRegionStates().getRegionsOfTable(tableName).get(0);
-076
-077// assert region on server
-078RegionStates regionStates =
am.getRegionStates();
-079ServerName serverName =
regionStates.getRegionServerOfRegion(hri);
-080TEST_UTIL.assertRegionOnServer(hri,
serverName, 200);
-081
assertTrue(regionStates.getRegionState(hri).isOpened());
-082
-083// Region is assigned now. Let's
assign it again.
-084// Master should not abort, and
region should stay assigned.
-085
admin.assign(hri.getRegionName()).get();
-086try {
-087 am.waitForAssignment(hri);
-088 fail("Expected
NoSuchProcedureException");
-089} catch (NoSuchProcedureException e)
{
-090 // Expected
-091}
-092
assertTrue(regionStates.getRegionState(hri).isOpened());
-093
-094// unassign region
-095admin.unassign(hri.getRegionName(),
true).get();
-096try {
-097 am.waitForAssignment(hri);
-098 fail("Expected
NoSuchProcedureException");
-099} catch (NoSuchProcedureException e)
{
-100 // Expected
-101}
-102
assertTrue(regionStates.getRegionState(hri).isClosed());
-103 }
-104
-105 RegionInfo
createTableAndGetOneRegion(final TableName tableName)
-106 throws IOException,
InterruptedException, ExecutionException {
-107TableDescriptor desc =
-108
TableDescriptorBuilder.newBuilder(tableName)
-109
.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build();
-110admin.createTable(desc,
Bytes.toBytes("A"), Bytes.toBytes("Z"), 5).get();
-111
-112// wait till the table is assigned
-113HMaster master =
TEST_UTIL.getHBaseCluster().getMaster();
-114long timeoutTime =
System.currentTimeMillis() + 3000;
-115while (true) {
-116 List<RegionInfo> regions =
-117
master.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName);
-118 if (regions.size() > 3) {
-119return regions.get(2);
-120 }
-121 long now =
System.currentTimeMillis();
-122 if (now > timeoutTime) {
-123fail("Could not find an online
region");
-124 }
-125 Thread.sleep(10);
-126}
-127 }
-128
-129 @Test
-130 public void
testGetRegionByStateOfTable() throws Exception {
-131RegionInfo hri =
createTableAndGetOneRegion(tableName);
-132
-133RegionStates regionStates =
-134
TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
-135
assertTrue(regionStates.getRegionByStateOfTable(tableName).get(RegionState.State.OPEN)
-136.stream().anyMatch(r -
RegionInfo.COMPARATOR.compare(r, hri) == 0));
-137
assertFalse(regionStates.getRegionByStateOfTable(TableName.valueOf("I_am_the_phantom"))
-138
.get(RegionState.State.OPEN).stream().anyMatch(r -
RegionInfo.COMPARATOR.compare(r, hri) == 0));
-139 }
-140
-141 @Test
-142 public void testMoveRegion() throws
Exception {
-143admin.balancerSwitch(false).join();
-144
-145RegionInfo hri =
createTableAndGetOneRegion(tableName);
-146RawAsyncHBaseAdmin rawAdmin =
(RawAsyncHBaseAdmin) ASYNC_CONN.getAdmin();
-147ServerName serverName =
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 8b1ac9b..dcd9fce 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -704,20 +704,20 @@
java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
title="class or interface in java.io">Serializable)
-org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
-org.apache.hadoop.hbase.regionserver.ScanType
-org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType
-org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
-org.apache.hadoop.hbase.regionserver.FlushType
+org.apache.hadoop.hbase.regionserver.BloomType
org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
-org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
-org.apache.hadoop.hbase.regionserver.Region.Operation
+org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType
org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
-org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
+org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
-org.apache.hadoop.hbase.regionserver.BloomType
+org.apache.hadoop.hbase.regionserver.ScanType
org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
+org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
+org.apache.hadoop.hbase.regionserver.FlushType
+org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
+org.apache.hadoop.hbase.regionserver.Region.Operation
+org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
b/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
index a62f000..7208218 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
@@ -2116,6 +2116,11 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
+Region
+Region is a subset of HRegion with operations required for
the Coprocessors.
+
+
+
RegionServerServices
A curated subset of services provided by HRegionServer.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
index 23060c2..2731576 100644
---
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
+++
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
@@ -130,9 +130,9 @@
java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
title="class or interface in java.io">Serializable)
-org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
-org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult
org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode
+org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult
+org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.ActiveOperation.html
--
diff --git
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
index 238fee7..262cf46 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
@@ -152,27 +152,27 @@ the order they are declared.
PeerProcedureInterface.PeerOperationType
-RefreshPeerProcedure.getPeerOperationType()
+DisablePeerProcedure.getPeerOperationType()
PeerProcedureInterface.PeerOperationType
-DisablePeerProcedure.getPeerOperationType()
+RemovePeerProcedure.getPeerOperationType()
PeerProcedureInterface.PeerOperationType
-UpdatePeerConfigProcedure.getPeerOperationType()
+EnablePeerProcedure.getPeerOperationType()
PeerProcedureInterface.PeerOperationType
-AddPeerProcedure.getPeerOperationType()
+RefreshPeerProcedure.getPeerOperationType()
PeerProcedureInterface.PeerOperationType
-EnablePeerProcedure.getPeerOperationType()
+AddPeerProcedure.getPeerOperationType()
PeerProcedureInterface.PeerOperationType
-RemovePeerProcedure.getPeerOperationType()
+UpdatePeerConfigProcedure.getPeerOperationType()
private static PeerProcedureInterface.PeerOperationType
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
index 52693ba..f5001f1 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
@@ -185,7 +185,7 @@
private ProcedurePrepareLatch
-RecoverMetaProcedure.syncLatch
+AbstractStateMachineNamespaceProcedure.syncLatch
private ProcedurePrepareLatch
@@ -193,7 +193,7 @@
private ProcedurePrepareLatch
-AbstractStateMachineNamespaceProcedure.syncLatch
+RecoverMetaProcedure.syncLatch
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
index 280fbd6..5929a91 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
@@ -104,14 +104,14 @@
ServerProcedureInterface.ServerOperationType
-ServerCrashProcedure.getServerOperationType()
-
-
-ServerProcedureInterface.ServerOperationType
ServerProcedureInterface.getServerOperationType()
Given an operation type we can take decisions about what to
do with pending operations.
+
+ServerProcedureInterface.ServerOperationType
+ServerCrashProcedure.getServerOperationType()
+
static ServerProcedureInterface.ServerOperationType
ServerProcedureInterface.ServerOperationType.valueOf(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">Stringname)
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
index f9a40a6..9f856b9 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
@@ -112,19 +112,19 @@
TableProcedureInterface.TableOperationType
-MoveRegionProcedure.getTableOperationType()
+UnassignProcedure.getTableOperationType()
TableProcedureInterface.TableOperationType
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
index f8352b7..8b1c4bd 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
@@ -247,14 +247,14 @@ extends
Methods inherited from classorg.apache.hadoop.hbase.master.HMaster
-abort,
abortProcedure,
addColumn,
addReplicationPeer,
balance,
balance,
balanceSwitch,
canCreateBaseZNode, canUpdateTableDescriptor,
checkIfShouldMoveSystemRegionAsync,
checkInitialized,
checkServiceStarted,
checkTableModifiable,
configureInfoServer,
constructMaster,
createMetaBootstrap,
createNamespace,
createQuotaSnapshotNotifier,
createRpcServices,
createServerManager,
createSystemTable,
createTable,
decommissionRegionServers,
decorateMasterConfiguration,
deleteColumn,
deleteNamespace,
deleteTable,
disableReplicationPeer,
disableTable,
enableReplicationPeer,
enableTable,
getAssignmentManager,
getAverageLoad,
getCatalogJanitor,
getClientIdAuditPrefix,
getClusterMetrics,
getClusterMetrics,
getClusterMetricsWithoutCoprocessor,
getClusterMetricsWithoutCoprocessor,
getClusterSchema,
getDumpServlet,
getFavoredNodesManager,
getHFileCleaner,
getInitializedEvent,
getLastMajorCompactionTimestamp,
getLastMajorCompactionTimestampForRegion,
getLoadBalancer,
getLoadBalancerClassName,
getLoadedCoprocessors,
getLockManager,
getLocks,
getLogCleaner,
getMasterActiveTime,
getMasterCoprocessorHost,
getMasterCoprocessors,
getMasterFileSystem,
getMasterFinishedInitializationTime,
getMasterMetrics,
getMasterProcedureExecutor,
getMasterProcedureManagerHost,
getMasterQuotaManager,
getMasterRpcServices,
getMasterStartTime,
getMasterWalManager,
getMergePlanCount,
getMetaTableObserver,
getMobCompactionState,
getNamespace,
getNamespaces, getNumWALFiles,
getProcedures,
getProcessName,
getQuotaObserverChore,
getRegionNormalizer,
getRegionNormalizerTracker,
getRegionServerFatalLogBuffer,
getRegionServerInfoPort, getRegionServerVersion,
getRemoteInetAddress,
getReplicationPeerConfig,
getReplicationPeerManager,
getServerCrashProcessingEnabledEvent,
getServerManager,
getServerName,
getSnapshotManager, getSpaceQuotaSnapshotNotifier,
getSplitOrMergeTracker,
getSplitPlanCount,
getTableDescriptors,
getTableRegionForRow,
getTableStateManager,
getUseThisHostnameInstead,
getWalProcedureStore,
getZooKeeper,
initClusterSchemaService,
initializeZKBasedSystemTrackers,
initQuotaManager,
isActiveMaster,
isBalancerOn,
isCatalogJanitorEnabled,
isCleanerChoreEnabled, isInitialized,
isInMaintenanceMode,
isNormalizerOn,
isServerCrashProcessingEnabled,
isSplitOrMergeEnabled,
listDecommissionedRegionServers,
listReplicationPeers,
listTableDescriptors,
listTableDescriptorsByNamespace,
listTableNames,
listTableNamesByNamespace,
login,
main,
mergeRegions,
modifyColumn,
modifyNamespace,
modifyTable,
move,
normalizeRegions,
recommissionRegionServer,
recoverMeta, registerService,
remoteProcedureCompleted,
remoteProcedureFailed,
removeReplicationPeer,
reportMobCompactionEnd,
reportMobCompactionStart,
requestMobCompaction,
restoreSnapshot,
setCatalogJanitorEnabled,
setInitialized,
setServerCrashProcessingEnabled,
shutdown,
splitRegion,
stop, stopMaster,
stopServiceThreads,
truncateTable,
updateConfigurationForSpaceQuotaObserver,
updateReplicationPeerConfig,
waitForMasterActive
+abort,
abortProcedure,
addColumn,
addReplicationPeer,
balance,
balance,
balanceSwitch,
canCreateBaseZNode, canUpdateTableDescriptor,
checkIfShouldMoveSystemRegionAsync,
checkInitialized,
checkServiceStarted,
checkTableModifiable,
configureInfoServer,
constructMaster,
createMetaBootstrap,
createNamespace,
createQuotaSnapshotNotifier,
createRpcServices,
createServerManager,
createSystemTable,
createTable,
decommissionRegionServers,
decorateMasterConfiguration,
deleteColumn,
deleteNamespace,
deleteTable,
disableReplicationPeer,
disableTable,
enableReplicationPeer,
enableTable,
getAssignmentManager,
getAverageLoad,
getCatalogJanitor,
getClientIdAuditPrefix,
getClusterMetrics,
getClusterMetrics,
getClusterMetricsWithoutCoprocessor,
getClusterMetricsWithoutCoprocessor,
getClusterSchema,
getDumpServlet,
getFavoredNodesManager,
getHFileCleaner,
getInitializedEvent,
getLastMajorCompactionTimestamp,
getLastMajorCompactionTimestampForRegion,
getLoadBalancer,
getLoadBalancerClassName,
getLoadedCoprocessors,
getLockManager,
getLocks,
getLogCleaner,
getMasterActiveTime,
getMasterCoprocessorHost,
getMasterCoprocessors,
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
index c751af0..7f98047 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
@@ -7,48 +7,48 @@
001/**
-002 *
-003 * Licensed to the Apache Software
Foundation (ASF) under one
-004 * or more contributor license
agreements. See the NOTICE file
-005 * distributed with this work for
additional information
-006 * regarding copyright ownership. The
ASF licenses this file
-007 * to you under the Apache License,
Version 2.0 (the
-008 * "License"); you may not use this file
except in compliance
-009 * with the License. You may obtain a
copy of the License at
-010 *
-011 *
http://www.apache.org/licenses/LICENSE-2.0
-012 *
-013 * Unless required by applicable law or
agreed to in writing, software
-014 * distributed under the License is
distributed on an "AS IS" BASIS,
-015 * WITHOUT WARRANTIES OR CONDITIONS OF
ANY KIND, either express or implied.
-016 * See the License for the specific
language governing permissions and
-017 * limitations under the License.
-018 */
+002 * Licensed to the Apache Software
Foundation (ASF) under one
+003 * or more contributor license
agreements. See the NOTICE file
+004 * distributed with this work for
additional information
+005 * regarding copyright ownership. The
ASF licenses this file
+006 * to you under the Apache License,
Version 2.0 (the
+007 * "License"); you may not use this file
except in compliance
+008 * with the License. You may obtain a
copy of the License at
+009 *
+010 *
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or
agreed to in writing, software
+013 * distributed under the License is
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF
ANY KIND, either express or implied.
+015 * See the License for the specific
language governing permissions and
+016 * limitations under the License.
+017 */
+018package
org.apache.hadoop.hbase.master.assignment;
019
-020package
org.apache.hadoop.hbase.master.assignment;
-021
-022import java.io.IOException;
-023import java.util.Collections;
-024import java.util.List;
-025import org.apache.hadoop.hbase.Cell;
-026import
org.apache.hadoop.hbase.CellBuilderFactory;
-027import
org.apache.hadoop.hbase.CellBuilderType;
-028import
org.apache.hadoop.hbase.HConstants;
-029import
org.apache.hadoop.hbase.HRegionLocation;
-030import
org.apache.hadoop.hbase.MetaTableAccessor;
-031import
org.apache.hadoop.hbase.RegionLocations;
-032import
org.apache.hadoop.hbase.ServerName;
-033import
org.apache.hadoop.hbase.TableName;
-034import
org.apache.hadoop.hbase.client.Put;
-035import
org.apache.hadoop.hbase.client.RegionInfo;
-036import
org.apache.hadoop.hbase.client.Result;
-037import
org.apache.hadoop.hbase.client.Table;
-038import
org.apache.hadoop.hbase.client.TableDescriptor;
-039import
org.apache.hadoop.hbase.master.MasterServices;
-040import
org.apache.hadoop.hbase.master.RegionState.State;
-041import
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-042import
org.apache.hadoop.hbase.util.Bytes;
-043import
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+020import java.io.IOException;
+021import java.util.Collections;
+022import java.util.List;
+023import org.apache.hadoop.hbase.Cell;
+024import
org.apache.hadoop.hbase.CellBuilderFactory;
+025import
org.apache.hadoop.hbase.CellBuilderType;
+026import
org.apache.hadoop.hbase.HConstants;
+027import
org.apache.hadoop.hbase.HRegionLocation;
+028import
org.apache.hadoop.hbase.MetaTableAccessor;
+029import
org.apache.hadoop.hbase.RegionLocations;
+030import
org.apache.hadoop.hbase.ServerName;
+031import
org.apache.hadoop.hbase.TableName;
+032import
org.apache.hadoop.hbase.client.Put;
+033import
org.apache.hadoop.hbase.client.RegionInfo;
+034import
org.apache.hadoop.hbase.client.Result;
+035import
org.apache.hadoop.hbase.client.Table;
+036import
org.apache.hadoop.hbase.client.TableDescriptor;
+037import
org.apache.hadoop.hbase.master.MasterFileSystem;
+038import
org.apache.hadoop.hbase.master.MasterServices;
+039import
org.apache.hadoop.hbase.master.RegionState.State;
+040import
org.apache.hadoop.hbase.procedure2.util.StringUtils;
+041import
org.apache.hadoop.hbase.util.Bytes;
+042import
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+043import
org.apache.hadoop.hbase.wal.WALSplitter;
044import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
b/apidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
index 04248c5..81c7c35 100644
--- a/apidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
+++ b/apidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
@@ -97,7 +97,7 @@ var activeTableTab = "activeTableTab";
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">java.lang.Object
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">java.lang.Object
org.apache.hadoop.hbase.client.TableDescriptorBuilder
@@ -110,8 +110,8 @@ var activeTableTab = "activeTableTab";
@InterfaceAudience.Public
-public class TableDescriptorBuilder
-extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
+public class TableDescriptorBuilder
+extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
Since:
2.0.0
@@ -198,18 +198,18 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
TableDescriptorBuilder
-addCoprocessor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in
java.lang">StringclassName)
+addCoprocessor(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in
java.lang">StringclassName)
TableDescriptorBuilder
-addCoprocessor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">StringclassName,
+addCoprocessor(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">StringclassName,
org.apache.hadoop.fs.PathjarFilePath,
intpriority,
- http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in
java.lang">Stringkvs)
+ https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">String,https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in
java.lang">Stringkvs)
TableDescriptorBuilder
-addCoprocessorWithSpec(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in
java.lang">StringspecStr)
+addCoprocessorWithSpec(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in
java.lang">StringspecStr)
TableDescriptor
@@ -250,7 +250,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
TableDescriptorBuilder
-removeCoprocessor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in
java.lang">StringclassName)
+removeCoprocessor(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in
java.lang">StringclassName)
TableDescriptorBuilder
@@ -270,7 +270,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
TableDescriptorBuilder
-setFlushPolicyClassName(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in
java.lang">Stringclazz)
+setFlushPolicyClassName(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in
java.lang">Stringclazz)
TableDescriptorBuilder
@@ -292,7 +292,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
TableDescriptorBuilder
-setOwnerString(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">StringownerString)
+setOwnerString(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">StringownerString)
Deprecated.
@@ -314,7 +314,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
TableDescriptorBuilder
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index 7edb3ff..665071c 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -1221,2378 +1221,2377 @@
1213
configurationManager.registerObserver(procEnv);
1214
1215int cpus =
Runtime.getRuntime().availableProcessors();
-1216final int numThreads =
conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
-1217Math.max((cpus > 0? cpus/4:
0),
-1218
MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
-1219final boolean abortOnCorruption =
conf.getBoolean(
-1220
MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
-1221
MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
-1222procedureStore.start(numThreads);
-1223procedureExecutor.start(numThreads,
abortOnCorruption);
-1224
procEnv.getRemoteDispatcher().start();
-1225 }
-1226
-1227 private void stopProcedureExecutor()
{
-1228if (procedureExecutor != null) {
-1229
configurationManager.deregisterObserver(procedureExecutor.getEnvironment());
-1230
procedureExecutor.getEnvironment().getRemoteDispatcher().stop();
-1231 procedureExecutor.stop();
-1232 procedureExecutor.join();
-1233 procedureExecutor = null;
-1234}
-1235
-1236if (procedureStore != null) {
-1237
procedureStore.stop(isAborted());
-1238 procedureStore = null;
-1239}
-1240 }
-1241
-1242 private void stopChores() {
-1243if (this.expiredMobFileCleanerChore
!= null) {
-1244
this.expiredMobFileCleanerChore.cancel(true);
-1245}
-1246if (this.mobCompactChore != null)
{
-1247
this.mobCompactChore.cancel(true);
-1248}
-1249if (this.balancerChore != null) {
-1250 this.balancerChore.cancel(true);
-1251}
-1252if (this.normalizerChore != null)
{
-1253
this.normalizerChore.cancel(true);
-1254}
-1255if (this.clusterStatusChore != null)
{
-1256
this.clusterStatusChore.cancel(true);
-1257}
-1258if (this.catalogJanitorChore !=
null) {
-1259
this.catalogJanitorChore.cancel(true);
-1260}
-1261if (this.clusterStatusPublisherChore
!= null){
-1262
clusterStatusPublisherChore.cancel(true);
-1263}
-1264if (this.mobCompactThread != null)
{
-1265 this.mobCompactThread.close();
-1266}
-1267
-1268if (this.quotaObserverChore != null)
{
-1269 quotaObserverChore.cancel();
-1270}
-1271if (this.snapshotQuotaChore != null)
{
-1272 snapshotQuotaChore.cancel();
-1273}
-1274 }
-1275
-1276 /**
-1277 * @return Get remote side's
InetAddress
-1278 */
-1279 InetAddress getRemoteInetAddress(final
int port,
-1280 final long serverStartCode) throws
UnknownHostException {
-1281// Do it out here in its own little
method so can fake an address when
-1282// mocking up in tests.
-1283InetAddress ia =
RpcServer.getRemoteIp();
-1284
-1285// The call could be from the local
regionserver,
-1286// in which case, there is no remote
address.
-1287if (ia == null &&
serverStartCode == startcode) {
-1288 InetSocketAddress isa =
rpcServices.getSocketAddress();
-1289 if (isa != null &&
isa.getPort() == port) {
-1290ia = isa.getAddress();
-1291 }
-1292}
-1293return ia;
-1294 }
-1295
-1296 /**
-1297 * @return Maximum time we should run
balancer for
-1298 */
-1299 private int getMaxBalancingTime() {
-1300int maxBalancingTime =
getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, -1);
-1301if (maxBalancingTime == -1) {
-1302 // if max balancing time isn't
set, defaulting it to period time
-1303 maxBalancingTime =
getConfiguration().getInt(HConstants.HBASE_BALANCER_PERIOD,
-1304
HConstants.DEFAULT_HBASE_BALANCER_PERIOD);
-1305}
-1306return maxBalancingTime;
-1307 }
-1308
-1309 /**
-1310 * @return Maximum number of regions
in transition
-1311 */
-1312 private int
getMaxRegionsInTransition() {
-1313int numRegions =
this.assignmentManager.getRegionStates().getRegionAssignments().size();
-1314return Math.max((int)
Math.floor(numRegions * this.maxRitPercent), 1);
-1315 }
-1316
-1317 /**
-1318 * It first sleep to the next balance
plan start time. Meanwhile, throttling by the max
-1319 * number regions in transition to
protect availability.
-1320 * @param nextBalanceStartTime The
next balance plan start time
-1321 * @param maxRegionsInTransition max
number of regions in transition
-1322 * @param
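
The getMaxRegionsInTransition() code above caps the balancer at a fraction of all regions (maxRitPercent) with a floor of one. A self-contained sketch of that arithmetic, with made-up numbers:

public class BalancerThrottleExample {
  // Same formula as shown above: floor(numRegions * maxRitPercent), but never below 1.
  static int maxRegionsInTransition(int numRegions, double maxRitPercent) {
    return Math.max((int) Math.floor(numRegions * maxRitPercent), 1);
  }

  public static void main(String[] args) {
    System.out.println(maxRegionsInTransition(1000, 0.01)); // 10
    System.out.println(maxRegionsInTransition(20, 0.01));   // 1 (floor kicks in)
  }
}
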
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index 802b925..a3e80ab 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -73,229 +73,229 @@
065import
java.util.concurrent.TimeoutException;
066import
java.util.concurrent.atomic.AtomicBoolean;
067import
java.util.concurrent.atomic.AtomicInteger;
-068import
java.util.concurrent.atomic.AtomicLong;
-069import
java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import
java.util.concurrent.locks.ReadWriteLock;
-072import
java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import
org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import
org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import
org.apache.hadoop.hbase.CellBuilderType;
-081import
org.apache.hadoop.hbase.CellComparator;
-082import
org.apache.hadoop.hbase.CellComparatorImpl;
-083import
org.apache.hadoop.hbase.CellScanner;
-084import
org.apache.hadoop.hbase.CellUtil;
-085import
org.apache.hadoop.hbase.CompareOperator;
-086import
org.apache.hadoop.hbase.CompoundConfiguration;
-087import
org.apache.hadoop.hbase.DoNotRetryIOException;
-088import
org.apache.hadoop.hbase.DroppedSnapshotException;
-089import
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import
org.apache.hadoop.hbase.HConstants;
-091import
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import
org.apache.hadoop.hbase.HRegionInfo;
-094import
org.apache.hadoop.hbase.KeyValue;
-095import
org.apache.hadoop.hbase.KeyValueUtil;
-096import
org.apache.hadoop.hbase.NamespaceDescriptor;
-097import
org.apache.hadoop.hbase.NotServingRegionException;
-098import
org.apache.hadoop.hbase.PrivateCellUtil;
-099import
org.apache.hadoop.hbase.RegionTooBusyException;
-100import
org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import
org.apache.hadoop.hbase.UnknownScannerException;
-104import
org.apache.hadoop.hbase.client.Append;
-105import
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import
org.apache.hadoop.hbase.client.CompactionState;
-107import
org.apache.hadoop.hbase.client.Delete;
-108import
org.apache.hadoop.hbase.client.Durability;
-109import
org.apache.hadoop.hbase.client.Get;
-110import
org.apache.hadoop.hbase.client.Increment;
-111import
org.apache.hadoop.hbase.client.IsolationLevel;
-112import
org.apache.hadoop.hbase.client.Mutation;
-113import
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import
org.apache.hadoop.hbase.client.Put;
-115import
org.apache.hadoop.hbase.client.RegionInfo;
-116import
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import
org.apache.hadoop.hbase.client.Result;
-118import
org.apache.hadoop.hbase.client.RowMutations;
-119import
org.apache.hadoop.hbase.client.Scan;
-120import
org.apache.hadoop.hbase.client.TableDescriptor;
-121import
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import
org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import
org.apache.hadoop.hbase.filter.FilterWrapper;
-131import
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import
org.apache.hadoop.hbase.io.HFileLink;
-133import
org.apache.hadoop.hbase.io.HeapSize;
-134import
org.apache.hadoop.hbase.io.TimeRange;
-135import
org.apache.hadoop.hbase.io.hfile.HFile;
-136import
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import
org.apache.hadoop.hbase.ipc.RpcCall;
-139import
org.apache.hadoop.hbase.ipc.RpcServer;
-140import
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/testdevapidocs/index-all.html
--
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index 24aa34d..f4d6287 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -5809,6 +5809,8 @@
CLASS_RULE
- Static variable in class org.apache.hadoop.hbase.security.token.TestTokenAuthentication
+CLASS_RULE
- Static variable in class org.apache.hadoop.hbase.security.token.TestTokenUtil
+
CLASS_RULE
- Static variable in class org.apache.hadoop.hbase.security.token.TestZKSecretWatcher
CLASS_RULE
- Static variable in class org.apache.hadoop.hbase.security.token.TestZKSecretWatcherRefreshKeys
@@ -25261,8 +25263,6 @@
MyAsyncProcess(ClusterConnection,
Configuration, AtomicInteger) - Constructor for class
org.apache.hadoop.hbase.client.TestAsyncProcess.MyAsyncProcess
-MyAsyncProcess(ClusterConnection,
Configuration, boolean) - Constructor for class
org.apache.hadoop.hbase.client.TestAsyncProcess.MyAsyncProcess
-
MyAsyncProcess(ClusterConnection,
Configuration) - Constructor for class
org.apache.hadoop.hbase.client.TestAsyncProcessWithRegionException.MyAsyncProcess
MyAsyncProcessWithReplicas(ClusterConnection,
Configuration) - Constructor for class
org.apache.hadoop.hbase.client.TestAsyncProcess.MyAsyncProcessWithReplicas
@@ -47436,6 +47436,8 @@
testErrorPropagation()
- Method in class org.apache.hadoop.hbase.procedure.TestProcedure
+testErrors()
- Method in class org.apache.hadoop.hbase.client.TestAsyncProcess
+
testErrorsServers()
- Method in class org.apache.hadoop.hbase.client.TestAsyncProcess
testEscape()
- Method in class org.apache.hadoop.hbase.util.TestJRubyFormat
@@ -47662,6 +47664,8 @@
testFailedPut()
- Method in class org.apache.hadoop.hbase.client.TestAsyncProcessWithRegionException
+testFailedPutAndNewPut()
- Method in class org.apache.hadoop.hbase.client.TestAsyncProcess
+
testFailedPutWithoutActionException()
- Method in class org.apache.hadoop.hbase.client.TestAsyncProcessWithRegionException
testFailedServer()
- Method in class org.apache.hadoop.hbase.ipc.TestHBaseClient
@@ -49124,8 +49128,6 @@
testGlobalAuthorizationForNewRegisteredRS()
- Method in class org.apache.hadoop.hbase.security.access.TestAccessController
-testGlobalErrors()
- Method in class org.apache.hadoop.hbase.client.TestAsyncProcess
-
TestGlobalFilter - Class in org.apache.hadoop.hbase.http
TestGlobalFilter()
- Constructor for class org.apache.hadoop.hbase.http.TestGlobalFilter
@@ -49906,8 +49908,6 @@
testHTableExistsMethodSingleRegionSingleGet()
- Method in class org.apache.hadoop.hbase.client.TestFromClientSide3
-testHTableFailedPutAndNewPut()
- Method in class org.apache.hadoop.hbase.client.TestAsyncProcess
-
testHTableInterfaceMethods()
- Method in class org.apache.hadoop.hbase.coprocessor.TestAppendTimeRange
testHTableInterfaceMethods()
- Method in class org.apache.hadoop.hbase.coprocessor.TestIncrementTimeRange
@@ -53690,6 +53690,8 @@
testObserverAddedByDefault()
- Method in class org.apache.hadoop.hbase.quotas.TestMasterSpaceQuotaObserver
+testObtainToken()
- Method in class org.apache.hadoop.hbase.security.token.TestTokenUtil
+
testOddSizedBlocks()
- Method in class org.apache.hadoop.hbase.io.crypto.TestEncryption
testOfferInStealJobQueueShouldUnblock()
- Method in class org.apache.hadoop.hbase.util.TestStealJobQueue
@@ -60680,6 +60682,10 @@
testTokenCreation()
- Method in class org.apache.hadoop.hbase.security.token.TestTokenAuthentication
+TestTokenUtil - Class in org.apache.hadoop.hbase.security.token
+
+TestTokenUtil()
- Constructor for class org.apache.hadoop.hbase.security.token.TestTokenUtil
+
testToLong()
- Method in class org.apache.hadoop.hbase.util.TestBytes
testTooBigEntry()
- Method in class org.apache.hadoop.hbase.io.hfile.bucket.TestBucketWriterThread
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/testdevapidocs/org/apache/hadoop/hbase/TestLocalHBaseCluster.MyHMaster.html
--
diff --git
a/testdevapidocs/org/apache/hadoop/hbase/TestLocalHBaseCluster.MyHMaster.html
b/testdevapidocs/org/apache/hadoop/hbase/TestLocalHBaseCluster.MyHMaster.html
index 55d0251..2dc7e47 100644
---
a/testdevapidocs/org/apache/hadoop/hbase/TestLocalHBaseCluster.MyHMaster.html
+++
b/testdevapidocs/org/apache/hadoop/hbase/TestLocalHBaseCluster.MyHMaster.html
@@ -153,7 +153,7 @@ extends org.apache.hadoop.hbase.master.HMaster
Nested classes/interfaces inherited from
classorg.apache.hadoop.hbase.master.HMaster
-org.apache.hadoop.hbase.master.HMaster.RedirectServlet
+org.apache.hadoop.hbase.master.HMaster.MasterStoppedException,
org.apache.hadoop.hbase.master.HMaster.RedirectServlet
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
index 75077a6..5da3ef8 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
@@ -521,33 +521,33 @@
-org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollection<HStoreFile>
-StripeStoreFileManager.clearCompactedFiles()
-
-
http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
title="class or interface in java.util">CollectionHStoreFile
DefaultStoreFileManager.clearCompactedFiles()
-
+
http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
title="class or interface in java.util">CollectionHStoreFile
StoreFileManager.clearCompactedFiles()
Clears all the compacted files and returns them.
-
+
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollectionHStoreFile
-StripeStoreFileManager.clearFiles()
+StripeStoreFileManager.clearCompactedFiles()
-
+
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollectionHStoreFile
DefaultStoreFileManager.clearFiles()
-
+
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollectionHStoreFile
StoreFileManager.clearFiles()
Clears all the files currently in use and returns
them.
+
+org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollectionHStoreFile
+StripeStoreFileManager.clearFiles()
+
http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
title="class or interface in java.util">Mapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListHStoreFile
HRegion.close()
@@ -597,36 +597,36 @@
http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
title="class or interface in java.util">IteratorHStoreFile
-StripeStoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
-See StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValue)
- for details on this method.
-
+DefaultStoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
title="class or interface in java.util">IteratorHStoreFile
-DefaultStoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
+StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
+Gets initial, full list of candidate store files to check
for row-key-before.
+
http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
title="class or interface in java.util">IteratorHStoreFile
-StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
-Gets initial, full list of candidate store files to check
for row-key-before.
+StripeStoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
+See StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValue)
+ for details on this method.
http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
title="class or interface in java.util">CollectionHStoreFile
-StripeStoreFileManager.getCompactedfiles()
-
-
-http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
title="class or interface in java.util">CollectionHStoreFile
DefaultStoreFileManager.getCompactedfiles()
-
+
http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
title="class or interface in java.util">CollectionHStoreFile
StoreFileManager.getCompactedfiles()
List of compacted files inside this store that needs to be
excluded in reads
because further new reads will be using only the newly created files out of
compaction.
+
+http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
title="class or interface in java.util">CollectionHStoreFile
+StripeStoreFileManager.getCompactedfiles()
+
http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
title="class or interface in java.util">CollectionHStoreFile
HStore.getCompactedFiles()
@@ -637,26 +637,26 @@
http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
title="class or interface in java.util">CollectionHStoreFile
-StripeStoreFileManager.getFilesForScan(byte[]startRow,
+DefaultStoreFileManager.getFilesForScan(byte[]startRow,
booleanincludeStartRow,
byte[]stopRow,
booleanincludeStopRow)
http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
title="class or interface in
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMaster.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMaster.html
b/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMaster.html
index 09f9400..c864853 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMaster.html
@@ -156,11 +156,11 @@
MetricsMaster
-HMaster.getMasterMetrics()
+MasterServices.getMasterMetrics()
MetricsMaster
-MasterServices.getMasterMetrics()
+HMaster.getMasterMetrics()
@@ -212,17 +212,17 @@
+void
+MasterProcedureManagerHost.initialize(MasterServicesmaster,
+ MetricsMastermetricsMaster)
+
+
abstract void
MasterProcedureManager.initialize(MasterServicesmaster,
MetricsMastermetricsMaster)
Initialize a globally barriered procedure for master.
-
-void
-MasterProcedureManagerHost.initialize(MasterServicesmaster,
- MetricsMastermetricsMaster)
-
@@ -258,11 +258,11 @@
private MetricsMaster
-SnapshotQuotaObserverChore.metrics
+QuotaObserverChore.metrics
private MetricsMaster
-QuotaObserverChore.metrics
+SnapshotQuotaObserverChore.metrics
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWrapper.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWrapper.html
b/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWrapper.html
index de9d1bc..814497c 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWrapper.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWrapper.html
@@ -139,29 +139,29 @@
-MetricsMasterProcSource
-MetricsMasterProcSourceFactory.create(MetricsMasterWrappermasterWrapper)
+MetricsMasterSource
+MetricsMasterSourceFactory.create(MetricsMasterWrappermasterWrapper)
MetricsMasterQuotaSource
MetricsMasterQuotaSourceFactory.create(MetricsMasterWrappermasterWrapper)
-MetricsMasterSource
-MetricsMasterSourceFactory.create(MetricsMasterWrappermasterWrapper)
+MetricsMasterProcSource
+MetricsMasterProcSourceFactory.create(MetricsMasterWrappermasterWrapper)
MetricsMasterQuotaSource
MetricsMasterQuotaSourceFactoryImpl.create(MetricsMasterWrappermasterWrapper)
-MetricsMasterSource
-MetricsMasterSourceFactoryImpl.create(MetricsMasterWrappermasterWrapper)
-
-
MetricsMasterProcSource
MetricsMasterProcSourceFactoryImpl.create(MetricsMasterWrappermasterWrapper)
+
+MetricsMasterSource
+MetricsMasterSourceFactoryImpl.create(MetricsMasterWrappermasterWrapper)
+
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/master/class-use/RackManager.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/class-use/RackManager.html
b/devapidocs/org/apache/hadoop/hbase/master/class-use/RackManager.html
index 5216a4d..c91791d 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/class-use/RackManager.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/RackManager.html
@@ -116,11 +116,11 @@
private RackManager
-FavoredNodeLoadBalancer.rackManager
+FavoredNodesManager.rackManager
private RackManager
-FavoredNodesManager.rackManager
+FavoredNodeLoadBalancer.rackManager
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
b/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
index 146b426..d2c9cca 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
@@ -282,10 +282,7 @@
http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListRegionPlan
-SimpleLoadBalancer.balanceCluster(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
title="class or interface in java.util">MapServerName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListRegionInfoclusterMap)
-Generate a global load balancing plan according to the
specified map of
- server information to the most loaded regions of each server.
-
+FavoredStochasticBalancer.balanceCluster(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
title="class or interface in
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
b/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
index ed15d9b..3d03e17 100644
---
a/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
+++
b/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
@@ -248,7 +248,7 @@ the order they are declared.
values
-public staticWALProcedureStore.PushType[]values()
+public staticWALProcedureStore.PushType[]values()
Returns an array containing the constants of this enum
type, in
the order they are declared. This method may be used to iterate
over the constants as follows:
@@ -268,7 +268,7 @@ for (WALProcedureStore.PushType c :
WALProcedureStore.PushType.values())
valueOf
-public staticWALProcedureStore.PushTypevalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">Stringname)
+public staticWALProcedureStore.PushTypevalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">Stringname)
Returns the enum constant of this type with the specified
name.
The string must match exactly an identifier used to declare an
enum constant in this type. (Extraneous whitespace characters are
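
For reference, the generated values()/valueOf() helpers documented above behave like those on any Java enum. A self-contained sketch with a stand-in enum (the PushType constant names here are assumptions):

public class PushTypeExample {
  enum PushType { INSERT, UPDATE, DELETE }

  public static void main(String[] args) {
    // values(): iterate the constants in declaration order
    for (PushType c : PushType.values()) {
      System.out.println(c);
    }
    // valueOf(String): the name must exactly match an enum identifier
    PushType t = PushType.valueOf("INSERT");
    System.out.println(t);
  }
}
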
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
b/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
index c6f6a46..5bd2115 100644
---
a/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
+++
b/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
@@ -141,11 +141,11 @@
private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/DelayQueue.html?is-external=true;
title="class or interface in java.util.concurrent">DelayQueueDelayedUtil.DelayedWithTimeout
-ProcedureExecutor.TimeoutExecutorThread.queue
+RemoteProcedureDispatcher.TimeoutExecutorThread.queue
private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/DelayQueue.html?is-external=true;
title="class or interface in java.util.concurrent">DelayQueueDelayedUtil.DelayedWithTimeout
-RemoteProcedureDispatcher.TimeoutExecutorThread.queue
+ProcedureExecutor.TimeoutExecutorThread.queue
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
index 934c2fa..dd6045b 100644
---
a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
+++
b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
@@ -125,11 +125,11 @@
MasterQuotaManager
-MasterServices.getMasterQuotaManager()
+HMaster.getMasterQuotaManager()
MasterQuotaManager
-HMaster.getMasterQuotaManager()
+MasterServices.getMasterQuotaManager()
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
index a495cd1..d81fa5e 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
@@ -110,9 +110,7 @@
http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListQuotaSettings
-AsyncAdmin.getQuota(QuotaFilterfilter)
-List the quotas based on the filter.
-
+AsyncHBaseAdmin.getQuota(QuotaFilterfilter)
http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListQuotaSettings
@@ -121,16 +119,18 @@
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
b/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
index 399dc36..5436db5 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
@@ -18,8 +18,8 @@
catch(err) {
}
//-->
-var methods =
{"i0":9,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10};
-var tabs = {65535:["t0","All Methods"],1:["t1","Static
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
+var methods =
{"i0":42,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":42,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance
Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
var altColor = "altColor";
var rowColor = "rowColor";
var tableTab = "tableTab";
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
PrevClass
-NextClass
+NextClass
Frames
@@ -74,7 +74,7 @@ var activeTableTab = "activeTableTab";
Summary:
-Nested|
+Nested|
Field|
Constr|
Method
@@ -107,13 +107,18 @@ var activeTableTab = "activeTableTab";
+
+Direct Known Subclasses:
+MirroringTableStateManager
+
@InterfaceAudience.Private
-public class TableStateManager
+public class TableStateManager
extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
This is a helper class used to manage table states.
- States persisted in tableinfo and cached internally.
+ This class uses hbase:meta as its store for table state so hbase:meta must be
online before
+ start()
is called.
TODO: Cache state. Cut down on meta lookups.
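
The description above says TableStateManager keeps table state in hbase:meta and exposes read/update calls such as getTableState, setTableState and isTableState (see the method summary that follows). A minimal, self-contained sketch of that contract, with an in-memory map standing in for hbase:meta; this is illustrative only, not the HBase implementation:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class TableStateSketch {
  enum State { ENABLED, DISABLED, DISABLING, ENABLING }

  // Stand-in for the hbase:meta backed store.
  private final Map<String, State> store = new ConcurrentHashMap<>();

  void setTableState(String tableName, State newState) {
    store.put(tableName, newState);
  }

  State getTableState(String tableName) {
    return store.get(tableName);
  }

  boolean isTableState(String tableName, State... states) {
    State current = getTableState(tableName);
    for (State s : states) {
      if (s == current) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    TableStateSketch m = new TableStateSketch();
    m.setTableState("t1", State.DISABLED);
    System.out.println(m.isTableState("t1", State.DISABLED, State.DISABLING)); // true
  }
}
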
@@ -121,6 +126,25 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
+
+
+
+
+
+Nested Class Summary
+
+Nested Classes
+
+Modifier and Type
+Class and Description
+
+
+static class
+TableStateManager.TableStateNotFoundException
+
+
+
+
@@ -134,7 +158,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
Field and Description
-private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReadWriteLock.html?is-external=true;
title="class or interface in
java.util.concurrent.locks">ReadWriteLock
+(package private) http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReadWriteLock.html?is-external=true;
title="class or interface in
java.util.concurrent.locks">ReadWriteLock
lock
@@ -142,9 +166,16 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
LOG
-private MasterServices
+(package private) MasterServices
master
+
+(package private) static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">String
+MIGRATE_TABLE_STATE_FROM_ZK_KEY
+Set this key to false in Configuration to disable migrating
table state from zookeeper
+ to hbase:meta table.
+
+
@@ -172,51 +203,73 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
Method Summary
-All MethodsStatic MethodsInstance MethodsConcrete Methods
+All MethodsInstance MethodsConcrete MethodsDeprecated Methods
Modifier and Type
Method and Description
-static void
+protected void
+deleteZooKeeper(TableNametableName)
+Deprecated.
+Since 2.0.0. To be removed
in hbase-3.0.0.
+
+
+
+
+protected void
+fixTableState(TableStatetableState)
+For subclasses in case they want to do fixup post
hbase:meta.
+
+
+
+private void
fixTableStates(TableDescriptorstableDescriptors,
Connectionconnection)
-
+
http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
title="class or interface in java.util">SetTableName
getTablesInStates(TableState.State...states)
Return all tables in given states.
-
+
TableState.State
getTableState(TableNametableName)
-
+
boolean
isTablePresent(TableNametableName)
-
+
boolean
isTableState(TableNametableName,
TableState.State...states)
-
+
+private void
+migrateZooKeeper()
+Deprecated.
+Since 2.0.0. Remove in
hbase-3.0.0.
+
+
+
+
protected TableState
readMetaState(TableNametableName)
-
+
void
setDeletedTable(TableNametableName)
-
+
void
setTableState(TableNametableName,
TableState.StatenewState)
Set table state to provided.
-
+
TableState.State
setTableStateIfInStates(TableNametableName,
TableState.StatenewState,
@@ -225,7 +278,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
Caller should lock table on write.
-
+
boolean
setTableStateIfNotInStates(TableNametableName,
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
b/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
index ed15d9b..3d03e17 100644
---
a/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
+++
b/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
@@ -248,7 +248,7 @@ the order they are declared.
values
-public staticWALProcedureStore.PushType[]values()
+public staticWALProcedureStore.PushType[]values()
Returns an array containing the constants of this enum
type, in
the order they are declared. This method may be used to iterate
over the constants as follows:
@@ -268,7 +268,7 @@ for (WALProcedureStore.PushType c :
WALProcedureStore.PushType.values())
valueOf
-public staticWALProcedureStore.PushTypevalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">Stringname)
+public staticWALProcedureStore.PushTypevalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">Stringname)
Returns the enum constant of this type with the specified
name.
The string must match exactly an identifier used to declare an
enum constant in this type. (Extraneous whitespace characters are
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
b/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
index c6f6a46..5bd2115 100644
---
a/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
+++
b/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
@@ -141,11 +141,11 @@
private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/DelayQueue.html?is-external=true;
title="class or interface in java.util.concurrent">DelayQueueDelayedUtil.DelayedWithTimeout
-ProcedureExecutor.TimeoutExecutorThread.queue
+RemoteProcedureDispatcher.TimeoutExecutorThread.queue
private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/DelayQueue.html?is-external=true;
title="class or interface in java.util.concurrent">DelayQueueDelayedUtil.DelayedWithTimeout
-RemoteProcedureDispatcher.TimeoutExecutorThread.queue
+ProcedureExecutor.TimeoutExecutorThread.queue
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
index 934c2fa..dd6045b 100644
---
a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
+++
b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
@@ -125,11 +125,11 @@
MasterQuotaManager
-MasterServices.getMasterQuotaManager()
+HMaster.getMasterQuotaManager()
MasterQuotaManager
-HMaster.getMasterQuotaManager()
+MasterServices.getMasterQuotaManager()
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
index a495cd1..d81fa5e 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
@@ -110,9 +110,7 @@
http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListQuotaSettings
-AsyncAdmin.getQuota(QuotaFilterfilter)
-List the quotas based on the filter.
-
+AsyncHBaseAdmin.getQuota(QuotaFilterfilter)
http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListQuotaSettings
@@ -121,16 +119,18 @@
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
index 42f8bc2..82c1efb 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
@InterfaceAudience.Private
-public class HStore
+public class HStore
extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
implements Store, HeapSize, StoreConfigInformation, PropagatingConfigurationObserver
A Store holds a column family in a Region. It's a memstore
and a set of zero
@@ -218,11 +218,11 @@ implements COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY
-private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
title="class or interface in
java.util.concurrent.atomic">AtomicLong
+private long
compactedCellsCount
-private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
title="class or interface in
java.util.concurrent.atomic">AtomicLong
+private long
compactedCellsSize
@@ -278,15 +278,15 @@ implements FIXED_OVERHEAD
-private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
title="class or interface in
java.util.concurrent.atomic">AtomicLong
+private long
flushedCellsCount
-private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
title="class or interface in
java.util.concurrent.atomic">AtomicLong
+private long
flushedCellsSize
-private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
title="class or interface in
java.util.concurrent.atomic">AtomicLong
+private long
flushedOutputFileSize
@@ -316,11 +316,11 @@ implements LOG
-private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
title="class or interface in
java.util.concurrent.atomic">AtomicLong
+private long
majorCompactedCellsCount
-private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
title="class or interface in
java.util.concurrent.atomic">AtomicLong
+private long
majorCompactedCellsSize
@@ -356,11 +356,11 @@ implements storeEngine
-private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
title="class or interface in
java.util.concurrent.atomic">AtomicLong
+private long
storeSize
-private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
title="class or interface in
java.util.concurrent.atomic">AtomicLong
+private long
totalUncompressedBytes
@@ -1226,7 +1226,7 @@ implements
MEMSTORE_CLASS_NAME
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">String MEMSTORE_CLASS_NAME
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">String MEMSTORE_CLASS_NAME
See Also:
Constant
Field Values
@@ -1239,7 +1239,7 @@ implements
COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">String COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">String COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY
See Also:
Constant
Field Values
@@ -1252,7 +1252,7 @@ implements
BLOCKING_STOREFILES_KEY
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">String BLOCKING_STOREFILES_KEY
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">String BLOCKING_STOREFILES_KEY
See Also:
Constant
Field Values
@@ -1265,7 +1265,7 @@ implements
BLOCK_STORAGE_POLICY_KEY
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">String BLOCK_STORAGE_POLICY_KEY
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">String BLOCK_STORAGE_POLICY_KEY
See Also:
Constant
Field Values
@@ -1278,7 +1278,7 @@ implements
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TerminationStatus.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TerminationStatus.html
b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TerminationStatus.html
index 2939a56..681e263 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TerminationStatus.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TerminationStatus.html
@@ -61,602 +61,608 @@
053import
org.apache.hadoop.hbase.monitoring.TaskMonitor;
054import
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
055import
org.apache.hadoop.hbase.util.FSUtils;
-056import
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-057import
org.apache.yetus.audience.InterfaceAudience;
-058import org.slf4j.Logger;
-059import org.slf4j.LoggerFactory;
-060import
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-061
-062/**
-063 * Distributes the task of log splitting
to the available region servers.
-064 * Coordination happens via coordination
engine. For every log file that has to be split a
-065 * task is created. SplitLogWorkers race
to grab a task.
-066 *
-067 * <p>SplitLogManager monitors the
tasks that it creates using the
-068 * timeoutMonitor thread. If a task's
progress is slow then
-069 * {@link
SplitLogManagerCoordination#checkTasks} will take away the
-070 * task from the owner {@link
org.apache.hadoop.hbase.regionserver.SplitLogWorker}
-071 * and the task will be up for grabs
again. When the task is done then it is
-072 * deleted by SplitLogManager.
-073 *
-074 * <p>Clients call {@link
#splitLogDistributed(Path)} to split a region server's
-075 * log files. The caller thread waits in
this method until all the log files
-076 * have been split.
-077 *
-078 * <p>All the coordination calls
made by this class are asynchronous. This is mainly
-079 * to help reduce response time seen by
the callers.
-080 *
-081 * <p>There is a race in this design
between the SplitLogManager and the
-082 * SplitLogWorker. SplitLogManager might
re-queue a task that has in reality
-083 * already been completed by a
SplitLogWorker. We rely on the idempotency of
-084 * the log splitting task for
correctness.
-085 *
-086 * <p>It is also assumed that every
log splitting task is unique and once
-087 * completed (either with success or with
error) it will not be submitted
-088 * again. If a task is resubmitted then
there is a risk that old "delete task"
-089 * can delete the re-submission.
-090 */
-091@InterfaceAudience.Private
-092public class SplitLogManager {
-093 private static final Logger LOG =
LoggerFactory.getLogger(SplitLogManager.class);
-094
-095 private final MasterServices server;
-096
-097 private final Configuration conf;
-098 private final ChoreService
choreService;
-099
-100 public static final int
DEFAULT_UNASSIGNED_TIMEOUT = (3 * 60 * 1000); // 3 min
-101
-102 private long unassignedTimeout;
-103 private long lastTaskCreateTime =
Long.MAX_VALUE;
-104
-105 @VisibleForTesting
-106 final ConcurrentMap<String, Task>
tasks = new ConcurrentHashMap<>();
-107 private TimeoutMonitor
timeoutMonitor;
-108
-109 private volatile Set<ServerName>
deadWorkers = null;
-110 private final Object deadWorkersLock =
new Object();
-111
-112 /**
-113 * It's OK to construct this object even
when region-servers are not online. It does look up the
-114 * orphan tasks in coordination engine
but it doesn't block waiting for them to be done.
-115 * @param master the master services
-116 * @param conf the HBase
configuration
-117 * @throws IOException
-118 */
-119 public SplitLogManager(MasterServices
master, Configuration conf)
-120 throws IOException {
-121this.server = master;
-122this.conf = conf;
-123this.choreService = new
ChoreService(master.getServerName() + "_splitLogManager_");
-124if
(server.getCoordinatedStateManager() != null) {
-125 SplitLogManagerCoordination
coordination = getSplitLogManagerCoordination();
-126 Set<String> failedDeletions =
Collections.synchronizedSet(new HashSet<String>());
-127 SplitLogManagerDetails details =
new SplitLogManagerDetails(tasks, master, failedDeletions);
-128 coordination.setDetails(details);
-129 coordination.init();
-130}
-131this.unassignedTimeout =
-132
conf.getInt("hbase.splitlog.manager.unassigned.timeout",
DEFAULT_UNASSIGNED_TIMEOUT);
-133this.timeoutMonitor =
-134new
TimeoutMonitor(conf.getInt("hbase.splitlog.manager.timeoutmonitor.period",
1000),
-135master);
-136
choreService.scheduleChore(timeoutMonitor);
-137 }
-138
-139 private SplitLogManagerCoordination
getSplitLogManagerCoordination() {
-140return
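The class comment above says callers invoke splitLogDistributed(Path) and block until every log file under that directory has been split. A hedged caller sketch, not part of this commit: the method name comes from the Javadoc above, while the helper name, the long return value, and the idea of pointing it at one dead server's WAL directory are assumptions for illustration.

// Illustrative only: split all WALs under one dead server's WAL directory and block
// until the distributed split work is done.
static long splitDeadServerWals(MasterServices masterServices, Configuration conf,
    Path deadServerWalDir) throws IOException {
  SplitLogManager splitLogManager = new SplitLogManager(masterServices, conf);
  // splitLogDistributed() returns only after every log file under the directory is split.
  return splitLogManager.splitLogDistributed(deadServerWalDir);
}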
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
index b8e6dfa..7b512ba 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
@@ -28,8473 +28,8472 @@
020import static
org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_LOCAL;
021import static
org.apache.hadoop.hbase.regionserver.HStoreFile.MAJOR_COMPACTION_KEY;
022import static
org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
-023import java.io.EOFException;
-024import java.io.FileNotFoundException;
-025import java.io.IOException;
-026import java.io.InterruptedIOException;
-027import java.lang.reflect.Constructor;
-028import java.nio.ByteBuffer;
-029import
java.nio.charset.StandardCharsets;
-030import java.text.ParseException;
-031import java.util.AbstractList;
-032import java.util.ArrayList;
-033import java.util.Arrays;
-034import java.util.Collection;
-035import java.util.Collections;
-036import java.util.HashMap;
-037import java.util.HashSet;
-038import java.util.Iterator;
-039import java.util.List;
-040import java.util.Map;
-041import java.util.Map.Entry;
-042import java.util.NavigableMap;
-043import java.util.NavigableSet;
-044import java.util.Optional;
-045import java.util.RandomAccess;
-046import java.util.Set;
-047import java.util.TreeMap;
-048import java.util.UUID;
-049import java.util.concurrent.Callable;
-050import
java.util.concurrent.CompletionService;
-051import
java.util.concurrent.ConcurrentHashMap;
-052import
java.util.concurrent.ConcurrentMap;
-053import
java.util.concurrent.ConcurrentSkipListMap;
-054import
java.util.concurrent.ExecutionException;
-055import
java.util.concurrent.ExecutorCompletionService;
-056import
java.util.concurrent.ExecutorService;
-057import java.util.concurrent.Executors;
-058import java.util.concurrent.Future;
-059import java.util.concurrent.FutureTask;
-060import
java.util.concurrent.ThreadFactory;
-061import
java.util.concurrent.ThreadPoolExecutor;
-062import java.util.concurrent.TimeUnit;
-063import
java.util.concurrent.TimeoutException;
-064import
java.util.concurrent.atomic.AtomicBoolean;
-065import
java.util.concurrent.atomic.AtomicInteger;
-066import
java.util.concurrent.atomic.AtomicLong;
-067import
java.util.concurrent.atomic.LongAdder;
-068import java.util.concurrent.locks.Lock;
-069import
java.util.concurrent.locks.ReadWriteLock;
-070import
java.util.concurrent.locks.ReentrantReadWriteLock;
-071import java.util.function.Function;
-072
-073import
org.apache.hadoop.conf.Configuration;
-074import org.apache.hadoop.fs.FileStatus;
-075import org.apache.hadoop.fs.FileSystem;
-076import
org.apache.hadoop.fs.LocatedFileStatus;
-077import org.apache.hadoop.fs.Path;
-078import org.apache.hadoop.hbase.Cell;
-079import
org.apache.hadoop.hbase.CellBuilderType;
-080import
org.apache.hadoop.hbase.CellComparator;
-081import
org.apache.hadoop.hbase.CellComparatorImpl;
-082import
org.apache.hadoop.hbase.CellScanner;
-083import
org.apache.hadoop.hbase.CellUtil;
-084import
org.apache.hadoop.hbase.CompareOperator;
-085import
org.apache.hadoop.hbase.CompoundConfiguration;
-086import
org.apache.hadoop.hbase.DoNotRetryIOException;
-087import
org.apache.hadoop.hbase.DroppedSnapshotException;
-088import
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-089import
org.apache.hadoop.hbase.HConstants;
-090import
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-091import
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-092import
org.apache.hadoop.hbase.HRegionInfo;
-093import
org.apache.hadoop.hbase.KeyValue;
-094import
org.apache.hadoop.hbase.KeyValueUtil;
-095import
org.apache.hadoop.hbase.NamespaceDescriptor;
-096import
org.apache.hadoop.hbase.NotServingRegionException;
-097import
org.apache.hadoop.hbase.PrivateCellUtil;
-098import
org.apache.hadoop.hbase.RegionTooBusyException;
-099import
org.apache.hadoop.hbase.TableName;
-100import org.apache.hadoop.hbase.Tag;
-101import org.apache.hadoop.hbase.TagUtil;
-102import
org.apache.hadoop.hbase.UnknownScannerException;
-103import
org.apache.hadoop.hbase.client.Append;
-104import
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-105import
org.apache.hadoop.hbase.client.CompactionState;
-106import
org.apache.hadoop.hbase.client.Delete;
-107import
org.apache.hadoop.hbase.client.Durability;
-108import
org.apache.hadoop.hbase.client.Get;
-109import
org.apache.hadoop.hbase.client.Increment;
-110import
org.apache.hadoop.hbase.client.IsolationLevel;
-111import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
--
diff --git
a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
index 1981ca2..8b7b594 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
-public class TestFromClientSide
+public class TestFromClientSide
extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
Run tests that use the HBase clients; Table.
Sets up the HBase mini cluster once at start and runs through all client
tests.
@@ -137,38 +137,42 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
Field and Description
+static HBaseClassTestRule
+CLASS_RULE
+
+
private static byte[]
FAMILY
-
+
private static byte[]
INVALID_FAMILY
-
+
private static org.slf4j.Logger
LOG
-
+
org.junit.rules.TestName
name
-
+
private static byte[]
QUALIFIER
-
+
private static byte[]
ROW
-
+
protected static int
SLAVES
-
+
protected static HBaseTestingUtility
TEST_UTIL
-
+
private static byte[]
VALUE
@@ -1039,13 +1043,22 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
Field Detail
+
+
+
+
+
+CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
+
+
LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
@@ -1054,7 +1067,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
TEST_UTIL
-protected static finalHBaseTestingUtility TEST_UTIL
+protected static finalHBaseTestingUtility TEST_UTIL
@@ -1063,7 +1076,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
ROW
-private staticbyte[] ROW
+private staticbyte[] ROW
@@ -1072,7 +1085,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
FAMILY
-private staticbyte[] FAMILY
+private staticbyte[] FAMILY
@@ -1081,7 +1094,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
INVALID_FAMILY
-private static finalbyte[] INVALID_FAMILY
+private static finalbyte[] INVALID_FAMILY
@@ -1090,7 +1103,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
QUALIFIER
-private staticbyte[] QUALIFIER
+private staticbyte[] QUALIFIER
@@ -1099,7 +1112,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
VALUE
-private staticbyte[] VALUE
+private staticbyte[] VALUE
@@ -1108,7 +1121,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
SLAVES
-protected staticint SLAVES
+protected staticint SLAVES
@@ -1117,7 +1130,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
name
-publicorg.junit.rules.TestName name
+publicorg.junit.rules.TestName name
@@ -1134,7 +1147,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
TestFromClientSide
-publicTestFromClientSide()
+publicTestFromClientSide()
@@ -1151,7 +1164,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
setUpBeforeClass
-public staticvoidsetUpBeforeClass()
+public staticvoidsetUpBeforeClass()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
title="class or interface in java.lang">Exception
Throws:
@@ -1165,7 +1178,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
tearDownAfterClass
-public staticvoidtearDownAfterClass()
+public staticvoidtearDownAfterClass()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
title="class or interface in java.lang">Exception
Throws:
@@ -1179,7 +1192,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
testDuplicateAppend
-publicvoidtestDuplicateAppend()
+publicvoidtestDuplicateAppend()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
title="class or interface in java.lang">Exception
Test append result when there are duplicate rpc
requests.
@@ -1194,7 +1207,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
testKeepDeletedCells
-publicvoidtestKeepDeletedCells()
+publicvoidtestKeepDeletedCells()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
title="class or interface in java.lang">Exception
Basic client side validation of HBASE-4536
@@ -1209,7 +1222,7 @@ extends
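The new CLASS_RULE field added to TestFromClientSide above is an HBaseClassTestRule; the page shows only its declaration. A sketch of how such a rule is typically wired into an HBase test class, assuming the usual @ClassRule annotation and forClass() initializer (neither is visible in this diff):

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.junit.ClassRule;

public class TestFromClientSide {
  // Assumed initializer: forClass() ties the class-level test rule to this test class.
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestFromClientSide.class);
}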
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/util/RestoreTool.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/util/RestoreTool.html
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/util/RestoreTool.html
index 4277d0a..36dbc3c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/util/RestoreTool.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/util/RestoreTool.html
@@ -44,36 +44,36 @@
036import
org.apache.hadoop.hbase.backup.BackupRestoreFactory;
037import
org.apache.hadoop.hbase.backup.HBackupFileSystem;
038import
org.apache.hadoop.hbase.backup.RestoreJob;
-039import
org.apache.yetus.audience.InterfaceAudience;
-040import org.slf4j.Logger;
-041import org.slf4j.LoggerFactory;
-042import
org.apache.hadoop.hbase.client.Admin;
-043import
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-044import
org.apache.hadoop.hbase.client.Connection;
-045import
org.apache.hadoop.hbase.client.TableDescriptor;
-046import
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-047import
org.apache.hadoop.hbase.io.HFileLink;
-048import
org.apache.hadoop.hbase.io.hfile.HFile;
+039import
org.apache.hadoop.hbase.client.Admin;
+040import
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+041import
org.apache.hadoop.hbase.client.Connection;
+042import
org.apache.hadoop.hbase.client.TableDescriptor;
+043import
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+044import
org.apache.hadoop.hbase.io.HFileLink;
+045import
org.apache.hadoop.hbase.io.hfile.HFile;
+046import
org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+047import
org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+048import
org.apache.hadoop.hbase.snapshot.SnapshotManifest;
049import
org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
-050import
org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-051import
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-052import
org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-053import
org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-054import
org.apache.hadoop.hbase.util.Bytes;
-055import
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-056import
org.apache.hadoop.hbase.util.FSTableDescriptors;
-057
-058/**
-059 * A collection for methods used by
multiple classes to restore HBase tables.
-060 */
-061@InterfaceAudience.Private
-062public class RestoreTool {
-063
+050import
org.apache.hadoop.hbase.util.Bytes;
+051import
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+052import
org.apache.hadoop.hbase.util.FSTableDescriptors;
+053import
org.apache.yetus.audience.InterfaceAudience;
+054import org.slf4j.Logger;
+055import org.slf4j.LoggerFactory;
+056
+057import
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
+058
+059/**
+060 * A collection for methods used by
multiple classes to restore HBase tables.
+061 */
+062@InterfaceAudience.Private
+063public class RestoreTool {
064 public static final Logger LOG =
LoggerFactory.getLogger(BackupUtils.class);
065 private final static long
TABLE_AVAILABILITY_WAIT_TIME = 18;
066
067 private final String[] ignoreDirs = {
HConstants.RECOVERED_EDITS_DIR };
-068 protected Configuration conf = null;
+068 protected Configuration conf;
069 protected Path backupRootPath;
070 protected String backupId;
071 protected FileSystem fs;
@@ -97,433 +97,426 @@
089 * @throws IOException exception
090 */
091 Path getTableArchivePath(TableName
tableName) throws IOException {
-092
-093Path baseDir =
-094new
Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath,
backupId),
-095
HConstants.HFILE_ARCHIVE_DIRECTORY);
-096Path dataDir = new Path(baseDir,
HConstants.BASE_NAMESPACE_DIR);
-097Path archivePath = new Path(dataDir,
tableName.getNamespaceAsString());
-098Path tableArchivePath = new
Path(archivePath, tableName.getQualifierAsString());
-099if (!fs.exists(tableArchivePath) ||
!fs.getFileStatus(tableArchivePath).isDirectory()) {
-100 LOG.debug("Folder tableArchivePath:
" + tableArchivePath.toString() + " does not exists");
-101 tableArchivePath = null; // empty
table has no archive
-102}
-103return tableArchivePath;
-104 }
-105
-106 /**
-107 * Gets region list
-108 * @param tableName table name
-109 * @return RegionList region list
-110 * @throws FileNotFoundException
exception
-111 * @throws IOException exception
-112 */
-113 ArrayList<Path>
getRegionList(TableName tableName) throws FileNotFoundException, IOException
{
-114Path tableArchivePath =
getTableArchivePath(tableName);
-115ArrayList<Path> regionDirList =
new ArrayList<Path>();
-116FileStatus[] children =
fs.listStatus(tableArchivePath);
-117for (FileStatus
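getTableArchivePath() above composes <table backup path>/archive/data/<namespace>/<table> and returns null when the table had nothing archived. A hedged usage sketch of that contract; the restoreTool and tableName variables, the log messages, and same-package access to these package-private helpers are all assumptions for illustration.

// Illustrative null-check before touching the region directories of a backed-up table.
Path tableArchivePath = restoreTool.getTableArchivePath(tableName);
if (tableArchivePath == null) {
  LOG.info("Table {} has no archived store files in this backup", tableName);
} else {
  for (Path regionDir : restoreTool.getRegionList(tableName)) {
    LOG.debug("Will restore region directory {}", regionDir);
  }
}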
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatKVHeap.html
--
diff --git
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatKVHeap.html
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatKVHeap.html
index 281c243..1a84ee1 100644
---
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatKVHeap.html
+++
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatKVHeap.html
@@ -152,433 +152,461 @@
144
145 /**
146 * Make puts to put the input value
into each combination of row, family, and qualifier
-147 * @param rows
-148 * @param families
-149 * @param qualifiers
-150 * @param value
-151 * @return
-152 * @throws IOException
-153 */
-154 static ArrayList<Put>
createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
-155 byte[] value) throws IOException
{
-156Put put;
-157ArrayListPut puts = new
ArrayList();
-158
-159for (int row = 0; row < rows.length; row++) {
-160 put = new Put(rows[row]);
-161 for (int fam = 0; fam < families.length; fam++) {
-162for (int qual = 0; qual < qualifiers.length; qual++) {
-163 KeyValue kv = new
KeyValue(rows[row], families[fam], qualifiers[qual], qual, value);
-164 put.add(kv);
-165}
-166 }
-167 puts.add(put);
-168}
-169
-170return puts;
-171 }
-172
-173 @AfterClass
-174 public static void tearDownAfterClass()
throws Exception {
-175TEST_UTIL.shutdownMiniCluster();
-176 }
-177
-178 @Before
-179 public void setupBeforeTest() throws
Exception {
-180disableSleeping();
-181 }
-182
-183 @After
-184 public void teardownAfterTest() throws
Exception {
-185disableSleeping();
-186 }
-187
-188 /**
-189 * Run the test callable when
heartbeats are enabled/disabled. We expect all tests to only pass
-190 * when heartbeat messages are enabled
(otherwise the test is pointless). When heartbeats are
-191 * disabled, the test should throw an
exception.
-192 * @param testCallable
-193 * @throws InterruptedException
-194 */
-195 private void
testImportanceOfHeartbeats(Callable<Void> testCallable) throws
InterruptedException {
-196
HeartbeatRPCServices.heartbeatsEnabled = true;
-197
+147 */
+148 static ArrayList<Put>
createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
+149 byte[] value) throws IOException
{
+150Put put;
+151ArrayList<Put> puts = new
ArrayList<>();
+152
+153for (int row = 0; row < rows.length; row++) {
+154 put = new Put(rows[row]);
+155 for (int fam = 0; fam < families.length; fam++) {
+156for (int qual = 0; qual < qualifiers.length; qual++) {
+157 KeyValue kv = new
KeyValue(rows[row], families[fam], qualifiers[qual], qual, value);
+158 put.add(kv);
+159}
+160 }
+161 puts.add(put);
+162}
+163
+164return puts;
+165 }
+166
+167 @AfterClass
+168 public static void tearDownAfterClass()
throws Exception {
+169TEST_UTIL.shutdownMiniCluster();
+170 }
+171
+172 @Before
+173 public void setupBeforeTest() throws
Exception {
+174disableSleeping();
+175 }
+176
+177 @After
+178 public void teardownAfterTest() throws
Exception {
+179disableSleeping();
+180 }
+181
+182 /**
+183 * Run the test callable when
heartbeats are enabled/disabled. We expect all tests to only pass
+184 * when heartbeat messages are enabled
(otherwise the test is pointless). When heartbeats are
+185 * disabled, the test should throw an
exception.
+186 */
+187 private void
testImportanceOfHeartbeats(Callable<Void> testCallable) throws
InterruptedException {
+188
HeartbeatRPCServices.heartbeatsEnabled = true;
+189
+190try {
+191 testCallable.call();
+192} catch (Exception e) {
+193 fail("Heartbeat messages are
enabled, exceptions should NOT be thrown. Exception trace:"
+194 +
ExceptionUtils.getStackTrace(e));
+195}
+196
+197
HeartbeatRPCServices.heartbeatsEnabled = false;
198try {
199 testCallable.call();
200} catch (Exception e) {
-201 fail("Heartbeat messages are
enabled, exceptions should NOT be thrown. Exception trace:"
-202 +
ExceptionUtils.getStackTrace(e));
-203}
-204
-205
HeartbeatRPCServices.heartbeatsEnabled = false;
-206try {
-207 testCallable.call();
-208} catch (Exception e) {
-209 return;
-210} finally {
-211
HeartbeatRPCServices.heartbeatsEnabled = true;
-212}
-213fail("Heartbeats messages are
disabled, an exception should be thrown. If an exception "
-214+ " is not thrown, the test case
is not testing the importance of heartbeat
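createPuts() above builds one Put per row with a KeyValue for every family/qualifier combination, and the heartbeat tests then load those puts before scanning. A short usage sketch under stated assumptions: TEST_UTIL, TABLE_NAME, ROWS, FAMILIES, QUALIFIERS, and VALUE are taken to be existing test fixtures and are not read from this page.

// Illustrative only: write the generated puts into the test table in one batch.
ArrayList<Put> puts = createPuts(ROWS, FAMILIES, QUALIFIERS, VALUE);
try (Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME)) {
  table.put(puts);   // loads every row/family/qualifier combination
}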
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScanner.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScanner.html
b/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScanner.html
index 358b5e2..5788cf7 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScanner.html
@@ -227,7 +227,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
close
-voidclose()
+voidclose()
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
title="class or interface in java.io">IOException
Closes the scanner and releases any resources it has
allocated
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
b/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
index 4132807..5428e2f 100644
---
a/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
+++
b/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
@@ -121,7 +121,7 @@ var activeTableTab = "activeTableTab";
-protected static class KeyValueHeap.KVScannerComparator
+protected static class KeyValueHeap.KVScannerComparator
extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true;
title="class or interface in java.util">ComparatorKeyValueScanner
@@ -231,7 +231,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato
kvComparator
-protectedCellComparator kvComparator
+protectedCellComparator kvComparator
@@ -248,7 +248,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato
KVScannerComparator
-publicKVScannerComparator(CellComparatorkvComparator)
+publicKVScannerComparator(CellComparatorkvComparator)
Constructor
Parameters:
@@ -270,7 +270,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato
compare
-publicintcompare(KeyValueScannerleft,
+publicintcompare(KeyValueScannerleft,
KeyValueScannerright)
Specified by:
@@ -284,7 +284,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato
compare
-publicintcompare(Cellleft,
+publicintcompare(Cellleft,
Cellright)
Compares two KeyValue
@@ -302,7 +302,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato
getComparator
-publicCellComparatorgetComparator()
+publicCellComparatorgetComparator()
Returns:
KVComparator
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
b/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
index 9195a40..67934f0 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
@@ -498,7 +498,7 @@ implements
peek
-publicCellpeek()
+publicCellpeek()
Description copied from
interface:KeyValueScanner
Look at the next Cell in this scanner, but do not iterate
scanner.
NOTICE: The returned cell has not been passed into ScanQueryMatcher. So it
may not be what the
@@ -517,7 +517,7 @@ implements
next
-publicCellnext()
+publicCellnext()
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
title="class or interface in java.io">IOException
Description copied from
interface:KeyValueScanner
Return the next Cell in this scanner, iterating the
scanner
@@ -537,7 +537,7 @@ implements
next
-public boolean next(List<Cell> result,
+public boolean next(List<Cell> result,
ScannerContext scannerContext)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
title="class or interface in java.io">IOException
Gets the next row of keys from the top-most scanner.
@@ -564,7 +564,7 @@ implements
close
-publicvoidclose()
+publicvoidclose()
Description copied from
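The peek()/next()/close() descriptions above define the iteration contract of KeyValueHeap and, more generally, InternalScanner. A minimal consumption sketch based on that contract; the already-opened scanner variable (an InternalScanner) is assumed, and the loop may throw IOException.

// Illustrative loop: next(List) fills one batch of cells and reports whether more remain.
List<Cell> cells = new ArrayList<>();
boolean moreRows;
do {
  cells.clear();
  moreRows = scanner.next(cells);
  for (Cell cell : cells) {
    // process each cell of the current row/batch here
  }
} while (moreRows);
scanner.close();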
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.RandRsExecutor.html
--
diff --git
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.RandRsExecutor.html
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.RandRsExecutor.html
index f1db5ca..d8515d7 100644
---
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.RandRsExecutor.html
+++
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.RandRsExecutor.html
@@ -32,813 +32,820 @@
024import static org.junit.Assert.fail;
025
026import java.io.IOException;
-027import java.net.SocketTimeoutException;
-028import java.util.NavigableMap;
-029import java.util.Random;
-030import java.util.Set;
-031import java.util.SortedSet;
-032import
java.util.concurrent.ConcurrentSkipListMap;
-033import
java.util.concurrent.ConcurrentSkipListSet;
-034import
java.util.concurrent.ExecutionException;
-035import java.util.concurrent.Executors;
-036import java.util.concurrent.Future;
-037import
java.util.concurrent.ScheduledExecutorService;
-038import java.util.concurrent.TimeUnit;
-039
-040import
org.apache.hadoop.conf.Configuration;
-041import
org.apache.hadoop.hbase.CategoryBasedTimeout;
-042import
org.apache.hadoop.hbase.DoNotRetryIOException;
-043import
org.apache.hadoop.hbase.HBaseTestingUtility;
-044import
org.apache.hadoop.hbase.NotServingRegionException;
-045import
org.apache.hadoop.hbase.ServerName;
-046import
org.apache.hadoop.hbase.TableName;
-047import
org.apache.hadoop.hbase.client.RegionInfo;
-048import
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-049import
org.apache.hadoop.hbase.client.RetriesExhaustedException;
-050import
org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-051import
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-052import
org.apache.hadoop.hbase.master.MasterServices;
-053import
org.apache.hadoop.hbase.master.RegionState.State;
-054import
org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
-055import
org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
-056import
org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher;
-057import
org.apache.hadoop.hbase.procedure2.Procedure;
-058import
org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
-059import
org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-060import
org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-061import
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-062import
org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
-063import
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-064import
org.apache.hadoop.hbase.testclassification.MasterTests;
-065import
org.apache.hadoop.hbase.testclassification.MediumTests;
-066import
org.apache.hadoop.hbase.util.Bytes;
-067import
org.apache.hadoop.hbase.util.FSUtils;
-068import
org.apache.hadoop.ipc.RemoteException;
-069import org.junit.After;
-070import org.junit.Before;
-071import org.junit.Ignore;
-072import org.junit.Rule;
-073import org.junit.Test;
-074import
org.junit.experimental.categories.Category;
-075import
org.junit.rules.ExpectedException;
-076import org.junit.rules.TestName;
-077import org.junit.rules.TestRule;
-078import org.slf4j.Logger;
-079import org.slf4j.LoggerFactory;
-080import
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-081import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-082import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-083import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
-084import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
-085import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
-086import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
-087import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
-088import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState;
-089import
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
-090import
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-091import
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
-092
-093@Category({MasterTests.class,
MediumTests.class})
-094public class TestAssignmentManager {
-095 private static final Logger LOG =
LoggerFactory.getLogger(TestAssignmentManager.class);
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.BlockingFilter.html
--
diff --git
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.BlockingFilter.html
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.BlockingFilter.html
new file mode 100644
index 000..a1ce1bf
--- /dev/null
+++
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.BlockingFilter.html
@@ -0,0 +1,1116 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/*
+002 * Licensed to the Apache Software
Foundation (ASF) under one
+003 * or more contributor license
agreements. See the NOTICE file
+004 * distributed with this work for
additional information
+005 * regarding copyright ownership. The
ASF licenses this file
+006 * to you under the Apache License,
Version 2.0 (the
+007 * "License"); you may not use this file
except in compliance
+008 * with the License. You may obtain a
copy of the License at
+009 *
+010 *
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or
agreed to in writing, software
+013 * distributed under the License is
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF
ANY KIND, either express or implied.
+015 * See the License for the specific
language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase.client;
+019
+020import static
org.junit.Assert.assertEquals;
+021import static
org.junit.Assert.assertFalse;
+022import static
org.junit.Assert.assertNotNull;
+023import static
org.junit.Assert.assertNull;
+024import static
org.junit.Assert.assertTrue;
+025
+026import java.io.IOException;
+027import java.lang.reflect.Field;
+028import java.lang.reflect.Modifier;
+029import java.net.SocketTimeoutException;
+030import java.util.ArrayList;
+031import java.util.List;
+032import
java.util.concurrent.ExecutorService;
+033import
java.util.concurrent.SynchronousQueue;
+034import
java.util.concurrent.ThreadLocalRandom;
+035import
java.util.concurrent.ThreadPoolExecutor;
+036import java.util.concurrent.TimeUnit;
+037import
java.util.concurrent.atomic.AtomicBoolean;
+038import
java.util.concurrent.atomic.AtomicInteger;
+039import
java.util.concurrent.atomic.AtomicReference;
+040import
org.apache.hadoop.conf.Configuration;
+041import
org.apache.hadoop.hbase.CategoryBasedTimeout;
+042import org.apache.hadoop.hbase.Cell;
+043import
org.apache.hadoop.hbase.HBaseTestingUtility;
+044import
org.apache.hadoop.hbase.HConstants;
+045import
org.apache.hadoop.hbase.HRegionLocation;
+046import
org.apache.hadoop.hbase.RegionLocations;
+047import
org.apache.hadoop.hbase.ServerName;
+048import
org.apache.hadoop.hbase.TableName;
+049import org.apache.hadoop.hbase.Waiter;
+050import
org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
+051import
org.apache.hadoop.hbase.exceptions.DeserializationException;
+052import
org.apache.hadoop.hbase.exceptions.RegionMovedException;
+053import
org.apache.hadoop.hbase.filter.Filter;
+054import
org.apache.hadoop.hbase.filter.FilterBase;
+055import
org.apache.hadoop.hbase.ipc.RpcClient;
+056import
org.apache.hadoop.hbase.master.HMaster;
+057import
org.apache.hadoop.hbase.regionserver.HRegion;
+058import
org.apache.hadoop.hbase.regionserver.HRegionServer;
+059import
org.apache.hadoop.hbase.regionserver.Region;
+060import
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
+061import
org.apache.hadoop.hbase.testclassification.LargeTests;
+062import
org.apache.hadoop.hbase.util.Bytes;
+063import
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+064import
org.apache.hadoop.hbase.util.JVMClusterUtil;
+065import
org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
+066import
org.apache.hadoop.hbase.util.Threads;
+067import org.junit.AfterClass;
+068import org.junit.Assert;
+069import org.junit.BeforeClass;
+070import org.junit.Ignore;
+071import org.junit.Rule;
+072import org.junit.Test;
+073import
org.junit.experimental.categories.Category;
+074import org.junit.rules.TestName;
+075import org.junit.rules.TestRule;
+076import org.slf4j.Logger;
+077import org.slf4j.LoggerFactory;
+078
+079import
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+080
+081/**
+082 * This class is for testing
HBaseConnectionManager features
+083 */
+084@Category({LargeTests.class})
+085public class TestConnectionImplementation
{
+086 @Rule
+087 public final TestRule timeout =
CategoryBasedTimeout.builder().withTimeout(this.getClass())
+088
.withLookingForStuckThread(true).build();
+089 private static final Logger LOG =
LoggerFactory.getLogger(TestConnectionImplementation.class);
+090 private final static
HBaseTestingUtility
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.html
--
diff --git
a/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.html
b/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.html
index 20e11b1..4b5f191 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
-public class TestWALReaderOnSecureWAL
+public class TestWALReaderOnSecureWAL
extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
@@ -231,7 +231,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
TEST_UTIL
-static finalHBaseTestingUtility TEST_UTIL
+static finalHBaseTestingUtility TEST_UTIL
@@ -240,7 +240,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
value
-finalbyte[] value
+finalbyte[] value
@@ -249,7 +249,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
WAL_ENCRYPTION
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">String WAL_ENCRYPTION
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">String WAL_ENCRYPTION
See Also:
Constant
Field Values
@@ -262,7 +262,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
currentTest
-publicorg.junit.rules.TestName currentTest
+publicorg.junit.rules.TestName currentTest
@@ -279,7 +279,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
TestWALReaderOnSecureWAL
-publicTestWALReaderOnSecureWAL()
+publicTestWALReaderOnSecureWAL()
@@ -296,7 +296,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
setUpBeforeClass
-public staticvoidsetUpBeforeClass()
+public staticvoidsetUpBeforeClass()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
title="class or interface in java.lang">Exception
Throws:
@@ -310,7 +310,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
writeWAL
-privateorg.apache.hadoop.fs.PathwriteWAL(org.apache.hadoop.hbase.wal.WALFactorywals,
+privateorg.apache.hadoop.fs.PathwriteWAL(org.apache.hadoop.hbase.wal.WALFactorywals,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">StringtblName,
booleanoffheap)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
title="class or interface in java.io">IOException
@@ -326,7 +326,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
testWALReaderOnSecureWALWithKeyValues
-publicvoidtestWALReaderOnSecureWALWithKeyValues()
+publicvoidtestWALReaderOnSecureWALWithKeyValues()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
title="class or interface in java.lang">Exception
Throws:
@@ -340,7 +340,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
testWALReaderOnSecureWALWithOffheapKeyValues
-publicvoidtestWALReaderOnSecureWALWithOffheapKeyValues()
+publicvoidtestWALReaderOnSecureWALWithOffheapKeyValues()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
title="class or interface in java.lang">Exception
Throws:
@@ -354,7 +354,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
testSecureWALInternal
-privatevoidtestSecureWALInternal(booleanoffheap)
+privatevoidtestSecureWALInternal(booleanoffheap)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
title="class or interface in java.io">IOException,
http://docs.oracle.com/javase/8/docs/api/java/io/FileNotFoundException.html?is-external=true;
title="class or interface in java.io">FileNotFoundException
@@ -370,7 +370,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
testSecureWALReaderOnWAL
-publicvoidtestSecureWALReaderOnWAL()
+publicvoidtestSecureWALReaderOnWAL()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
title="class or interface in java.lang">Exception
Throws:
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.ExecuteProceduresRemoteCall.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.ExecuteProceduresRemoteCall.html
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.ExecuteProceduresRemoteCall.html
index 63567ad..8841740 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.ExecuteProceduresRemoteCall.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.ExecuteProceduresRemoteCall.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
-protected class RSProcedureDispatcher.ExecuteProceduresRemoteCall
+protected class RSProcedureDispatcher.ExecuteProceduresRemoteCall
extends RSProcedureDispatcher.AbstractRSRemoteCall
implements RSProcedureDispatcher.RemoteProcedureResolver
@@ -199,9 +199,9 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListRSProcedureDispatcher.RegionOpenOperationoperations)
-private void
-remoteCallCompleted(MasterProcedureEnvenv,
-
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponseresponse)
+void
+dispatchServerOperations(MasterProcedureEnv env,
+List<RSProcedureDispatcher.ServerOperation> operations)
private void
@@ -248,7 +248,7 @@ implements
remoteProcedures
-private final Set<RemoteProcedureDispatcher.RemoteProcedure>
remoteProcedures
+private final Set<RemoteProcedureDispatcher.RemoteProcedure>
remoteProcedures
@@ -257,7 +257,7 @@ implements
request
-privateorg.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest.Builder
request
+privateorg.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest.Builder
request
@@ -274,7 +274,7 @@ implements
ExecuteProceduresRemoteCall
-publicExecuteProceduresRemoteCall(ServerNameserverName,
+publicExecuteProceduresRemoteCall(ServerNameserverName,
http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
title="class or interface in java.util">SetRemoteProcedureDispatcher.RemoteProcedureremoteProcedures)
@@ -292,7 +292,7 @@ implements
call
-publichttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
title="class or interface in java.lang">Voidcall()
+publichttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
title="class or interface in java.lang">Voidcall()
Specified by:
http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true#call--;
title="class or interface in java.util.concurrent">callin
interfacehttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true;
title="class or interface in java.util.concurrent">Callablehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
title="class or interface in java.lang">Void
@@ -307,7 +307,7 @@ implements
dispatchOpenRequests
-publicvoiddispatchOpenRequests(MasterProcedureEnvenv,
+publicvoiddispatchOpenRequests(MasterProcedureEnvenv,
http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListRSProcedureDispatcher.RegionOpenOperationoperations)
Specified by:
@@ -321,7 +321,7 @@ implements
dispatchCloseRequests
-publicvoiddispatchCloseRequests(MasterProcedureEnvenv,
+publicvoiddispatchCloseRequests(MasterProcedureEnvenv,
http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListRSProcedureDispatcher.RegionCloseOperationoperations)
Specified by:
@@ -329,29 +329,33 @@ implements
+
-sendRequest
-protectedorg.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponsesendRequest(ServerNameserverName,
-
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequestrequest)
-
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
index 3cef254..0f033c6 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
-static class HBaseFsck.OnlineEntry
+static class HBaseFsck.OnlineEntry
extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
Stores the regioninfo retrieved from Online region
servers.
@@ -206,7 +206,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
hri
-RegionInfo hri
+RegionInfo hri
@@ -215,7 +215,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
hsa
-ServerName hsa
+ServerName hsa
@@ -232,7 +232,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
OnlineEntry
-OnlineEntry()
+OnlineEntry()
@@ -249,7 +249,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
toString
-publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">StringtoString()
+publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">StringtoString()
Overrides:
http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--;
title="class or interface in java.lang">toStringin
classhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
index c1666dc..f98492d 100644
---
a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
+++
b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
-static class HBaseFsck.PrintingErrorReporter
+static class HBaseFsck.PrintingErrorReporter
extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
implements HBaseFsck.ErrorReporter
@@ -301,7 +301,7 @@ implements
errorCount
-publicint errorCount
+publicint errorCount
@@ -310,7 +310,7 @@ implements
showProgress
-privateint showProgress
+privateint showProgress
@@ -319,7 +319,7 @@ implements
progressThreshold
-private static finalint progressThreshold
+private static finalint progressThreshold
See Also:
Constant
Field Values
@@ -332,7 +332,7 @@ implements
errorTables
-Set<HBaseFsck.TableInfo> errorTables
+Set<HBaseFsck.TableInfo> errorTables
@@ -341,7 +341,7 @@ implements
errorList
-private ArrayList<HBaseFsck.ErrorReporter.ERROR_CODE> errorList
+private ArrayList<HBaseFsck.ErrorReporter.ERROR_CODE> errorList
@@ -358,7 +358,7 @@ implements
PrintingErrorReporter
-PrintingErrorReporter()
+PrintingErrorReporter()
@@ -375,7 +375,7 @@ implements
clear
-publicvoidclear()
+publicvoidclear()
Specified by:
clearin
interfaceHBaseFsck.ErrorReporter
@@ -388,7 +388,7 @@ implements
reportError
-publicvoidreportError(HBaseFsck.ErrorReporter.ERROR_CODEerrorCode,
+publicvoidreportError(HBaseFsck.ErrorReporter.ERROR_CODEerrorCode,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">Stringmessage)
Specified by:
@@ -402,7 +402,7 @@ implements
reportError
-publicvoidreportError(HBaseFsck.ErrorReporter.ERROR_CODEerrorCode,
+publicvoidreportError(HBaseFsck.ErrorReporter.ERROR_CODEerrorCode,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
index 1318b95..841130a 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
@@ -55,1647 +55,1615 @@
047import
org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
048import
org.apache.hadoop.hbase.coprocessor.MasterObserver;
049import
org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor;
-050import
org.apache.hadoop.hbase.coprocessor.ObserverContext;
-051import
org.apache.hadoop.hbase.master.locking.LockProcedure;
-052import
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-053import
org.apache.hadoop.hbase.metrics.MetricRegistry;
-054import
org.apache.hadoop.hbase.net.Address;
-055import
org.apache.hadoop.hbase.procedure2.LockType;
-056import
org.apache.hadoop.hbase.procedure2.LockedResource;
-057import
org.apache.hadoop.hbase.procedure2.Procedure;
-058import
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-059import
org.apache.hadoop.hbase.quotas.GlobalQuotaSettings;
-060import
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-061import
org.apache.hadoop.hbase.security.User;
-062import
org.apache.yetus.audience.InterfaceAudience;
-063import org.slf4j.Logger;
-064import org.slf4j.LoggerFactory;
-065
-066/**
-067 * Provides the coprocessor framework and
environment for master oriented
-068 * operations. {@link HMaster} interacts
with the loaded coprocessors
-069 * through this class.
-070 */
-071@InterfaceAudience.Private
-072public class MasterCoprocessorHost
-073extends
CoprocessorHost<MasterCoprocessor, MasterCoprocessorEnvironment> {
-074
-075 private static final Logger LOG =
LoggerFactory.getLogger(MasterCoprocessorHost.class);
-076
-077 /**
-078 * Coprocessor environment extension
providing access to master related
-079 * services.
-080 */
-081 private static class MasterEnvironment
extends BaseEnvironment<MasterCoprocessor>
-082 implements
MasterCoprocessorEnvironment {
-083private final boolean
supportGroupCPs;
-084private final MetricRegistry
metricRegistry;
-085private final MasterServices
services;
-086
-087public MasterEnvironment(final
MasterCoprocessor impl, final int priority, final int seq,
-088final Configuration conf, final
MasterServices services) {
-089 super(impl, priority, seq, conf);
-090 this.services = services;
-091 supportGroupCPs =
!useLegacyMethod(impl.getClass(),
-092 "preBalanceRSGroup",
ObserverContext.class, String.class);
-093 this.metricRegistry =
-094
MetricsCoprocessor.createRegistryForMasterCoprocessor(impl.getClass().getName());
-095}
-096
-097@Override
-098public ServerName getServerName() {
-099 return
this.services.getServerName();
-100}
-101
-102@Override
-103public Connection getConnection() {
-104 return new
SharedConnection(this.services.getConnection());
-105}
-106
-107@Override
-108public Connection
createConnection(Configuration conf) throws IOException {
-109 return
this.services.createConnection(conf);
-110}
-111
-112@Override
-113public MetricRegistry
getMetricRegistryForMaster() {
-114 return metricRegistry;
-115}
-116
-117@Override
-118public void shutdown() {
-119 super.shutdown();
-120
MetricsCoprocessor.removeRegistry(this.metricRegistry);
-121}
-122 }
-123
-124 /**
-125 * Special version of MasterEnvironment
that exposes MasterServices for Core Coprocessors only.
-126 * Temporary hack until Core
Coprocessors are integrated into Core.
-127 */
-128 private static class
MasterEnvironmentForCoreCoprocessors extends MasterEnvironment
-129 implements HasMasterServices {
-130private final MasterServices
masterServices;
-131
-132public
MasterEnvironmentForCoreCoprocessors(final MasterCoprocessor impl, final int
priority,
-133final int seq, final
Configuration conf, final MasterServices services) {
-134 super(impl, priority, seq, conf,
services);
-135 this.masterServices = services;
-136}
-137
-138/**
-139 * @return An instance of
MasterServices, an object NOT for general user-space Coprocessor
-140 * consumption.
-141 */
-142public MasterServices
getMasterServices() {
-143 return this.masterServices;
-144}
-145 }
-146
-147 private MasterServices
masterServices;
-148
-149 public
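MasterCoprocessorHost above is the bridge through which HMaster drives loaded master coprocessors, each wrapped in a MasterEnvironment. A hedged sketch of the kind of coprocessor the host would invoke; the class name and the choice of the preCreateTable hook are illustrative and not part of this commit.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

public class ExampleMasterObserver implements MasterCoprocessor, MasterObserver {
  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);  // hand the observer back to the coprocessor host
  }

  @Override
  public void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
      TableDescriptor desc, RegionInfo[] regions) throws IOException {
    // audit or veto table creation before the master acts on it
  }
}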
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html
b/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html
index e650154..4f1a4d3 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html
@@ -334,6 +334,6 @@ extends
-Copyright 2007-2017 https://www.apache.org/ The Apache Software Foundation. All rights
reserved.
+Copyright 2007-2018 https://www.apache.org/ The Apache Software Foundation. All rights
reserved.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.html
b/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.html
index edfdbac..9128281 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.html
@@ -786,6 +786,6 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
-Copyright 2007-2017 https://www.apache.org/ The Apache Software Foundation. All rights
reserved.
+Copyright 2007-2018 https://www.apache.org/ The Apache Software Foundation. All rights
reserved.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.SwitchStateTracker.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.SwitchStateTracker.html
b/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.SwitchStateTracker.html
index f69e178..b10fbd0 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.SwitchStateTracker.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.SwitchStateTracker.html
@@ -378,6 +378,6 @@ extends
-Copyright 2007-2017 https://www.apache.org/ The Apache Software Foundation. All rights
reserved.
+Copyright 2007-2018 https://www.apache.org/ The Apache Software Foundation. All rights
reserved.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.html
b/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.html
index 6965a51..e5e8408 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.html
@@ -400,6 +400,6 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
-Copyright 2007-2017 https://www.apache.org/ The Apache Software Foundation. All rights
reserved.
+Copyright 2007-2018 https://www.apache.org/ The Apache Software Foundation. All rights
reserved.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/TableNamespaceManager.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/TableNamespaceManager.html
b/devapidocs/org/apache/hadoop/hbase/master/TableNamespaceManager.html
index c49546e..9912127 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/TableNamespaceManager.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/TableNamespaceManager.html
@@ -823,6 +823,6 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
b/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
index cf82a3d..1de3398 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
@@ -583,6 +583,6 @@ protected
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
--
diff --git
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
index bbd91b8..4f76302 100644
---
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
+++
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
@@ -56,1641 +56,1753 @@
048import
java.util.concurrent.atomic.AtomicBoolean;
049import
java.util.concurrent.atomic.AtomicInteger;
050import
java.util.concurrent.atomic.AtomicLong;
-051
-052import
org.apache.hadoop.conf.Configuration;
-053import
org.apache.hadoop.hbase.CallQueueTooBigException;
-054import
org.apache.hadoop.hbase.CategoryBasedTimeout;
-055import org.apache.hadoop.hbase.Cell;
-056import
org.apache.hadoop.hbase.HConstants;
-057import
org.apache.hadoop.hbase.HRegionInfo;
-058import
org.apache.hadoop.hbase.HRegionLocation;
-059import
org.apache.hadoop.hbase.RegionLocations;
-060import
org.apache.hadoop.hbase.ServerName;
-061import
org.apache.hadoop.hbase.TableName;
-062import
org.apache.hadoop.hbase.client.AsyncProcessTask.ListRowAccess;
-063import
org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
-064import
org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-065import
org.apache.hadoop.hbase.client.backoff.ServerStatistics;
-066import
org.apache.hadoop.hbase.client.coprocessor.Batch;
-067import
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-068import
org.apache.hadoop.hbase.testclassification.ClientTests;
-069import
org.apache.hadoop.hbase.testclassification.MediumTests;
-070import
org.apache.hadoop.hbase.util.Bytes;
-071import
org.apache.hadoop.hbase.util.Threads;
-072import org.junit.Assert;
-073import org.junit.BeforeClass;
-074import org.junit.Ignore;
-075import org.junit.Rule;
-076import org.junit.Test;
-077import
org.junit.experimental.categories.Category;
-078import org.junit.rules.TestRule;
-079import org.mockito.Mockito;
-080import org.slf4j.Logger;
-081import org.slf4j.LoggerFactory;
-082
-083@Category({ClientTests.class,
MediumTests.class})
-084public class TestAsyncProcess {
-085 @Rule public final TestRule timeout =
CategoryBasedTimeout.builder().withTimeout(this.getClass()).
-086
withLookingForStuckThread(true).build();
-087 private static final Logger LOG =
LoggerFactory.getLogger(TestAsyncProcess.class);
-088 private static final TableName
DUMMY_TABLE =
-089 TableName.valueOf("DUMMY_TABLE");
-090 private static final byte[]
DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1");
-091 private static final byte[]
DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2");
-092 private static final byte[]
DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3");
-093 private static final byte[] FAILS =
Bytes.toBytes("FAILS");
-094 private static final Configuration CONF
= new Configuration();
-095 private static final
ConnectionConfiguration CONNECTION_CONFIG =
-096 new
ConnectionConfiguration(CONF);
-097 private static final ServerName sn =
ServerName.valueOf("s1,1,1");
-098 private static final ServerName sn2 =
ServerName.valueOf("s2,2,2");
-099 private static final ServerName sn3 =
ServerName.valueOf("s3,3,3");
-100 private static final HRegionInfo hri1
=
-101 new HRegionInfo(DUMMY_TABLE,
DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
-102 private static final HRegionInfo hri2
=
-103 new HRegionInfo(DUMMY_TABLE,
DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2);
-104 private static final HRegionInfo hri3
=
-105 new HRegionInfo(DUMMY_TABLE,
DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3);
-106 private static final HRegionLocation
loc1 = new HRegionLocation(hri1, sn);
-107 private static final HRegionLocation
loc2 = new HRegionLocation(hri2, sn);
-108 private static final HRegionLocation
loc3 = new HRegionLocation(hri3, sn2);
-109
-110 // Replica stuff
-111 private static final RegionInfo hri1r1
= RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
-112 private static final RegionInfo hri1r2
= RegionReplicaUtil.getRegionInfoForReplica(hri1, 2);
-113 private static final RegionInfo hri2r1
= RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
-114 private static final RegionLocations
hrls1 = new RegionLocations(new HRegionLocation(hri1, sn),
-115 new HRegionLocation(hri1r1, sn2),
new HRegionLocation(hri1r2, sn3));
-116 private static final RegionLocations
hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2),
-117 new HRegionLocation(hri2r1,
sn3));
-118 private static final RegionLocations
hrls3 =
-119 new RegionLocations(new
HRegionLocation(hri3, sn3), null);
-120
-121 private static final
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
b/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
index b209f49..6fcf813 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
@@ -361,9 +361,12 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
Cell
-maybeCloneWithAllocator(Cell cell)
+maybeCloneWithAllocator(Cell cell,
+                        boolean forceCloneOfBigCell)
If the segment has a memory allocator the cell is being cloned to this space, and returned;
- otherwise the given cell is returned
+ otherwise the given cell is returned
+
+ When a cell's size is too big (bigger than maxAlloc), it is not allocated on MSLAB.
@@ -649,15 +652,21 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
Closing a segment before it is being discarded
-
+
maybeCloneWithAllocator
-public Cell maybeCloneWithAllocator(Cell cell)
+public Cell maybeCloneWithAllocator(Cell cell,
+                                    boolean forceCloneOfBigCell)
If the segment has a memory allocator the cell is being cloned to this space, and returned;
- otherwise the given cell is returned
+ otherwise the given cell is returned
+
+ When a cell's size is too big (bigger than maxAlloc), it is not allocated on MSLAB.
+ Since the process of flattening to CellChunkMap assumes that all cells
+ are allocated on MSLAB, during this process, the input parameter
+ forceCloneOfBigCell is set to 'true' and the cell is copied into MSLAB.
Returns:
either the given cell or its clone
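For illustration, a minimal caller-side sketch of the two-argument overload documented above; the wrapper method and variable names are assumptions for the example and are not part of the Segment API, which is internal to the regionserver package.

// Sketch only: how a memstore-internal caller is expected to use the new flag.
// During a normal add, cells larger than maxAlloc stay off MSLAB; while
// flattening to CellChunkMap the caller passes true so even big cells are copied.
private Cell cloneIfNeeded(Segment segment, Cell toAdd, boolean flatteningToCellChunkMap) {
  // Returns either the original cell or an MSLAB-backed copy, as described above.
  return segment.maybeCloneWithAllocator(toAdd, flatteningToCellChunkMap);
}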
@@ -670,7 +679,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
getCellLength
-staticintgetCellLength(Cellcell)
+staticintgetCellLength(Cellcell)
Get cell length after serialized in KeyValue
@@ -680,7 +689,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
shouldSeek
-publicbooleanshouldSeek(TimeRangetr,
+publicbooleanshouldSeek(TimeRangetr,
longoldestUnexpiredTS)
@@ -690,7 +699,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
isTagsPresent
-publicbooleanisTagsPresent()
+publicbooleanisTagsPresent()
@@ -699,7 +708,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
incScannerCount
-publicvoidincScannerCount()
+publicvoidincScannerCount()
@@ -708,7 +717,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
decScannerCount
-publicvoiddecScannerCount()
+publicvoiddecScannerCount()
@@ -717,7 +726,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
setCellSet
-protectedSegmentsetCellSet(CellSetcellSetOld,
+protectedSegmentsetCellSet(CellSetcellSetOld,
CellSetcellSetNew)
Setting the CellSet of the segment - used only for flat
immutable segment for setting
immutable CellSet after its creation in immutable segment constructor
@@ -733,7 +742,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
keySize
-publiclongkeySize()
+publiclongkeySize()
Returns:
Sum of all cell's size.
@@ -746,7 +755,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
heapSize
-publiclongheapSize()
+publiclongheapSize()
Returns:
The heap size of this segment.
@@ -759,7 +768,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
incSize
-protectedvoidincSize(longdelta,
+protectedvoidincSize(longdelta,
longheapOverhead)
Updates the size counters of the segment by the given
delta
@@ -770,7 +779,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
getMinSequenceId
-publiclonggetMinSequenceId()
+publiclonggetMinSequenceId()
@@ -779,7 +788,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
getTimeRangeTracker
-publicTimeRangeTrackergetTimeRangeTracker()
+publicTimeRangeTrackergetTimeRangeTracker()
@@ -788,7 +797,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
last
-publicCelllast()
+publicCelllast()
@@ -797,7 +806,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
iterator
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
title="class or interface in java.util">IteratorCelliterator()
+publichttp://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
title="class or interface in java.util">IteratorCelliterator()
@@ -806,7 +815,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
headSet
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.html
b/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.html
index 6fac503..cf9ce85 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.html
@@ -647,7 +647,7 @@ implements
updateChorePoolSize
-privatevoidupdateChorePoolSize(intupdatedSize)
+privatevoidupdateChorePoolSize(intupdatedSize)
@@ -656,7 +656,7 @@ implements
newFileCleaner
-privateTnewFileCleaner(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">StringclassName,
+privateTnewFileCleaner(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">StringclassName,
org.apache.hadoop.conf.Configurationconf)
A utility method to create new instances of
LogCleanerDelegate based on the class name of the
LogCleanerDelegate.
@@ -675,7 +675,7 @@ implements
chore
-protectedvoidchore()
+protectedvoidchore()
Description copied from
class:ScheduledChore
The task to execute on each scheduled execution of the
Chore
@@ -690,7 +690,7 @@ implements
preRunCleaner
-privatevoidpreRunCleaner()
+privatevoidpreRunCleaner()
@@ -699,7 +699,7 @@ implements
runCleaner
-publichttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
title="class or interface in java.lang">BooleanrunCleaner()
+publichttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
title="class or interface in java.lang">BooleanrunCleaner()
@@ -708,7 +708,7 @@ implements
sortByConsumedSpace
-privatevoidsortByConsumedSpace(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in
java.util">Listorg.apache.hadoop.fs.FileStatusdirs)
+privatevoidsortByConsumedSpace(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in
java.util">Listorg.apache.hadoop.fs.FileStatusdirs)
Sort the given list in (descending) order of the space each
element takes
Parameters:
@@ -722,7 +722,7 @@ implements
checkAndDeleteFiles
-privatebooleancheckAndDeleteFiles(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in
java.util">Listorg.apache.hadoop.fs.FileStatusfiles)
+privatebooleancheckAndDeleteFiles(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in
java.util">Listorg.apache.hadoop.fs.FileStatusfiles)
Run the given files through each of the cleaners to see if
it should be deleted, deleting it if
necessary.
@@ -739,7 +739,7 @@ implements
deleteFiles
-protectedintdeleteFiles(http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true;
title="class or interface in
java.lang">Iterableorg.apache.hadoop.fs.FileStatusfilesToDelete)
+protectedintdeleteFiles(http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true;
title="class or interface in
java.lang">Iterableorg.apache.hadoop.fs.FileStatusfilesToDelete)
Delete the given files
Parameters:
@@ -755,7 +755,7 @@ implements
cleanup
-publicvoidcleanup()
+publicvoidcleanup()
Description copied from
class:ScheduledChore
Override to run cleanup tasks when the Chore encounters an
error and must stop running
@@ -770,7 +770,7 @@ implements
getChorePoolSize
-intgetChorePoolSize()
+intgetChorePoolSize()
@@ -779,7 +779,7 @@ implements
setEnabled
-publicbooleansetEnabled(booleanenabled)
+publicbooleansetEnabled(booleanenabled)
Parameters:
enabled -
@@ -792,7 +792,7 @@ implements
getEnabled
-publicbooleangetEnabled()
+publicbooleangetEnabled()
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/master/cleaner/class-use/CleanerChore.CleanerTask.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/cleaner/class-use/CleanerChore.CleanerTask.html
b/devapidocs/org/apache/hadoop/hbase/master/cleaner/class-use/CleanerChore.CleanerTask.html
index 39a043b..794142f 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/cleaner/class-use/CleanerChore.CleanerTask.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/cleaner/class-use/CleanerChore.CleanerTask.html
@@ -104,7 +104,7 @@
private boolean
-CleanerChore.CleanerTask.getCleanRusult(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListCleanerChore.CleanerTasktasks)
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowByteBufferCell.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowByteBufferCell.html
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowByteBufferCell.html
index 3400507..2baa140 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowByteBufferCell.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowByteBufferCell.html
@@ -28,3034 +28,2926 @@
020import static
org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY;
021import static
org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
022
-023import
com.google.common.annotations.VisibleForTesting;
-024
-025import java.io.DataOutput;
-026import java.io.DataOutputStream;
-027import java.io.IOException;
-028import java.io.OutputStream;
-029import java.math.BigDecimal;
-030import java.nio.ByteBuffer;
-031import java.util.ArrayList;
-032import java.util.Iterator;
-033import java.util.List;
-034import java.util.Optional;
-035
-036import
org.apache.hadoop.hbase.KeyValue.Type;
-037import
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-038import
org.apache.hadoop.hbase.io.HeapSize;
-039import
org.apache.hadoop.hbase.io.TagCompressionContext;
-040import
org.apache.hadoop.hbase.io.util.Dictionary;
-041import
org.apache.hadoop.hbase.io.util.StreamUtils;
-042import
org.apache.hadoop.hbase.util.ByteBufferUtils;
-043import
org.apache.hadoop.hbase.util.ByteRange;
-044import
org.apache.hadoop.hbase.util.Bytes;
-045import
org.apache.hadoop.hbase.util.ClassSize;
-046import
org.apache.yetus.audience.InterfaceAudience;
-047
-048
-049/**
-050 * Utility methods helpful slinging
{@link Cell} instances. It has more powerful and
-051 * rich set of APIs than those in {@link
CellUtil} for internal usage.
-052 */
-053@InterfaceAudience.Private
-054public final class PrivateCellUtil {
-055
-056 /**
-057 * Private constructor to keep this
class from being instantiated.
-058 */
-059 private PrivateCellUtil() {
-060 }
+023import java.io.DataOutput;
+024import java.io.DataOutputStream;
+025import java.io.IOException;
+026import java.io.OutputStream;
+027import java.math.BigDecimal;
+028import java.nio.ByteBuffer;
+029import java.util.ArrayList;
+030import java.util.Iterator;
+031import java.util.List;
+032import java.util.Optional;
+033import
org.apache.hadoop.hbase.KeyValue.Type;
+034import
org.apache.hadoop.hbase.filter.ByteArrayComparable;
+035import
org.apache.hadoop.hbase.io.HeapSize;
+036import
org.apache.hadoop.hbase.io.TagCompressionContext;
+037import
org.apache.hadoop.hbase.io.util.Dictionary;
+038import
org.apache.hadoop.hbase.io.util.StreamUtils;
+039import
org.apache.hadoop.hbase.util.ByteBufferUtils;
+040import
org.apache.hadoop.hbase.util.ByteRange;
+041import
org.apache.hadoop.hbase.util.Bytes;
+042import
org.apache.hadoop.hbase.util.ClassSize;
+043import
org.apache.yetus.audience.InterfaceAudience;
+044
+045import
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+046
+047/**
+048 * Utility methods helpful slinging
{@link Cell} instances. It has more powerful and
+049 * rich set of APIs than those in {@link
CellUtil} for internal usage.
+050 */
+051@InterfaceAudience.Private
+052public final class PrivateCellUtil {
+053
+054 /**
+055 * Private constructor to keep this
class from being instantiated.
+056 */
+057 private PrivateCellUtil() {
+058 }
+059
+060 /*** ByteRange
***/
061
-062 /*** ByteRange
***/
-063
-064 public static ByteRange
fillRowRange(Cell cell, ByteRange range) {
-065return range.set(cell.getRowArray(),
cell.getRowOffset(), cell.getRowLength());
-066 }
-067
-068 public static ByteRange
fillFamilyRange(Cell cell, ByteRange range) {
-069return
range.set(cell.getFamilyArray(), cell.getFamilyOffset(),
cell.getFamilyLength());
-070 }
-071
-072 public static ByteRange
fillQualifierRange(Cell cell, ByteRange range) {
-073return
range.set(cell.getQualifierArray(), cell.getQualifierOffset(),
-074 cell.getQualifierLength());
-075 }
-076
-077 public static ByteRange
fillValueRange(Cell cell, ByteRange range) {
-078return
range.set(cell.getValueArray(), cell.getValueOffset(),
cell.getValueLength());
-079 }
-080
-081 public static ByteRange
fillTagRange(Cell cell, ByteRange range) {
-082return range.set(cell.getTagsArray(),
cell.getTagsOffset(), cell.getTagsLength());
-083 }
+062 public static ByteRange
fillRowRange(Cell cell, ByteRange range) {
+063return range.set(cell.getRowArray(),
cell.getRowOffset(), cell.getRowLength());
+064 }
+065
+066 public static ByteRange
fillFamilyRange(Cell cell, ByteRange range) {
+067return
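For illustration, a minimal sketch of the ByteRange helpers listed above. PrivateCellUtil is an internal (InterfaceAudience.Private) class, SimpleMutableByteRange is used here as one available ByteRange implementation, and the class name FillRangeExample is made up for the example.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.util.ByteRange;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.SimpleMutableByteRange;

public class FillRangeExample {
  public static void main(String[] args) {
    // Build a cell and view its row bytes through a ByteRange without copying them.
    Cell cell = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), Bytes.toBytes("v"));
    ByteRange range = PrivateCellUtil.fillRowRange(cell, new SimpleMutableByteRange());
    // Prints "row1": the range points into the cell's backing array at the row offset.
    System.out.println(Bytes.toString(range.getBytes(), range.getOffset(), range.getLength()));
  }
}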
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.html
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.html
index ce8c56c..1ebcb9e 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.html
@@ -29,535 +29,532 @@
021import java.io.IOException;
022import java.util.ArrayList;
023import java.util.Collection;
-024import java.util.HashMap;
-025import java.util.List;
-026import java.util.Map;
-027import java.util.Set;
-028import java.util.TreeMap;
-029import
java.util.concurrent.ConcurrentHashMap;
-030import
java.util.concurrent.ConcurrentMap;
-031
-032import
org.apache.hadoop.conf.Configuration;
-033import
org.apache.hadoop.hbase.Abortable;
-034import
org.apache.hadoop.hbase.CompoundConfiguration;
-035import
org.apache.hadoop.hbase.HBaseConfiguration;
-036import
org.apache.hadoop.hbase.TableName;
-037import
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-038import
org.apache.hadoop.hbase.exceptions.DeserializationException;
-039import
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
-040import
org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
-041import
org.apache.hadoop.hbase.util.Bytes;
-042import
org.apache.hadoop.hbase.util.Pair;
-043import
org.apache.hadoop.hbase.zookeeper.ZKConfig;
-044import
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-045import
org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
-046import
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-047import
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-048import
org.apache.yetus.audience.InterfaceAudience;
-049import
org.apache.zookeeper.KeeperException;
-050import org.slf4j.Logger;
-051import org.slf4j.LoggerFactory;
-052
-053/**
-054 * This class provides an implementation
of the ReplicationPeers interface using ZooKeeper. The
-055 * peers znode contains a list of all
peer replication clusters and the current replication state of
-056 * those clusters. It has one child peer
znode for each peer cluster. The peer znode is named with
-057 * the cluster id provided by the user in
the HBase shell. The value of the peer znode contains the
-058 * peers cluster key provided by the user
in the HBase Shell. The cluster key contains a list of
-059 * zookeeper quorum peers, the client
port for the zookeeper quorum, and the base znode for HBase.
-060 * For example:
-061 *
-062 * /hbase/replication/peers/1 [Value:
zk1.host.com,zk2.host.com,zk3.host.com:2181:/hbase]
-063 * /hbase/replication/peers/2 [Value:
zk5.host.com,zk6.host.com,zk7.host.com:2181:/hbase]
-064 *
-065 * Each of these peer znodes has a child
znode that indicates whether or not replication is enabled
-066 * on that peer cluster. These peer-state
znodes do not have child znodes and simply contain a
-067 * boolean value (i.e. ENABLED or
DISABLED). This value is read/maintained by the
-068 * ReplicationPeer.PeerStateTracker
class. For example:
+024import java.util.List;
+025import java.util.Map;
+026import java.util.Set;
+027import java.util.TreeMap;
+028import
java.util.concurrent.ConcurrentHashMap;
+029import
java.util.concurrent.ConcurrentMap;
+030
+031import
org.apache.hadoop.conf.Configuration;
+032import
org.apache.hadoop.hbase.Abortable;
+033import
org.apache.hadoop.hbase.CompoundConfiguration;
+034import
org.apache.hadoop.hbase.HBaseConfiguration;
+035import
org.apache.hadoop.hbase.TableName;
+036import
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
+037import
org.apache.hadoop.hbase.exceptions.DeserializationException;
+038import
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
+039import
org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
+040import
org.apache.hadoop.hbase.util.Pair;
+041import
org.apache.hadoop.hbase.zookeeper.ZKConfig;
+042import
org.apache.hadoop.hbase.zookeeper.ZKUtil;
+043import
org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
+044import
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+045import
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
+046import
org.apache.yetus.audience.InterfaceAudience;
+047import
org.apache.zookeeper.KeeperException;
+048import org.slf4j.Logger;
+049import org.slf4j.LoggerFactory;
+050
+051/**
+052 * This class provides an implementation
of the ReplicationPeers interface using ZooKeeper. The
+053 * peers znode contains a list of all
peer replication clusters and the current replication state of
+054 * those clusters. It has one child peer
znode for each peer cluster. The peer znode is named with
+055 * the cluster id provided by the user in
the HBase shell.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
index f7fbfbf..88ebcbc 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
@@ -34,1583 +34,1583 @@
026import java.io.IOException;
027import java.util.ArrayList;
028import java.util.Arrays;
-029import java.util.Collection;
-030import java.util.Collections;
-031import java.util.EnumSet;
-032import java.util.HashMap;
-033import java.util.List;
-034import java.util.Map;
-035import java.util.Optional;
-036import java.util.Set;
-037import
java.util.concurrent.CompletableFuture;
-038import java.util.concurrent.TimeUnit;
-039import
java.util.concurrent.atomic.AtomicReference;
-040import java.util.function.BiConsumer;
-041import java.util.function.Function;
-042import java.util.regex.Pattern;
-043import java.util.stream.Collectors;
-044import java.util.stream.Stream;
-045import org.apache.commons.io.IOUtils;
-046import
org.apache.hadoop.conf.Configuration;
-047import
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-048import
org.apache.hadoop.hbase.ClusterMetrics.Option;
-049import
org.apache.hadoop.hbase.ClusterStatus;
-050import
org.apache.hadoop.hbase.HConstants;
-051import
org.apache.hadoop.hbase.HRegionLocation;
-052import
org.apache.hadoop.hbase.MetaTableAccessor;
-053import
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import
org.apache.hadoop.hbase.NamespaceDescriptor;
-055import
org.apache.hadoop.hbase.RegionLoad;
-056import
org.apache.hadoop.hbase.RegionLocations;
-057import
org.apache.hadoop.hbase.ServerName;
-058import
org.apache.hadoop.hbase.TableExistsException;
-059import
org.apache.hadoop.hbase.TableName;
-060import
org.apache.hadoop.hbase.TableNotDisabledException;
-061import
org.apache.hadoop.hbase.TableNotEnabledException;
-062import
org.apache.hadoop.hbase.TableNotFoundException;
-063import
org.apache.hadoop.hbase.UnknownRegionException;
-064import
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-065import
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-066import
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder;
-067import
org.apache.hadoop.hbase.client.Scan.ReadType;
-068import
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-069import
org.apache.hadoop.hbase.client.replication.TableCFs;
-070import
org.apache.hadoop.hbase.client.security.SecurityCapability;
-071import
org.apache.hadoop.hbase.exceptions.DeserializationException;
-072import
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-073import
org.apache.hadoop.hbase.quotas.QuotaFilter;
-074import
org.apache.hadoop.hbase.quotas.QuotaSettings;
-075import
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-076import
org.apache.hadoop.hbase.replication.ReplicationException;
-077import
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-078import
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-079import
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-080import
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-081import
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-082import
org.apache.hadoop.hbase.util.Bytes;
-083import
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-084import
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-085import
org.apache.yetus.audience.InterfaceAudience;
-086import org.slf4j.Logger;
-087import org.slf4j.LoggerFactory;
-088
-089import
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-090import
org.apache.hadoop.hbase.shaded.io.netty.util.HashedWheelTimer;
-091import
org.apache.hadoop.hbase.shaded.io.netty.util.Timeout;
-092import
org.apache.hadoop.hbase.shaded.io.netty.util.TimerTask;
-093import
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-094import
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-095import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-096import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-097import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-098import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-099import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-100import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/devapidocs/org/apache/hadoop/hbase/RegionMetrics.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/RegionMetrics.html
b/devapidocs/org/apache/hadoop/hbase/RegionMetrics.html
new file mode 100644
index 000..92ca29d
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/RegionMetrics.html
@@ -0,0 +1,582 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+RegionMetrics (Apache HBase 3.0.0-SNAPSHOT API)
+
+
+
+
+
+var methods =
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":18,"i9":6,"i10":6,"i11":18,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance
Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Interface RegionMetrics
+
+
+
+
+
+
+All Known Implementing Classes:
+RegionLoad, RegionMetricsBuilder.RegionMetricsImpl
+
+
+
+@InterfaceAudience.Public
+public interface RegionMetrics
+Encapsulates per-region load metrics.
+
+
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsAbstract MethodsDefault Methods
+
+Modifier and Type
+Method and Description
+
+
+Size
+getBloomFilterSize()
+
+
+long
+getCompactedCellCount()
+
+
+long
+getCompactingCellCount()
+
+
+long
+getCompletedSequenceId()
+This does not really belong inside RegionLoad but its being
done in the name of expediency.
+
+
+
+float
+getDataLocality()
+
+
+long
+getFilteredReadRequestCount()
+
+
+long
+getLastMajorCompactionTimestamp()
+
+
+Size
+getMemStoreSize()
+
+
+default http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">String
+getNameAsString()
+
+
+long
+getReadRequestCount()
+
+
+byte[]
+getRegionName()
+
+
+default long
+getRequestCount()
+
+
+int
+getStoreCount()
+
+
+int
+getStoreFileCount()
+
+
+Size
+getStoreFileIndexSize()
+TODO: why we pass the same value to different counters?
Currently, the value from
+ getStoreFileIndexSize() is same with getStoreFileRootLevelIndexSize()
+ see HRegionServer#createRegionLoad.
+
+
+
+Size
+getStoreFileRootLevelIndexSize()
+
+
+Size
+getStoreFileSize()
+
+
+Size
+getStoreFileUncompressedDataIndexSize()
+
+
+http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
title="class or interface in java.util">Mapbyte[],http://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
title="class or interface in java.lang">Long
+getStoreSequenceId()
+
+
+Size
+getUncompressedStoreFileSize()
+
+
+long
+getWriteRequestCount()
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Method Detail
+
+
+
+
+
+getRegionName
+byte[]getRegionName()
+
+Returns:
+the region name
+
+
+
+
+
+
+
+
+getStoreCount
+intgetStoreCount()
+
+Returns:
+the number of stores
+
+
+
+
+
+
+
+
+getStoreFileCount
+intgetStoreFileCount()
+
+Returns:
+the number of storefiles
+
+
+
+
+
+
+
+
+getStoreFileSize
+SizegetStoreFileSize()
+
+Returns:
+the total size of the storefiles
+
+
+
+
+
+
+
+
+getMemStoreSize
+SizegetMemStoreSize()
+
+Returns:
+the memstore size
+
+
+
+
+
+
+
+
+getReadRequestCount
+longgetReadRequestCount()
+
+Returns:
+the number of read requests made to region
+
+
+
+
+
+
+
+
+getWriteRequestCount
+longgetWriteRequestCount()
+
+Returns:
+the number of write requests made to region
+
+
+
+
+
+
+
+
+getRequestCount
+defaultlonggetRequestCount()
+
+Returns:
+the number of write requests and read requests made to region
+
+
+
+
+
+
+
+
+getNameAsString
+defaulthttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">StringgetNameAsString()
+
+Returns:
+the region name as a string
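For illustration, a minimal sketch that consumes the interface documented on this page; how the List of RegionMetrics is obtained (for example from ServerMetrics) is outside this page, and the class name is made up for the example.

import java.util.List;
import org.apache.hadoop.hbase.RegionMetrics;

public class RegionMetricsReport {
  static void print(List<RegionMetrics> metrics) {
    for (RegionMetrics rm : metrics) {
      // getRequestCount() is the default method above: read requests plus write requests.
      System.out.println(rm.getNameAsString()
          + " stores=" + rm.getStoreCount()
          + " storefiles=" + rm.getStoreFileCount()
          + " requests=" + rm.getRequestCount()
          + " locality=" + rm.getDataLocality());
    }
  }
}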
+
+
+
+
+
+
+
+
+getFilteredReadRequestCount
+longgetFilteredReadRequestCount()
+
+Returns:
+the number of filtered read requests made to region
+
+
+
+
+
+
+
+
+getStoreFileIndexSize
+SizegetStoreFileIndexSize()
+TODO: why we pass the same value to different counters?
Currently, the value from
+ getStoreFileIndexSize() is same with
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.FlushWorker.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.FlushWorker.html
b/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.FlushWorker.html
index a631eea..569ac21 100644
---
a/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.FlushWorker.html
+++
b/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.FlushWorker.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
-static class HTableMultiplexer.FlushWorker
+static class HTableMultiplexer.FlushWorker
extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true;
title="class or interface in java.lang">Runnable
@@ -317,7 +317,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
addr
-private finalHRegionLocation addr
+private finalHRegionLocation addr
@@ -326,7 +326,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
queue
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/LinkedBlockingQueue.html?is-external=true;
title="class or interface in
java.util.concurrent">LinkedBlockingQueueHTableMultiplexer.PutStatus queue
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/LinkedBlockingQueue.html?is-external=true;
title="class or interface in
java.util.concurrent">LinkedBlockingQueueHTableMultiplexer.PutStatus queue
@@ -335,7 +335,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
multiplexer
-private finalHTableMultiplexer multiplexer
+private finalHTableMultiplexer multiplexer
@@ -344,7 +344,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
totalFailedPutCount
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
title="class or interface in java.util.concurrent.atomic">AtomicLong totalFailedPutCount
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
title="class or interface in java.util.concurrent.atomic">AtomicLong totalFailedPutCount
@@ -353,7 +353,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
currentProcessingCount
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true;
title="class or interface in java.util.concurrent.atomic">AtomicInteger currentProcessingCount
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true;
title="class or interface in java.util.concurrent.atomic">AtomicInteger currentProcessingCount
@@ -362,7 +362,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
averageLatency
-private finalHTableMultiplexer.AtomicAverageCounter averageLatency
+private finalHTableMultiplexer.AtomicAverageCounter averageLatency
@@ -371,7 +371,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
maxLatency
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
title="class or interface in java.util.concurrent.atomic">AtomicLong maxLatency
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
title="class or interface in java.util.concurrent.atomic">AtomicLong maxLatency
@@ -380,7 +380,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
ap
-private finalAsyncProcess ap
+private finalAsyncProcess ap
@@ -389,7 +389,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
processingList
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListHTableMultiplexer.PutStatus processingList
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListHTableMultiplexer.PutStatus processingList
@@ -398,7 +398,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
executor
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ScheduledExecutorService.html?is-external=true;
title="class or interface in
java.util.concurrent">ScheduledExecutorService executor
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ScheduledExecutorService.html?is-external=true;
title="class or interface in
java.util.concurrent">ScheduledExecutorService executor
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html
b/apidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html
index 78834c4..51bbeaa 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html
@@ -511,17 +511,17 @@
503x = src.get();
504a1 = ord.apply(x) & 0xff;
505if (-1 == unsignedCmp(a0, 249)) {
-506 return (a0 - 241) * 256 + a1 + 240;
+506 return (a0 - 241L) * 256 + a1 + 240;
507}
508x = src.get();
509a2 = ord.apply(x) & 0xff;
510if (a0 == 249) {
-511 return 2288 + 256 * a1 + a2;
+511 return 2288L + 256 * a1 + a2;
512}
513x = src.get();
514a3 = ord.apply(x) & 0xff;
515if (a0 == 250) {
-516 return (a1 << 16) | (a2 << 8) | a3;
+516 return ((long) a1 << 16L) | (a2 << 8) | a3;
517}
518x = src.get();
519a4 = ord.apply(x) & 0xff;
@@ -671,1099 +671,1101 @@
663 dst.put((byte) ((2 * d + 1) & 0xff));
664 abs = abs.subtract(BigDecimal.valueOf(d));
665}
-666a[offset + dst.getPosition() - 1] &= 0xfe; // terminal digit should be 2x
-667if (isNeg) {
-668 // negative values encoded as ~M
-669 DESCENDING.apply(a, offset + startM, dst.getPosition() - startM);
-670}
-671return dst.getPosition() - start;
-672 }
-673
-674 /**
-675 * Encode the large magnitude floating point number {@code val} using
-676 * the key encoding. The caller guarantees that {@code val} will be
-677 * finite and abs(val) >= 1.0.
-678 * <p>
-679 * A floating point value is encoded as an integer exponent {@code E}
-680 * and a mantissa {@code M}. The original value is equal to
-681 * {@code (M * 100^E)}. {@code E} is set to the smallest value
-682 * possible without making {@code M} greater than or equal to 1.0.
-683 * </p>
-684 * <p>
-685 * Each centimal digit of the mantissa is stored in a byte. If the value of
-686 * the centimal digit is {@code X} (hence {@code X>=0} and
-687 * {@code X<=99}) then the byte value will be {@code 2*X+1} for
-688 * every byte of the mantissa, except for the last byte which will be
-689 * {@code 2*X+0}. The mantissa must be the minimum number of bytes
-690 * necessary to represent the value; trailing {@code X==0} digits are
-691 * omitted. This means that the mantissa will never contain a byte with the
-692 * value {@code 0x00}.
-693 * </p>
-694 * <p>
-695 * If {@code E > 10}, then this routine writes of {@code E} as a
-696 * varint followed by the mantissa as described above. Otherwise, if
-697 * {@code E <= 10}, this routine only writes the mantissa and leaves
-698 * the {@code E} value to be encoded as part of the opening byte of the
-699 * field by the calling function.
-700 *
-701 * <pre>
-702 * Encoding: M   (if E<=10)
-703 *           E M (if E>10)
-704 * </pre>
-705 * </p>
-706 * @param dst The destination to which encoded digits are written.
-707 * @param val The value to encode.
-708 * @return the number of bytes written.
-709 */
-710 private static int encodeNumericLarge(PositionedByteRange dst, BigDecimal val) {
-711// TODO: this can be done faster
-712BigDecimal abs = val.abs();
-713byte[] a = dst.getBytes();
-714boolean isNeg = val.signum() == -1;
-715final int start = dst.getPosition(), offset = dst.getOffset();
-716int e = 0, d, startM;
-717
-718if (isNeg) { /* Large negative number: 0x08, ~E, ~M */
-719 dst.put(NEG_LARGE);
-720} else { /* Large positive number: 0x22, E, M */
-721 dst.put(POS_LARGE);
-722}
-723
-724// normalize abs(val) to determine E
-725while (abs.compareTo(E32) >= 0 && e <= 350) { abs = abs.movePointLeft(32); e +=16; }
-726while (abs.compareTo(E8) >= 0 && e <= 350) { abs = abs.movePointLeft(8); e+= 4; }
-727while (abs.compareTo(BigDecimal.ONE) >= 0 && e <= 350) { abs = abs.movePointLeft(2); e++; }
-728
-729// encode appropriate header byte and/or E value.
-730if (e > 10) { /* large number, write out {~,}E */
-731 putVaruint64(dst, e, isNeg);
-732} else {
-733 if (isNeg) { /* Medium negative number: 0x13-E, ~M */
-734dst.put(start, (byte) (NEG_MED_MAX - e));
-735 } else { /* Medium positive number: 0x17+E, M */
-736dst.put(start, (byte) (POS_MED_MIN + e));
-737 }
-738}
-739
-740// encode M by peeling off centimal digits, encoding x as 2x+1
-741startM = dst.getPosition();
-742// TODO: 18 is an arbitrary encoding limit. Reevaluate once we have a better handling of
-743// numeric scale.
-744for (int i = 0; i < 18 && abs.compareTo(BigDecimal.ZERO) != 0; i++) {
-745 abs = abs.movePointRight(2);
-746 d =
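As a concrete instance of the encoding described in the comment above: 1234.5 normalizes to M = 0.12345 with E = 2, and the centimal digits 12, 34, 50 become the bytes 25, 69 and 100 (2*X+1 for every digit but the last, 2*X for the last). The small self-contained sketch below reproduces that arithmetic with plain BigDecimal calls; it is illustrative only and is not part of OrderedBytes.

import java.math.BigDecimal;

public class CentimalDigitsDemo {
  public static void main(String[] args) {
    BigDecimal abs = new BigDecimal("1234.5");
    int e = 0;
    // Normalize so that abs < 1.0 and value == abs * 100^e  =>  abs = 0.12345, e = 2.
    while (abs.compareTo(BigDecimal.ONE) >= 0) { abs = abs.movePointLeft(2); e++; }
    System.out.println("E = " + e);
    // Peel off base-100 digits; every digit X is written as 2*X+1, the last one as 2*X.
    while (abs.compareTo(BigDecimal.ZERO) != 0) {
      abs = abs.movePointRight(2);
      int d = abs.intValue();                      // 12, 34, 50
      abs = abs.subtract(BigDecimal.valueOf(d));
      boolean last = abs.compareTo(BigDecimal.ZERO) == 0;
      System.out.println("digit " + d + " -> byte " + (last ? 2 * d : 2 * d + 1));
    }
    // Expected output: E = 2, then bytes 25, 69, 100.
  }
}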
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.ConnectionManager.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.ConnectionManager.html
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.ConnectionManager.html
index 67e6eae..a83310a 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.ConnectionManager.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.ConnectionManager.html
@@ -51,10 +51,10 @@
043
044import
org.apache.hadoop.conf.Configuration;
045import
org.apache.hadoop.hbase.CellScanner;
-046import
org.apache.hadoop.hbase.HConstants;
-047import org.apache.hadoop.hbase.Server;
-048import
org.apache.yetus.audience.InterfaceAudience;
-049import
org.apache.yetus.audience.InterfaceStability;
+046import
org.apache.hadoop.hbase.HBaseInterfaceAudience;
+047import
org.apache.hadoop.hbase.HConstants;
+048import org.apache.hadoop.hbase.Server;
+049import
org.apache.yetus.audience.InterfaceAudience;
050import
org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
051import
org.apache.hadoop.hbase.security.HBasePolicyProvider;
052import
org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
@@ -89,624 +89,623 @@
081 *
082 * @see BlockingRpcClient
083 */
-084@InterfaceAudience.Private
-085@InterfaceStability.Evolving
-086public class SimpleRpcServer extends
RpcServer {
-087
-088 protected int port;
// port we listen on
-089 protected InetSocketAddress address;
// inet address we listen on
-090 private int readThreads;
// number of read threads
-091
-092 protected int socketSendBufferSize;
-093 protected final long purgeTimeout;
// in milliseconds
-094
-095 // maintains the set of client
connections and handles idle timeouts
-096 private ConnectionManager
connectionManager;
-097 private Listener listener = null;
-098 protected SimpleRpcServerResponder
responder = null;
-099
-100 /** Listens on the socket. Creates jobs
for the handler threads*/
-101 private class Listener extends Thread
{
-102
-103private ServerSocketChannel
acceptChannel = null; //the accept channel
-104private Selector selector = null;
//the selector that we use for the server
-105private Reader[] readers = null;
-106private int currentReader = 0;
-107private final int
readerPendingConnectionQueueLength;
-108
-109private ExecutorService readPool;
-110
-111public Listener(final String name)
throws IOException {
-112 super(name);
-113 // The backlog of requests that we
will have the serversocket carry.
-114 int backlogLength =
conf.getInt("hbase.ipc.server.listen.queue.size", 128);
-115 readerPendingConnectionQueueLength
=
-116
conf.getInt("hbase.ipc.server.read.connection-queue.size", 100);
-117 // Create a new server socket and
set to non blocking mode
-118 acceptChannel =
ServerSocketChannel.open();
-119
acceptChannel.configureBlocking(false);
-120
-121 // Bind the server socket to the
binding addrees (can be different from the default interface)
-122 bind(acceptChannel.socket(),
bindAddress, backlogLength);
-123 port =
acceptChannel.socket().getLocalPort(); //Could be an ephemeral port
-124 address =
(InetSocketAddress)acceptChannel.socket().getLocalSocketAddress();
-125 // create a selector;
-126 selector = Selector.open();
-127
-128 readers = new
Reader[readThreads];
-129 // Why this executor thing? Why not
like hadoop just start up all the threads? I suppose it
-130 // has an advantage in that it is
easy to shutdown the pool.
-131 readPool =
Executors.newFixedThreadPool(readThreads,
-132new
ThreadFactoryBuilder().setNameFormat(
-133 "Reader=%d,bindAddress=" +
bindAddress.getHostName() +
-134 ",port=" +
port).setDaemon(true)
-135
.setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
-136 for (int i = 0; i readThreads;
++i) {
-137Reader reader = new Reader();
-138readers[i] = reader;
-139readPool.execute(reader);
-140 }
-141 LOG.info(getName() + ": started " +
readThreads + " reader(s) listening on port=" + port);
-142
-143 // Register accepts on the server
socket with the selector.
-144 acceptChannel.register(selector,
SelectionKey.OP_ACCEPT);
-145 this.setName("Listener,port=" +
port);
-146 this.setDaemon(true);
-147}
+084@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.CONFIG})
+085public class SimpleRpcServer extends
RpcServer {
+086
+087 protected int port;
// port we listen on
+088 protected InetSocketAddress address;
// inet address we listen on
+089
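The Listener constructor shown above sizes its accept backlog and per-reader pending-connection queue from hbase.ipc.server.listen.queue.size (default 128) and hbase.ipc.server.read.connection-queue.size (default 100). Below is a minimal sketch of overriding them programmatically; in practice these would normally be set in hbase-site.xml, and the class name here is made up for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcListenerTuning {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.ipc.server.listen.queue.size", 256);          // accept backlog (default 128)
    conf.setInt("hbase.ipc.server.read.connection-queue.size", 200); // pending connections per reader (default 100)
    return conf;
  }
}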
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
index 219283e..2b5d70b 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
@@ -435,1198 +435,1203 @@
427
428if (backingMap.containsKey(cacheKey))
{
429 Cacheable existingBlock =
getBlock(cacheKey, false, false, false);
-430 if
(BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) {
-431throw new
RuntimeException("Cached block contents differ, which should not have
happened."
-432+ "cacheKey:" + cacheKey);
-433 }
-434 String msg = "Caching an already
cached block: " + cacheKey;
-435 msg += ". This is harmless and can
happen in rare cases (see HBASE-8547)";
-436 LOG.warn(msg);
-437 return;
-438}
-439
-440/*
-441 * Stuff the entry into the RAM cache
so it can get drained to the persistent store
-442 */
-443RAMQueueEntry re =
-444new RAMQueueEntry(cacheKey,
cachedItem, accessCount.incrementAndGet(), inMemory);
-445if (ramCache.putIfAbsent(cacheKey,
re) != null) {
-446 return;
-447}
-448int queueNum = (cacheKey.hashCode()
0x7FFF) % writerQueues.size();
-449BlockingQueueRAMQueueEntry bq
= writerQueues.get(queueNum);
-450boolean successfulAddition = false;
-451if (wait) {
-452 try {
-453successfulAddition = bq.offer(re,
DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
-454 } catch (InterruptedException e)
{
-455
Thread.currentThread().interrupt();
-456 }
-457} else {
-458 successfulAddition =
bq.offer(re);
-459}
-460if (!successfulAddition) {
-461 ramCache.remove(cacheKey);
-462 cacheStats.failInsert();
-463} else {
-464 this.blockNumber.increment();
-465
this.heapSize.add(cachedItem.heapSize());
-466 blocksByHFile.add(cacheKey);
-467}
-468 }
-469
-470 /**
-471 * Get the buffer of the block with the
specified key.
-472 * @param key block's cache key
-473 * @param caching true if the caller
caches blocks on cache misses
-474 * @param repeat Whether this is a
repeat lookup for the same block
-475 * @param updateCacheMetrics Whether we
should update cache metrics or not
-476 * @return buffer of specified cache
key, or null if not in cache
-477 */
-478 @Override
-479 public Cacheable getBlock(BlockCacheKey
key, boolean caching, boolean repeat,
-480 boolean updateCacheMetrics) {
-481if (!cacheEnabled) {
-482 return null;
-483}
-484RAMQueueEntry re =
ramCache.get(key);
-485if (re != null) {
-486 if (updateCacheMetrics) {
-487cacheStats.hit(caching,
key.isPrimary(), key.getBlockType());
-488 }
-489
re.access(accessCount.incrementAndGet());
-490 return re.getData();
-491}
-492BucketEntry bucketEntry =
backingMap.get(key);
-493if (bucketEntry != null) {
-494 long start = System.nanoTime();
-495 ReentrantReadWriteLock lock =
offsetLock.getLock(bucketEntry.offset());
-496 try {
-497lock.readLock().lock();
-498// We can not read here even if
backingMap does contain the given key because its offset
-499// maybe changed. If we lock
BlockCacheKey instead of offset, then we can only check
-500// existence here.
-501if
(bucketEntry.equals(backingMap.get(key))) {
-502 // TODO : change this area -
should be removed after server cells and
-503 // 12295 are available
-504 int len =
bucketEntry.getLength();
-505 if (LOG.isTraceEnabled()) {
-506LOG.trace("Read offset=" +
bucketEntry.offset() + ", len=" + len);
-507 }
-508 Cacheable cachedBlock =
ioEngine.read(bucketEntry.offset(), len,
-509
bucketEntry.deserializerReference(this.deserialiserMap));
-510 long timeTaken =
System.nanoTime() - start;
-511 if (updateCacheMetrics) {
-512cacheStats.hit(caching,
key.isPrimary(), key.getBlockType());
-513
cacheStats.ioHit(timeTaken);
-514 }
-515 if (cachedBlock.getMemoryType()
== MemoryType.SHARED) {
-516
bucketEntry.refCount.incrementAndGet();
-517 }
-518
bucketEntry.access(accessCount.incrementAndGet());
-519 if (this.ioErrorStartTime
0) {
-520ioErrorStartTime = -1;
-521 }
-522 return cachedBlock;
-523}
-524 } catch (IOException ioex) {
-525
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorServiceBuilder.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorServiceBuilder.html
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorServiceBuilder.html
index 7cece5c..6361a24 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorServiceBuilder.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorServiceBuilder.html
@@ -248,379 +248,383 @@
240 */
241CheckAndMutateBuilder
ifNotExists();
242
-243default CheckAndMutateBuilder
ifEquals(byte[] value) {
-244 return
ifMatches(CompareOperator.EQUAL, value);
-245}
-246
-247/**
-248 * @param compareOp comparison
operator to use
-249 * @param value the expected value
-250 */
-251CheckAndMutateBuilder
ifMatches(CompareOperator compareOp, byte[] value);
-252
-253/**
-254 * @param put data to put if check
succeeds
-255 * @return {@code true} if the new
put was executed, {@code false} otherwise. The return value
-256 * will be wrapped by a
{@link CompletableFuture}.
-257 */
-258CompletableFutureBoolean
thenPut(Put put);
-259
-260/**
-261 * @param delete data to delete if
check succeeds
-262 * @return {@code true} if the new
delete was executed, {@code false} otherwise. The return
-263 * value will be wrapped by a
{@link CompletableFuture}.
-264 */
-265CompletableFutureBoolean
thenDelete(Delete delete);
-266
-267/**
-268 * @param mutation mutations to
perform if check succeeds
-269 * @return true if the new mutation
was executed, false otherwise. The return value will be
-270 * wrapped by a {@link
CompletableFuture}.
-271 */
-272CompletableFutureBoolean
thenMutate(RowMutations mutation);
-273 }
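For illustration, a minimal sketch of chaining the CheckAndMutateBuilder methods listed above; it assumes the usual checkAndMutate(row, family) entry point and qualifier(...) setter on AsyncTable, and the class and method names are made up for the example.

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Put;

public class CheckAndMutateExample {
  static CompletableFuture<Boolean> putIfUnchanged(AsyncTable<?> table,
      byte[] row, byte[] family, byte[] qualifier, byte[] expected, Put put) {
    return table.checkAndMutate(row, family)
        .qualifier(qualifier)
        .ifEquals(expected)   // default method above: ifMatches(CompareOperator.EQUAL, expected)
        .thenPut(put);        // completes with true if the put was applied
  }
}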
-274
-275 /**
-276 * Performs multiple mutations
atomically on a single row. Currently {@link Put} and
-277 * {@link Delete} are supported.
-278 * @param mutation object that
specifies the set of mutations to perform atomically
-279 * @return A {@link CompletableFuture}
that always returns null when complete normally.
-280 */
-281 CompletableFutureVoid
mutateRow(RowMutations mutation);
-282
-283 /**
-284 * The scan API uses the observer
pattern.
-285 * @param scan A configured {@link
Scan} object.
-286 * @param consumer the consumer used to
receive results.
-287 * @see ScanResultConsumer
-288 * @see AdvancedScanResultConsumer
-289 */
-290 void scan(Scan scan, C consumer);
-291
-292 /**
-293 * Gets a scanner on the current table
for the given family.
-294 * @param family The column family to
scan.
-295 * @return A scanner.
-296 */
-297 default ResultScanner getScanner(byte[]
family) {
-298return getScanner(new
Scan().addFamily(family));
-299 }
-300
-301 /**
-302 * Gets a scanner on the current table
for the given family and qualifier.
-303 * @param family The column family to
scan.
-304 * @param qualifier The column
qualifier to scan.
-305 * @return A scanner.
-306 */
-307 default ResultScanner getScanner(byte[]
family, byte[] qualifier) {
-308return getScanner(new
Scan().addColumn(family, qualifier));
-309 }
-310
-311 /**
-312 * Returns a scanner on the current
table as specified by the {@link Scan} object.
-313 * @param scan A configured {@link
Scan} object.
-314 * @return A scanner.
-315 */
-316 ResultScanner getScanner(Scan scan);
-317
-318 /**
-319 * Return all the results that match
the given scan object.
-320 * p
-321 * Notice that usually you should use
this method with a {@link Scan} object that has limit set.
-322 * For example, if you want to get the
closest row after a given row, you could do this:
-323 * p
-324 *
-325 * pre
-326 * code
-327 * table.scanAll(new
Scan().withStartRow(row, false).setLimit(1)).thenAccept(results - {
-328 * if (results.isEmpty()) {
-329 * System.out.println("No row
after " + Bytes.toStringBinary(row));
-330 * } else {
-331 * System.out.println("The closest
row after " + Bytes.toStringBinary(row) + " is "
-332 * +
Bytes.toStringBinary(results.stream().findFirst().get().getRow()));
-333 * }
-334 * });
-335 * /code
-336 * /pre
-337 * p
-338 * If your result set is very large,
you should use other scan method to get a scanner or use
-339 * callback to process the results.
They will do chunking to prevent OOM. The scanAll method will
-340 * fetch all the results and store them
in a List and then return the list to you.
+243/**
+244 * Check for equality.
+245 * @param value the expected value
+246 */
+247default CheckAndMutateBuilder
ifEquals(byte[] value) {
+248 return
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
index 7ce259d..2b126d3 100644
---
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
+++
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
@@ -738,7 +738,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
createChecksumCreater28
-private staticFanOutOneBlockAsyncDFSOutputHelper.ChecksumCreatercreateChecksumCreater28(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
title="class or interface in java.lang">Class?confClass)
+private staticFanOutOneBlockAsyncDFSOutputHelper.ChecksumCreatercreateChecksumCreater28(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
title="class or interface in java.lang">Class?confClass)
throws http://docs.oracle.com/javase/8/docs/api/java/lang/NoSuchMethodException.html?is-external=true;
title="class or interface in java.lang">NoSuchMethodException
Throws:
@@ -752,7 +752,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
createChecksumCreater27
-private staticFanOutOneBlockAsyncDFSOutputHelper.ChecksumCreatercreateChecksumCreater27(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
title="class or interface in java.lang">Class?confClass)
+private staticFanOutOneBlockAsyncDFSOutputHelper.ChecksumCreatercreateChecksumCreater27(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
title="class or interface in java.lang">Class?confClass)
throws http://docs.oracle.com/javase/8/docs/api/java/lang/NoSuchMethodException.html?is-external=true;
title="class or interface in java.lang">NoSuchMethodException
Throws:
@@ -766,7 +766,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
createChecksumCreater
-private staticFanOutOneBlockAsyncDFSOutputHelper.ChecksumCreatercreateChecksumCreater()
+private staticFanOutOneBlockAsyncDFSOutputHelper.ChecksumCreatercreateChecksumCreater()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/NoSuchMethodException.html?is-external=true;
title="class or interface in java.lang">NoSuchMethodException,
http://docs.oracle.com/javase/8/docs/api/java/lang/ClassNotFoundException.html?is-external=true;
title="class or interface in java.lang">ClassNotFoundException
@@ -782,7 +782,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
createFileCreator3
-private staticFanOutOneBlockAsyncDFSOutputHelper.FileCreatorcreateFileCreator3()
+private staticFanOutOneBlockAsyncDFSOutputHelper.FileCreatorcreateFileCreator3()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/NoSuchMethodException.html?is-external=true;
title="class or interface in java.lang">NoSuchMethodException
Throws:
@@ -796,7 +796,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
createFileCreator2
-private staticFanOutOneBlockAsyncDFSOutputHelper.FileCreatorcreateFileCreator2()
+private staticFanOutOneBlockAsyncDFSOutputHelper.FileCreatorcreateFileCreator2()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/NoSuchMethodException.html?is-external=true;
title="class or interface in java.lang">NoSuchMethodException
Throws:
@@ -810,7 +810,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
createFileCreator
-private staticFanOutOneBlockAsyncDFSOutputHelper.FileCreatorcreateFileCreator()
+private staticFanOutOneBlockAsyncDFSOutputHelper.FileCreatorcreateFileCreator()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/NoSuchMethodException.html?is-external=true;
title="class or interface in java.lang">NoSuchMethodException
Throws:
@@ -824,7 +824,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
beginFileLease
-staticvoidbeginFileLease(org.apache.hadoop.hdfs.DFSClientclient,
+staticvoidbeginFileLease(org.apache.hadoop.hdfs.DFSClientclient,
longinodeId)
@@ -834,7
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-build-configuration/hbase-spark/dependencies.html
--
diff --git a/hbase-build-configuration/hbase-spark/dependencies.html
b/hbase-build-configuration/hbase-spark/dependencies.html
index be3032c..1997852 100644
--- a/hbase-build-configuration/hbase-spark/dependencies.html
+++ b/hbase-build-configuration/hbase-spark/dependencies.html
@@ -7,7 +7,7 @@
-
+
Apache HBase - Spark Project Dependencies
@@ -196,12 +196,18 @@
jar
https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version
2.0
+org.apache.hbase
+http://hbase.apache.org/hbase-build-configuration/hbase-zookeeper;>hbase-zookeeper
+3.0.0-SNAPSHOT
+jar
+https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version
2.0
+
org.apache.hbase.thirdparty
http://hbase.apache.org/hbase-shaded-miscellaneous;>hbase-shaded-miscellaneous
1.0.1
jar
https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version
2.0
-
+
org.apache.yetus
https://yetus.apache.org/audience-annotations;>audience-annotations
0.5.0
@@ -282,20 +288,27 @@
test-jar
https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version
2.0
+org.apache.hbase
+http://hbase.apache.org/hbase-build-configuration/hbase-zookeeper;>hbase-zookeeper
+3.0.0-SNAPSHOT
+tests
+test-jar
+https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version
2.0
+
org.apache.spark
http://spark.apache.org/;>spark-streaming_2.10
1.6.0
tests
test-jar
http://www.apache.org/licenses/LICENSE-2.0.html;>Apache 2.0
License
-
+
org.scalamock
http://scalamock.org/;>scalamock-scalatest-support_2.10
3.1.4
-
jar
http://www.opensource.org/licenses/bsd-license.php;>BSD-style
-
+
org.scalatest
http://www.scalatest.org;>scalatest_2.10
2.2.4
@@ -800,294 +813,288 @@
jar
https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version
2.0
-org.apache.hbase
-http://hbase.apache.org/hbase-build-configuration/hbase-zookeeper;>hbase-zookeeper
-3.0.0-SNAPSHOT
-jar
-https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version
2.0
-
org.apache.hbase.thirdparty
http://hbase.apache.org/hbase-shaded-netty;>hbase-shaded-netty
1.0.1
jar
https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version
2.0
-
+
org.apache.hbase.thirdparty
http://hbase.apache.org/hbase-shaded-protobuf;>hbase-shaded-protobuf
1.0.1
jar
https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version
2.0
-
+
org.apache.htrace
http://incubator.apache.org/projects/htrace.html;>htrace-core
3.2.0-incubating
jar
http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software
License, Version 2.0
-
+
org.apache.htrace
http://incubator.apache.org/projects/htrace.html;>htrace-core4
4.2.0-incubating
jar
http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version
2.0
-
+
org.apache.httpcomponents
http://hc.apache.org/httpcomponents-client;>httpclient
4.5.3
jar
http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version
2.0
-
+
org.apache.httpcomponents
http://hc.apache.org/httpcomponents-core-ga;>httpcore
4.4.6
jar
http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version
2.0
-
+
org.apache.zookeeper
zookeeper
3.4.10
jar
http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software
License, Version 2.0
-
+
org.codehaus.jackson
http://jackson.codehaus.org;>jackson-core-asl
1.9.13
jar
http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software
License, Version 2.0
-
+
org.codehaus.jackson
http://jackson.codehaus.org;>jackson-jaxrs
1.8.3
jar
http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software
License, Version 2.0-http://www.fsf.org/licensing/licenses/lgpl.txt;>GNU Lesser General Public
License (LGPL), Version 2.1
-
+
org.codehaus.jackson
http://jackson.codehaus.org;>jackson-mapper-asl
1.9.13
jar
http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software
License, Version 2.0
-
+
org.codehaus.jackson
http://jackson.codehaus.org;>jackson-xc
1.8.3
jar
http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software
License, Version 2.0-http://www.fsf.org/licensing/licenses/lgpl.txt;>GNU Lesser General Public
License (LGPL), Version 2.1
-
+
org.codehaus.jettison
https://github.com/jettison-json/jettison;>jettison
1.3.8
jar
http://www.apache.org/licenses/LICENSE-2.0;>Apache License, Version
2.0
-
+
org.eclipse.jetty
http://www.eclipse.org/jetty;>jetty-http
9.3.19.v20170502
jar
http://www.apache.org/licenses/LICENSE-2.0;>Apache Software License -
Version 2.0-http://www.eclipse.org/org/documents/epl-v10.php;>Eclipse Public License
- Version 1.0
-
+
org.eclipse.jetty
http://www.eclipse.org/jetty;>jetty-io
9.3.19.v20170502
jar
http://www.apache.org/licenses/LICENSE-2.0;>Apache Software License -
Version
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseCommonTestingUtility.html
--
diff --git
a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseCommonTestingUtility.html
b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseCommonTestingUtility.html
index 892e00d..e1445dc 100644
---
a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseCommonTestingUtility.html
+++
b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseCommonTestingUtility.html
@@ -114,6 +114,10 @@
org.apache.hadoop.hbase.util
+
+org.apache.hadoop.hbase.zookeeper
+
+
@@ -352,6 +356,24 @@
+
+
+
+Uses of HBaseCommonTestingUtility in org.apache.hadoop.hbase.zookeeper
+
+Fields in org.apache.hadoop.hbase.zookeeper
declared as HBaseCommonTestingUtility
+
+Modifier and Type
+Field and Description
+
+
+
+private static HBaseCommonTestingUtility
+TestReadOnlyZKClient.UTIL
+
+
+
+
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/testdevapidocs/org/apache/hadoop/hbase/client/TestZKAsyncRegistry.html
--
diff --git
a/testdevapidocs/org/apache/hadoop/hbase/client/TestZKAsyncRegistry.html
b/testdevapidocs/org/apache/hadoop/hbase/client/TestZKAsyncRegistry.html
index ddd8139..30f3d80 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestZKAsyncRegistry.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestZKAsyncRegistry.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
-public class TestZKAsyncRegistry
+public class TestZKAsyncRegistry
extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
@@ -217,7 +217,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
TEST_UTIL
-private static finalHBaseTestingUtility TEST_UTIL
+private static finalHBaseTestingUtility TEST_UTIL
@@ -226,7 +226,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
REGISTRY
-private staticorg.apache.hadoop.hbase.client.ZKAsyncRegistry REGISTRY
+private staticorg.apache.hadoop.hbase.client.ZKAsyncRegistry REGISTRY
@@ -243,7 +243,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
TestZKAsyncRegistry
-publicTestZKAsyncRegistry()
+publicTestZKAsyncRegistry()
@@ -260,7 +260,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
waitUntilAllReplicasHavingRegionLocation
-staticvoidwaitUntilAllReplicasHavingRegionLocation(org.apache.hadoop.hbase.TableNametbl)
+staticvoidwaitUntilAllReplicasHavingRegionLocation(org.apache.hadoop.hbase.TableNametbl)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
title="class or interface in java.io">IOException
Throws:
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestImportExport.TableWALActionListener.html
--
diff --git
a/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestImportExport.TableWALActionListener.html
b/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestImportExport.TableWALActionListener.html
index 462c087..f8ca93a 100644
---
a/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestImportExport.TableWALActionListener.html
+++
b/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestImportExport.TableWALActionListener.html
@@ -100,15 +100,10 @@ var activeTableTab = "activeTableTab";
http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">java.lang.Object
-org.apache.hadoop.hbase.regionserver.wal.WALActionsListener.Base
-
-
org.apache.hadoop.hbase.mapreduce.TestImportExport.TableWALActionListener
-
-
@@ -123,8 +118,9 @@ var activeTableTab = "activeTableTab";
private static class TestImportExport.TableWALActionListener
-extends org.apache.hadoop.hbase.regionserver.wal.WALActionsListener.Base
-This listens to the
#visitLogEntryBeforeWrite(RegionInfo, WALKey, WALEdit) to
+extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
+implements org.apache.hadoop.hbase.regionserver.wal.WALActionsListener
+This listens to the
WALActionsListener.visitLogEntryBeforeWrite(RegionInfo, WALKey,
WALEdit) to
identify that an entry is written to the Write Ahead Log for the given
table.
@@ -132,21 +128,6 @@ extends
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener.Base
-
-
-
-
-
-Nested Class Summary
-
-
-
-
-Nested classes/interfaces inherited from
interfaceorg.apache.hadoop.hbase.regionserver.wal.WALActionsListener
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.html
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.html
new file mode 100644
index 000..8a522f0
--- /dev/null
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.html
@@ -0,0 +1,129 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software
Foundation (ASF) under one
+003 * or more contributor license
agreements. See the NOTICE file
+004 * distributed with this work for
additional information
+005 * regarding copyright ownership. The
ASF licenses this file
+006 * to you under the Apache License,
Version 2.0 (the
+007 * "License"); you may not use this file
except in compliance
+008 * with the License. You may obtain a
copy of the License at
+009 *
+010 *
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or
agreed to in writing, software
+013 * distributed under the License is
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF
ANY KIND, either express or implied.
+015 * See the License for the specific
language governing permissions and
+016 * limitations under the License.
+017 */
+018package
org.apache.hadoop.hbase.io.asyncfs;
+019
+020import
org.apache.yetus.audience.InterfaceAudience;
+021
+022/**
+023 * Used to predict the next send buffer
size.
+024 */
+025@InterfaceAudience.Private
+026class SendBufSizePredictor {
+027
+028 // LIMIT is 128MB
+029 private static final int LIMIT = 128 *
1024 * 1024;
+030
+031 // buf's initial capacity - 4KB
+032 private int capacity = 4 * 1024;
+033
+034 int initialSize() {
+035return capacity;
+036 }
+037
+038 int guess(int bytesWritten) {
+039// if the bytesWritten is greater
than the current capacity
+040// always increase the capacity in
powers of 2.
+041if (bytesWritten this.capacity)
{
+042 // Ensure we don't cross the
LIMIT
+043 if ((this.capacity << 1) <= LIMIT) {
+044// increase the capacity in the
range of power of 2
+045this.capacity = this.capacity << 1;
+046 }
+047} else {
+048 // if we see that the bytesWritten
is lesser we could again decrease
+049 // the capacity by dividing it by 2
if the bytesWritten is satisfied by
+050 // that reduction
+051 if ((this.capacity >> 1) >= bytesWritten) {
+052this.capacity = this.capacity >> 1;
+053 }
+054}
+055return this.capacity;
+056 }
+057}
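A hedged walk-through of the grow/shrink behaviour shown in the listing above; the values follow directly from the code, and the caller shown here is illustrative, not part of the class:

    // Hedged sketch: drive the predictor with a few write sizes.
    SendBufSizePredictor predictor = new SendBufSizePredictor();
    int size = predictor.initialSize();   // 4 KB to start
    size = predictor.guess(10 * 1024);    // 10 KB > 4 KB, so capacity doubles to 8 KB
    size = predictor.guess(10 * 1024);    // still larger, doubles again to 16 KB
    size = predictor.guess(1 * 1024);     // small write, capacity halves back to 8 KB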
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.html
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.html
new file mode 100644
index 000..48e79b7
--- /dev/null
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.html
@@ -0,0 +1,309 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software
Foundation (ASF) under one
+003 * or more contributor license
agreements. See the NOTICE file
+004 * distributed with this work for
additional information
+005 * regarding copyright ownership. The
ASF licenses this file
+006 * to you under the Apache License,
Version 2.0 (the
+007 * "License"); you may not use this file
except in compliance
+008 * with the License. You may obtain a
copy of the License at
+009 * <p>
+010 *
http://www.apache.org/licenses/LICENSE-2.0
+011 * <p>
+012 * Unless required by applicable law or
agreed to in writing, software
+013 * distributed under the License is
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF
ANY KIND, either express or implied.
+015 * See the License for the specific
language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase.ipc;
+019
+020import java.io.IOException;
+021import java.util.List;
+022
+023import
org.apache.hadoop.hbase.DoNotRetryIOException;
+024import
org.apache.hadoop.hbase.client.VersionInfoUtil;
+025import
org.apache.hadoop.hbase.exceptions.RequestTooBigException;
+026import
org.apache.yetus.audience.InterfaceAudience;
+027
+028import
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
+029import
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
+030import
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
index d438f22..7c59e27 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
@@ -1290,8 +1290,8 @@
1282 CompactType
compactType) throws IOException {
1283switch (compactType) {
1284 case MOB:
-1285
compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName),
major,
-1286 columnFamily);
+1285
compact(this.connection.getAdminForMaster(),
RegionInfo.createMobRegionInfo(tableName),
+1286major, columnFamily);
1287break;
1288 case NORMAL:
1289checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
3240 new Callable<AdminProtos.GetRegionInfoResponse.CompactionState>() {
3241@Override
3242public
AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243 RegionInfo info =
getMobRegionInfo(tableName);
+3243 RegionInfo info =
RegionInfo.createMobRegionInfo(tableName);
3244 GetRegionInfoRequest
request =
3245
RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
3246 GetRegionInfoResponse
response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
3304}
3305break;
3306 default:
-3307throw new
IllegalArgumentException("Unknowne compactType: " + compactType);
+3307throw new
IllegalArgumentException("Unknown compactType: " + compactType);
3308}
3309if (state != null) {
3310 return
ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
3839});
3840 }
3841
-3842 private RegionInfo
getMobRegionInfo(TableName tableName) {
-3843return
RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844.build();
-3845 }
-3846
-3847 private RpcControllerFactory
getRpcControllerFactory() {
-3848return this.rpcControllerFactory;
-3849 }
-3850
-3851 @Override
-3852 public void addReplicationPeer(String
peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853 throws IOException {
-3854executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3855 @Override
-3856 protected Void rpcCall() throws
Exception {
-3857
master.addReplicationPeer(getRpcController(),
-3858
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig,
enabled));
-3859return null;
-3860 }
-3861});
-3862 }
-3863
-3864 @Override
-3865 public void
removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3867 @Override
-3868 protected Void rpcCall() throws
Exception {
-3869
master.removeReplicationPeer(getRpcController(),
-3870
RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871return null;
-3872 }
-3873});
-3874 }
-3875
-3876 @Override
-3877 public void
enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3879 @Override
-3880 protected Void rpcCall() throws
Exception {
-3881
master.enableReplicationPeer(getRpcController(),
-3882
RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883return null;
-3884 }
-3885});
-3886 }
-3887
-3888 @Override
-3889 public void
disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3891 @Override
-3892 protected Void rpcCall() throws
Exception {
-3893
master.disableReplicationPeer(getRpcController(),
-3894
RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895return null;
-3896 }
-3897});
-3898 }
-3899
-3900 @Override
-3901 public ReplicationPeerConfig
getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new MasterCallable<ReplicationPeerConfig>(getConnection(),
-3903getRpcControllerFactory()) {
-3904 @Override
-3905 protected ReplicationPeerConfig
rpcCall() throws Exception {
-3906GetReplicationPeerConfigResponse
response = master.getReplicationPeerConfig(
-3907 getRpcController(),
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
index 29ea7b3..6ed75c9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -1313,7093 +1313,7082 @@
1305
1306 @Override
1307 public boolean isSplittable() {
-1308boolean result = isAvailable() && !hasReferences();
-1309LOG.info("ASKED IF SPLITTABLE " +
result + " " + getRegionInfo().getShortNameToLog(),
-1310 new Throwable("LOGGING:
REMOVE"));
-1311// REMOVE BELOW
-1312LOG.info("DEBUG LIST ALL FILES");
-1313for (HStore store :
this.stores.values()) {
-1314 LOG.info("store " +
store.getColumnFamilyName());
-1315 for (HStoreFile sf :
store.getStorefiles()) {
-1316
LOG.info(sf.toStringDetailed());
-1317 }
-1318}
-1319return result;
-1320 }
-1321
-1322 @Override
-1323 public boolean isMergeable() {
-1324if (!isAvailable()) {
-1325 LOG.debug("Region " + this
-1326 + " is not mergeable because
it is closing or closed");
-1327 return false;
-1328}
-1329if (hasReferences()) {
-1330 LOG.debug("Region " + this
-1331 + " is not mergeable because
it has references");
-1332 return false;
-1333}
-1334
-1335return true;
+1308return isAvailable() && !hasReferences();
+1309 }
+1310
+1311 @Override
+1312 public boolean isMergeable() {
+1313if (!isAvailable()) {
+1314 LOG.debug("Region " + this
+1315 + " is not mergeable because
it is closing or closed");
+1316 return false;
+1317}
+1318if (hasReferences()) {
+1319 LOG.debug("Region " + this
+1320 + " is not mergeable because
it has references");
+1321 return false;
+1322}
+1323
+1324return true;
+1325 }
+1326
+1327 public boolean areWritesEnabled() {
+1328synchronized(this.writestate) {
+1329 return
this.writestate.writesEnabled;
+1330}
+1331 }
+1332
+1333 @VisibleForTesting
+1334 public MultiVersionConcurrencyControl
getMVCC() {
+1335return mvcc;
1336 }
1337
-1338 public boolean areWritesEnabled() {
-1339synchronized(this.writestate) {
-1340 return
this.writestate.writesEnabled;
-1341}
-1342 }
-1343
-1344 @VisibleForTesting
-1345 public MultiVersionConcurrencyControl
getMVCC() {
-1346return mvcc;
-1347 }
-1348
-1349 @Override
-1350 public long getMaxFlushedSeqId() {
-1351return maxFlushedSeqId;
+1338 @Override
+1339 public long getMaxFlushedSeqId() {
+1340return maxFlushedSeqId;
+1341 }
+1342
+1343 /**
+1344 * @return readpoint considering given
IsolationLevel. Pass {@code null} for default
+1345 */
+1346 public long
getReadPoint(IsolationLevel isolationLevel) {
+1347if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
+1348 // This scan can read even
uncommitted transactions
+1349 return Long.MAX_VALUE;
+1350}
+1351return mvcc.getReadPoint();
1352 }
1353
-1354 /**
-1355 * @return readpoint considering given
IsolationLevel. Pass {@code null} for default
-1356 */
-1357 public long
getReadPoint(IsolationLevel isolationLevel) {
-1358if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
-1359 // This scan can read even
uncommitted transactions
-1360 return Long.MAX_VALUE;
-1361}
-1362return mvcc.getReadPoint();
-1363 }
-1364
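A hedged illustration of the read-point rule described in the getReadPoint javadoc above; the region reference is assumed to be an open HRegion:

    // Hedged sketch: READ_UNCOMMITTED scans are given an unbounded read point.
    long uncommitted = region.getReadPoint(IsolationLevel.READ_UNCOMMITTED); // Long.MAX_VALUE
    long committed = region.getReadPoint(null);                              // mvcc.getReadPoint()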
-1365 public boolean
isLoadingCfsOnDemandDefault() {
-1366return
this.isLoadingCfsOnDemandDefault;
-1367 }
-1368
-1369 /**
-1370 * Close down this HRegion. Flush the
cache, shut down each HStore, don't
-1371 * service any more calls.
-1372 *
-1373 * <p>This method could take some time to execute, so don't call it from a
-1374 * time-sensitive thread.
-1375 *
-1376 * @return Vector of all the storage
files that the HRegion's component
-1377 * HStores make use of. It's a list
of all StoreFile objects. Returns empty
-1378 * vector if already closed and null
if judged that it should not close.
-1379 *
-1380 * @throws IOException e
-1381 * @throws DroppedSnapshotException
Thrown when replay of wal is required
-1382 * because a Snapshot was not properly
persisted. The region is put in closing mode, and the
-1383 * caller MUST abort after this.
-1384 */
-1385 public Map<byte[], List<HStoreFile>> close() throws IOException {
-1386return close(false);
-1387 }
-1388
-1389 private final Object closeLock = new
Object();
-1390
-1391 /** Conf key for the periodic flush
interval */
-1392 public static final String
MEMSTORE_PERIODIC_FLUSH_INTERVAL =
-1393
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
index 9098105..b05691f 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
@@ -37,1514 +37,1514 @@
029import java.util.ArrayList;
030import java.util.Iterator;
031import java.util.List;
-032
-033import
org.apache.hadoop.hbase.KeyValue.Type;
-034import
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-035import
org.apache.hadoop.hbase.io.HeapSize;
-036import
org.apache.hadoop.hbase.io.TagCompressionContext;
-037import
org.apache.hadoop.hbase.io.util.Dictionary;
-038import
org.apache.hadoop.hbase.io.util.StreamUtils;
-039import
org.apache.hadoop.hbase.util.ByteBufferUtils;
-040import
org.apache.hadoop.hbase.util.ByteRange;
-041import
org.apache.hadoop.hbase.util.Bytes;
-042import
org.apache.hadoop.hbase.util.ClassSize;
-043import
org.apache.yetus.audience.InterfaceAudience;
-044
-045import
com.google.common.annotations.VisibleForTesting;
-046
-047/**
-048 * Utility methods helpful slinging
{@link Cell} instances. It has more powerful and
-049 * rich set of APIs than those in {@link
CellUtil} for internal usage.
-050 */
-051@InterfaceAudience.Private
-052// TODO : Make Tag IA.LimitedPrivate and
move some of the Util methods to CP exposed Util class
-053public class PrivateCellUtil {
+032import java.util.Optional;
+033
+034import
org.apache.hadoop.hbase.KeyValue.Type;
+035import
org.apache.hadoop.hbase.filter.ByteArrayComparable;
+036import
org.apache.hadoop.hbase.io.HeapSize;
+037import
org.apache.hadoop.hbase.io.TagCompressionContext;
+038import
org.apache.hadoop.hbase.io.util.Dictionary;
+039import
org.apache.hadoop.hbase.io.util.StreamUtils;
+040import
org.apache.hadoop.hbase.util.ByteBufferUtils;
+041import
org.apache.hadoop.hbase.util.ByteRange;
+042import
org.apache.hadoop.hbase.util.Bytes;
+043import
org.apache.hadoop.hbase.util.ClassSize;
+044import
org.apache.yetus.audience.InterfaceAudience;
+045
+046import
com.google.common.annotations.VisibleForTesting;
+047
+048/**
+049 * Utility methods helpful slinging
{@link Cell} instances. It has more powerful and
+050 * rich set of APIs than those in {@link
CellUtil} for internal usage.
+051 */
+052@InterfaceAudience.Private
+053public final class PrivateCellUtil {
054
055 /**
056 * Private constructor to keep this
class from being instantiated.
057 */
058 private PrivateCellUtil() {
-059
-060 }
-061
-062 /*** ByteRange
***/
-063
-064 public static ByteRange
fillRowRange(Cell cell, ByteRange range) {
-065return range.set(cell.getRowArray(),
cell.getRowOffset(), cell.getRowLength());
-066 }
-067
-068 public static ByteRange
fillFamilyRange(Cell cell, ByteRange range) {
-069return
range.set(cell.getFamilyArray(), cell.getFamilyOffset(),
cell.getFamilyLength());
-070 }
-071
-072 public static ByteRange
fillQualifierRange(Cell cell, ByteRange range) {
-073return
range.set(cell.getQualifierArray(), cell.getQualifierOffset(),
-074 cell.getQualifierLength());
-075 }
-076
-077 public static ByteRange
fillValueRange(Cell cell, ByteRange range) {
-078return
range.set(cell.getValueArray(), cell.getValueOffset(),
cell.getValueLength());
-079 }
-080
-081 public static ByteRange
fillTagRange(Cell cell, ByteRange range) {
-082return range.set(cell.getTagsArray(),
cell.getTagsOffset(), cell.getTagsLength());
-083 }
-084
-085 /**
-086 * Returns tag value in a new byte
array. If server-side, use {@link Tag#getValueArray()} with
-087 * appropriate {@link
Tag#getValueOffset()} and {@link Tag#getValueLength()} instead to save on
-088 * allocations.
-089 * @param cell
-090 * @return tag value in a new byte
array.
-091 */
-092 public static byte[] getTagsArray(Cell
cell) {
-093byte[] output = new
byte[cell.getTagsLength()];
-094copyTagsTo(cell, output, 0);
-095return output;
-096 }
-097
-098 public static byte[] cloneTags(Cell
cell) {
-099byte[] output = new
byte[cell.getTagsLength()];
-100copyTagsTo(cell, output, 0);
-101return output;
-102 }
-103
-104 /**
-105 * Copies the tags info into the tag
portion of the cell
-106 * @param cell
-107 * @param destination
-108 * @param destinationOffset
-109 * @return position after tags
+059 }
+060
+061 /*** ByteRange
***/
+062
+063 public static ByteRange
fillRowRange(Cell cell, ByteRange range) {
+064return range.set(cell.getRowArray(),
cell.getRowOffset(),