http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html
b/apidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html
index 4729dfd..503a09a 100644
--- a/apidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html
+++ b/apidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html
@@ -1,10 +1,10 @@
http://www.w3.org/TR/html4/loose.dtd;>
-
+
-接口 org.apache.hadoop.hbase.CellBuilder的使用 (Apache HBase
3.0.0-SNAPSHOT API)
+Uses of Interface org.apache.hadoop.hbase.CellBuilder (Apache HBase
3.0.0-SNAPSHOT API)
@@ -12,7 +12,7 @@
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
-跳过导航链接
+Skip navigation links
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
-上一个
-下一个
+Prev
+Next
-框架
-无框架
+Frames
+NoFrames
-所有类
+AllClasses
-
-
-使用CellBuilder的程序包
+
+Packages that use CellBuilder
-程序包 |
-说明 |
+Package |
+Description |
@@ -94,70 +94,70 @@
-
-
-
-返回CellBuilder的org.apache.hadoop.hbase中的方法
+
+
+Methods in org.apache.hadoop.hbase
that return CellBuilder
-限定符和类型 |
-方法和说明 |
+Modifier and Type |
+Method and Description |
-CellBuilder |
+[09/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
b/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
index 5cca62a..37e55ec 100644
--- a/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
+++ b/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
@@ -1,10 +1,10 @@
http://www.w3.org/TR/html4/loose.dtd;>
-
+
-Uses of Interface org.apache.hadoop.hbase.Cell (Apache HBase
3.0.0-SNAPSHOT API)
+接口 org.apache.hadoop.hbase.Cell的使用 (Apache HBase
3.0.0-SNAPSHOT API)
@@ -12,7 +12,7 @@
-JavaScript is disabled on your browser.
+您的浏览器已禁用 JavaScript。
-Skip navigation links
+跳过导航链接
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
+
+概览
+程序包
+类
+使用
+树
+已过时
+索引
+帮助
-Prev
-Next
+上一个
+下一个
-Frames
-NoFrames
+框架
+无框架
-AllClasses
+所有类
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.TransparentCryptoHelper.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.TransparentCryptoHelper.html
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.TransparentCryptoHelper.html
index 05e032c..40ef9f4 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.TransparentCryptoHelper.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.TransparentCryptoHelper.html
@@ -25,767 +25,805 @@
017 */
018package
org.apache.hadoop.hbase.io.asyncfs;
019
-020import static
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE;
-021import static
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
+020import static
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
+021import static
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE;
022
-023import
org.apache.hbase.thirdparty.com.google.common.base.Charsets;
-024import
org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-025import
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;
-026import
org.apache.hbase.thirdparty.com.google.common.collect.Maps;
-027import
com.google.protobuf.CodedOutputStream;
-028
-029import
org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf;
-030import
org.apache.hbase.thirdparty.io.netty.buffer.ByteBufOutputStream;
-031import
org.apache.hbase.thirdparty.io.netty.buffer.CompositeByteBuf;
-032import
org.apache.hbase.thirdparty.io.netty.buffer.Unpooled;
-033import
org.apache.hbase.thirdparty.io.netty.channel.Channel;
-034import
org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler;
-035import
org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;
-036import
org.apache.hbase.thirdparty.io.netty.channel.ChannelOutboundHandlerAdapter;
-037import
org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline;
-038import
org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise;
-039import
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler;
-040import
org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder;
-041import
org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToByteEncoder;
-042import
org.apache.hbase.thirdparty.io.netty.handler.codec.protobuf.ProtobufDecoder;
-043import
org.apache.hbase.thirdparty.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-044import
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateEvent;
-045import
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler;
-046import
org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise;
-047
-048import java.io.IOException;
-049import java.lang.reflect.Field;
-050import
java.lang.reflect.InvocationTargetException;
-051import java.lang.reflect.Method;
-052import java.net.InetAddress;
-053import java.net.InetSocketAddress;
-054import java.nio.ByteBuffer;
-055import
java.security.GeneralSecurityException;
-056import java.util.Arrays;
-057import java.util.Collections;
-058import java.util.List;
-059import java.util.Map;
-060import java.util.Set;
-061import java.util.concurrent.TimeUnit;
-062import
java.util.concurrent.atomic.AtomicBoolean;
-063
-064import
javax.security.auth.callback.Callback;
-065import
javax.security.auth.callback.CallbackHandler;
-066import
javax.security.auth.callback.NameCallback;
-067import
javax.security.auth.callback.PasswordCallback;
-068import
javax.security.auth.callback.UnsupportedCallbackException;
-069import
javax.security.sasl.RealmCallback;
-070import
javax.security.sasl.RealmChoiceCallback;
-071import javax.security.sasl.Sasl;
-072import javax.security.sasl.SaslClient;
-073import
javax.security.sasl.SaslException;
-074
-075import
org.apache.commons.codec.binary.Base64;
-076import
org.apache.commons.lang3.StringUtils;
-077import
org.apache.hadoop.conf.Configuration;
-078import
org.apache.hadoop.crypto.CipherOption;
-079import
org.apache.hadoop.crypto.CipherSuite;
-080import
org.apache.hadoop.crypto.CryptoCodec;
-081import
org.apache.hadoop.crypto.Decryptor;
-082import
org.apache.hadoop.crypto.Encryptor;
-083import
org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
-084import
org.apache.hadoop.fs.FileEncryptionInfo;
-085import
org.apache.yetus.audience.InterfaceAudience;
-086import org.slf4j.Logger;
-087import org.slf4j.LoggerFactory;
-088
-089import com.google.protobuf.ByteString;
-090import
org.apache.hadoop.hdfs.DFSClient;
-091import
org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-092import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
index c10cfbf..a3e2f4a 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
@@ -3371,7 +3371,7 @@
3363private V result = null;
3364
3365private final HBaseAdmin admin;
-3366private final Long procId;
+3366protected final Long procId;
3367
3368public ProcedureFuture(final
HBaseAdmin admin, final Long procId) {
3369 this.admin = admin;
@@ -3653,653 +3653,651 @@
3645 * @return a description of the
operation
3646 */
3647protected String getDescription()
{
-3648 return "Operation: " +
getOperationType() + ", "
-3649 + "Table Name: " +
tableName.getNameWithNamespaceInclAsString();
-3650
-3651}
-3652
-3653protected abstract class
TableWaitForStateCallable implements WaitForStateCallable {
-3654 @Override
-3655 public void
throwInterruptedException() throws InterruptedIOException {
-3656throw new
InterruptedIOException("Interrupted while waiting for operation: "
-3657+ getOperationType() + " on
table: " + tableName.getNameWithNamespaceInclAsString());
-3658 }
-3659
-3660 @Override
-3661 public void
throwTimeoutException(long elapsedTime) throws TimeoutException {
-3662throw new TimeoutException("The
operation: " + getOperationType() + " on table: " +
-3663tableName.getNameAsString()
+ " has not completed after " + elapsedTime + "ms");
-3664 }
-3665}
-3666
-3667@Override
-3668protected V
postOperationResult(final V result, final long deadlineTs)
-3669throws IOException,
TimeoutException {
-3670 LOG.info(getDescription() + "
completed");
-3671 return
super.postOperationResult(result, deadlineTs);
-3672}
-3673
-3674@Override
-3675protected V
postOperationFailure(final IOException exception, final long deadlineTs)
-3676throws IOException,
TimeoutException {
-3677 LOG.info(getDescription() + "
failed with " + exception.getMessage());
-3678 return
super.postOperationFailure(exception, deadlineTs);
-3679}
-3680
-3681protected void
waitForTableEnabled(final long deadlineTs)
-3682throws IOException,
TimeoutException {
-3683 waitForState(deadlineTs, new
TableWaitForStateCallable() {
-3684@Override
-3685public boolean checkState(int
tries) throws IOException {
-3686 try {
-3687if
(getAdmin().isTableAvailable(tableName)) {
-3688 return true;
-3689}
-3690 } catch
(TableNotFoundException tnfe) {
-3691LOG.debug("Table " +
tableName.getNameWithNamespaceInclAsString()
-3692+ " was not enabled,
sleeping. tries=" + tries);
-3693 }
-3694 return false;
-3695}
-3696 });
-3697}
-3698
-3699protected void
waitForTableDisabled(final long deadlineTs)
-3700throws IOException,
TimeoutException {
-3701 waitForState(deadlineTs, new
TableWaitForStateCallable() {
-3702@Override
-3703public boolean checkState(int
tries) throws IOException {
-3704 return
getAdmin().isTableDisabled(tableName);
-3705}
-3706 });
-3707}
-3708
-3709protected void
waitTableNotFound(final long deadlineTs)
-3710throws IOException,
TimeoutException {
-3711 waitForState(deadlineTs, new
TableWaitForStateCallable() {
-3712@Override
-3713public boolean checkState(int
tries) throws IOException {
-3714 return
!getAdmin().tableExists(tableName);
-3715}
-3716 });
-3717}
-3718
-3719protected void
waitForSchemaUpdate(final long deadlineTs)
-3720throws IOException,
TimeoutException {
-3721 waitForState(deadlineTs, new
TableWaitForStateCallable() {
-3722@Override
-3723public boolean checkState(int
tries) throws IOException {
-3724 return
getAdmin().getAlterStatus(tableName).getFirst() == 0;
-3725}
-3726 });
-3727}
-3728
-3729protected void
waitForAllRegionsOnline(final long deadlineTs, final byte[][] splitKeys)
-3730throws IOException,
TimeoutException {
-3731 final TableDescriptor desc =
getTableDescriptor();
-3732 final AtomicInteger actualRegCount
= new AtomicInteger(0);
-3733 final MetaTableAccessor.Visitor
visitor = new MetaTableAccessor.Visitor() {
-3734@Override
-3735public boolean visit(Result
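The hunk above removes HBaseAdmin's deadline-based wait helpers (TableWaitForStateCallable and the waitForState(...) calls that poll until a table reaches the desired state). As a minimal standalone sketch of that polling contract, with StateCheck and DeadlinePoller as illustrative names rather than HBase API:

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.TimeoutException;

// Stand-in for HBaseAdmin's WaitForStateCallable, not the real interface.
interface StateCheck {
  boolean checkState(int tries) throws IOException;
}

final class DeadlinePoller {
  // Poll until the check passes or deadlineTs (epoch millis) elapses.
  static void waitForState(long deadlineTs, StateCheck check)
      throws IOException, TimeoutException {
    int tries = 0;
    long start = System.currentTimeMillis();
    while (System.currentTimeMillis() < deadlineTs) {
      if (check.checkState(tries++)) {
        return;
      }
      try {
        Thread.sleep(100); // fixed backoff between probes
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new InterruptedIOException("Interrupted while waiting");
      }
    }
    throw new TimeoutException(
        "State not reached after " + (System.currentTimeMillis() - start) + "ms");
  }
}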
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
b/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
index 82070ed..7c0c94d 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
-private class CleanerChore.CleanerTask
+private class CleanerChore.CleanerTask
extends RecursiveTask<Boolean>
Attempts to clean up a directory, its subdirectories, and
files.
Return value is true if everything was deleted. false on partial / total
failures.
@@ -259,7 +259,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/
dir
-private final org.apache.hadoop.fs.Path dir
+private final org.apache.hadoop.fs.Path dir
@@ -268,7 +268,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/
root
-private final boolean root
+private final boolean root
@@ -285,7 +285,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/
CleanerTask
-CleanerTask(org.apache.hadoop.fs.FileStatus dir,
+CleanerTask(org.apache.hadoop.fs.FileStatus dir,
boolean root)
@@ -295,7 +295,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/
CleanerTask
-CleanerTask(org.apache.hadoop.fs.Path dir,
+CleanerTask(org.apache.hadoop.fs.Path dir,
boolean root)
@@ -313,7 +313,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/
compute
-protected Boolean compute()
+protected Boolean compute()
Specified by:
compute in class RecursiveTask<Boolean>
@@ -326,16 +326,14 @@ extends https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/
getFilteredStatus
-private List<org.apache.hadoop.fs.FileStatus> getFilteredStatus(org.apache.hbase.thirdparty.com.google.common.base.Predicate<org.apache.hadoop.fs.FileStatus> function)
+private List<org.apache.hadoop.fs.FileStatus> getFilteredStatus(org.apache.hbase.thirdparty.com.google.common.base.Predicate<org.apache.hadoop.fs.FileStatus> function)
throws IOException
-Get FileStatus with filter.
- Pay attention that FSUtils #listStatusWithStatusFilter would return null,
- even though status is empty but not null.
+Get FileStatus with filter.
Parameters:
function - a filter function
Returns:
-filtered FileStatus or null if dir doesn't exist
+filtered FileStatus or empty list if dir doesn't exist
Throws:
IOException - if there's an
error other than dir not existing
@@ -347,7 +345,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/
deleteAction
-private boolean deleteAction(CleanerChore.Action<Boolean> deletion,
+private boolean deleteAction(CleanerChore.Action<Boolean> deletion,
String type)
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.html
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.html
index 683aed9..92e4f86 100644
---
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.html
+++
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.html
@@ -686,7 +686,7 @@ implements
hashCode
-public int hashCode()
+public int hashCode()
Overrides:
hashCode in class Object
@@ -699,7 +699,7 @@ implements
equals
-public boolean equals(Object obj)
+public boolean equals(Object obj)
Overrides:
equals in class Object
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html
index 91b1ef5..41a1aa4 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html
@@ -18,7 +18,7 @@
catch(err) {
}
//-->
-var methods =
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6};
+var methods =
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6};
var tabs = {65535:["t0","All Methods"],2:["t2","Instance
Methods"],4:["t3","Abstract Methods"]};
var altColor = "altColor";
var rowColor = "rowColor";
@@ -133,123 +133,129 @@ public interface
long
+getCpRequestCount()
+Get the total number of CoprocessorService requests that
have been issued against this region
+
+
+
+long
getFilteredReadRequestCount()
Get the total number of filtered read requests that have
been issued against this region
-
+
long
getLastMajorCompactionAge()
-
+
long
getMaxCompactionQueueSize()
Note that this metric is updated periodically and hence
might miss some data points.
-
+
long
getMaxFlushQueueSize()
Note that this metric is updated periodically and hence
might miss some data points.
-
+
long
getMaxStoreFileAge()
-
+
long
getMemStoreSize()
Get the size of the memstore on this region server.
-
+
long
getMinStoreFileAge()
-
+
String
getNamespace()
Get the name of the namespace this table is in.
-
+
long
getNumBytesCompacted()
-
+
long
getNumCompactionsCompleted()
-
+
long
getNumCompactionsFailed()
Returns the total number of compactions that have been
reported as failed on this region.
-
+
long
getNumCompactionsQueued()
-
+
long
getNumFilesCompacted()
-
+
long
getNumFlushesQueued()
-
+
long
getNumReferenceFiles()
-
+
long
getNumStoreFiles()
Get the number of store files hosted on this region
server.
-
+
long
getNumStores()
Get the number of stores hosted on this region server.
-
+
long
getReadRequestCount()
Get the total number of read requests that have been issued
against this region
-
+
int
getRegionHashCode()
-
+
String
getRegionName()
Get the name of the region.
-
+
int
getReplicaId()
Get the replica id of this region.
-
+
long
getStoreFileSize()
Get the total size of the store files this region server is
serving from.
-
+
String
getTableName()
Get the name of the
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
index 74bacd8..546d2b6 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
@@ -2249,1468 +2249,1484 @@
2241 }
2242
2243 @Override
-2244 public long addColumn(
-2245 final TableName tableName,
-2246 final ColumnFamilyDescriptor
column,
-2247 final long nonceGroup,
-2248 final long nonce)
-2249 throws IOException {
-2250checkInitialized();
-2251checkTableExists(tableName);
-2252
-2253TableDescriptor old =
getTableDescriptors().get(tableName);
-2254if
(old.hasColumnFamily(column.getName())) {
-2255 throw new
InvalidFamilyOperationException("Column family '" + column.getNameAsString()
-2256 + "' in table '" + tableName +
"' already exists so cannot be added");
-2257}
+2244 public long addColumn(final TableName
tableName, final ColumnFamilyDescriptor column,
+2245 final long nonceGroup, final long
nonce) throws IOException {
+2246checkInitialized();
+2247checkTableExists(tableName);
+2248
+2249return modifyTable(tableName, new
TableDescriptorGetter() {
+2250
+2251 @Override
+2252 public TableDescriptor get()
throws IOException {
+2253TableDescriptor old =
getTableDescriptors().get(tableName);
+2254if
(old.hasColumnFamily(column.getName())) {
+2255 throw new
InvalidFamilyOperationException("Column family '" + column.getNameAsString()
+2256 + "' in table '" +
tableName + "' already exists so cannot be added");
+2257}
2258
-2259TableDescriptor newDesc =
TableDescriptorBuilder
-2260
.newBuilder(old).setColumnFamily(column).build();
-2261return modifyTable(tableName,
newDesc, nonceGroup, nonce);
+2259return
TableDescriptorBuilder.newBuilder(old).setColumnFamily(column).build();
+2260 }
+2261}, nonceGroup, nonce);
2262 }
2263
-2264 @Override
-2265 public long modifyColumn(
-2266 final TableName tableName,
-2267 final ColumnFamilyDescriptor
descriptor,
-2268 final long nonceGroup,
-2269 final long nonce)
-2270 throws IOException {
-2271checkInitialized();
-2272checkTableExists(tableName);
-2273
-2274TableDescriptor old =
getTableDescriptors().get(tableName);
-2275if (!
old.hasColumnFamily(descriptor.getName())) {
-2276 throw new
InvalidFamilyOperationException("Family '" + descriptor.getNameAsString()
-2277 + "' does not exist, so it
cannot be modified");
-2278}
-2279
-2280TableDescriptor td =
TableDescriptorBuilder
-2281.newBuilder(old)
-2282
.modifyColumnFamily(descriptor)
-2283.build();
-2284
-2285return modifyTable(tableName, td,
nonceGroup, nonce);
-2286 }
-2287
-2288 @Override
-2289 public long deleteColumn(
-2290 final TableName tableName,
-2291 final byte[] columnName,
-2292 final long nonceGroup,
-2293 final long nonce)
-2294 throws IOException {
-2295checkInitialized();
-2296checkTableExists(tableName);
-2297
-2298TableDescriptor old =
getTableDescriptors().get(tableName);
-2299
-2300if (!
old.hasColumnFamily(columnName)) {
-2301 throw new
InvalidFamilyOperationException("Family '" + Bytes.toString(columnName)
-2302 + "' does not exist, so it
cannot be deleted");
-2303}
-2304if (old.getColumnFamilyCount() == 1)
{
-2305 throw new
InvalidFamilyOperationException("Family '" + Bytes.toString(columnName)
-2306 + "' is the only column family
in the table, so it cannot be deleted");
-2307}
-2308
-2309TableDescriptor td =
TableDescriptorBuilder
-2310
.newBuilder(old).removeColumnFamily(columnName).build();
-2311return modifyTable(tableName, td,
nonceGroup, nonce);
-2312 }
-2313
-2314 @Override
-2315 public long enableTable(final
TableName tableName, final long nonceGroup, final long nonce)
-2316 throws IOException {
-2317checkInitialized();
-2318
-2319return
MasterProcedureUtil.submitProcedure(
-2320new
MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
-2321 @Override
-2322 protected void run() throws
IOException {
-2323
getMaster().getMasterCoprocessorHost().preEnableTable(tableName);
-2324
-2325// Normally, it would make sense
for this authorization check to exist inside
-2326// AccessController, but because
the authorization check is done based on internal state
-2327// (rather than
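The hunk above refactors HMaster.addColumn: instead of building the new TableDescriptor eagerly, it hands modifyTable a TableDescriptorGetter so the descriptor is constructed, and its family checks run, when the procedure actually executes. A hedged sketch of that callback shape, with DescriptorGetter and TableModifier as hypothetical stand-ins rather than the HBase types:

import java.io.IOException;

// DescriptorGetter mirrors the role of HMaster's TableDescriptorGetter;
// the type parameter D is a placeholder for TableDescriptor.
interface DescriptorGetter<D> {
  D get() throws IOException; // may throw e.g. "family already exists"
}

class TableModifier<D> {
  long modifyTable(String tableName, DescriptorGetter<D> getter,
      long nonceGroup, long nonce) throws IOException {
    // The getter runs here, inside the modify path, so validation
    // happens lazily, at procedure execution time.
    D newDescriptor = getter.get();
    // ... would wrap newDescriptor in a modify-table procedure and submit it ...
    return -1L; // would return the procedure id
  }
}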
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html
index e73540e..7b680e9 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html
@@ -28,135 +28,100 @@
020
021import java.io.IOException;
022import java.util.List;
-023import java.util.Set;
-024import
org.apache.hadoop.hbase.HConstants;
-025import
org.apache.hadoop.hbase.ServerName;
-026import
org.apache.hadoop.hbase.client.RegionInfo;
-027import
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-028import
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-029import
org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-030import
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-031import
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-032import
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-033import
org.apache.yetus.audience.InterfaceAudience;
-034import
org.apache.zookeeper.KeeperException;
-035import org.slf4j.Logger;
-036import org.slf4j.LoggerFactory;
-037
-038/**
-039 * Used by the HMaster on startup to
split meta logs and assign the meta table.
-040 */
-041@InterfaceAudience.Private
-042public class MasterMetaBootstrap {
-043 private static final Logger LOG =
LoggerFactory.getLogger(MasterMetaBootstrap.class);
-044
-045 private final HMaster master;
-046
-047 public MasterMetaBootstrap(HMaster
master) {
-048this.master = master;
-049 }
-050
-051 public void recoverMeta() throws
InterruptedException, IOException {
-052// This is a blocking call that waits
until hbase:meta is deployed.
-053master.recoverMeta();
-054// Now we can start the
TableStateManager. It is backed by hbase:meta.
-055
master.getTableStateManager().start();
-056// Enable server crash procedure
handling
-057enableCrashedServerProcessing();
-058 }
-059
-060 public void processDeadServers() {
-061// get a list for previously failed
RS which need log splitting work
-062// we recover hbase:meta region
servers inside master initialization and
-063// handle other failed servers in SSH
in order to start up master node ASAP
-064Set<ServerName>
previouslyFailedServers =
-065
master.getMasterWalManager().getFailedServersFromLogFolders();
-066
-067// Master has recovered hbase:meta
region server and we put
-068// other failed region servers in a
queue to be handled later by SSH
-069for (ServerName tmpServer :
previouslyFailedServers) {
-070
master.getServerManager().processDeadServer(tmpServer, true);
-071}
-072 }
-073
-074 /**
-075 * For assigning hbase:meta replicas
only.
-076 * TODO: The way this assign runs,
nothing but chance to stop all replicas showing up on same
-077 * server as the hbase:meta region.
-078 */
-079 protected void assignMetaReplicas()
-080 throws IOException,
InterruptedException, KeeperException {
-081int numReplicas =
master.getConfiguration().getInt(HConstants.META_REPLICAS_NUM,
-082
HConstants.DEFAULT_META_REPLICA_NUM);
-083if (numReplicas <= 1) {
-084 // No replicas to assign.
Return.
-085 return;
-086}
-087final AssignmentManager
assignmentManager = master.getAssignmentManager();
-088if
(!assignmentManager.isMetaInitialized()) {
-089 throw new
IllegalStateException("hbase:meta must be initialized first before we can " +
-090 "assign out its replicas");
-091}
-092ServerName metaServername =
-093
this.master.getMetaTableLocator().getMetaRegionLocation(this.master.getZooKeeper());
-094for (int i = 1; i < numReplicas;
i++) {
-095 // Get current meta state for
replica from zk.
-096 RegionState metaState =
MetaTableLocator.getMetaRegionState(master.getZooKeeper(), i);
-097 RegionInfo hri =
RegionReplicaUtil.getRegionInfoForReplica(
-098
RegionInfoBuilder.FIRST_META_REGIONINFO, i);
-099
LOG.debug(hri.getRegionNameAsString() + " replica region state from zookeeper="
+ metaState);
-100 if
(metaServername.equals(metaState.getServerName())) {
-101metaState = null;
-102
LOG.info(hri.getRegionNameAsString() +
-103 " old location is same as
current hbase:meta location; setting location as null...");
-104 }
-105 // These assigns run inline. All is
blocked till they complete. Only interrupt is shutting
-106 // down hosting server which calls
AM#stop.
-107 if (metaState != null &&
metaState.getServerName() != null) {
-108// Try to retain old
assignment.
-109assignmentManager.assign(hri,
metaState.getServerName());
-110 } else {
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.EnsureKvEncoder.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.EnsureKvEncoder.html
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.EnsureKvEncoder.html
index 83c17c0..9df0225 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.EnsureKvEncoder.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.EnsureKvEncoder.html
@@ -54,323 +54,362 @@
046import org.apache.hadoop.io.IOUtils;
047
048import
org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
-049
+049import
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
050
-051/**
-052 * Compression in this class is lifted
off Compressor/KeyValueCompression.
-053 * This is a pure coincidence... they are
independent and don't have to be compatible.
-054 *
-055 * This codec is used at server side for
writing cells to WAL as well as for sending edits
-056 * as part of the distributed splitting
process.
-057 */
-058@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC,
-059 HBaseInterfaceAudience.PHOENIX,
HBaseInterfaceAudience.CONFIG})
-060public class WALCellCodec implements
Codec {
-061 /** Configuration key for the class to
use when encoding cells in the WAL */
-062 public static final String
WAL_CELL_CODEC_CLASS_KEY = "hbase.regionserver.wal.codec";
-063
-064 protected final CompressionContext
compression;
-065 protected final ByteStringUncompressor
statelessUncompressor = new ByteStringUncompressor() {
-066@Override
-067public byte[] uncompress(ByteString
data, Dictionary dict) throws IOException {
-068 return
WALCellCodec.uncompressByteString(data, dict);
-069}
-070 };
-071
-072 /**
-073 * <b>All subclasses must
implement a no argument constructor</b>
-074 */
-075 public WALCellCodec() {
-076this.compression = null;
-077 }
-078
-079 /**
-080 * Default constructor - <b>all
subclasses must implement a constructor with this signature </b>
-081 * if they are to be dynamically loaded
from the {@link Configuration}.
-082 * @param conf configuration to
configure <tt>this</tt>
-083 * @param compression compression the
codec should support, can be <tt>null</tt> to indicate no
-084 * compression
-085 */
-086 public WALCellCodec(Configuration conf,
CompressionContext compression) {
-087this.compression = compression;
-088 }
-089
-090 public static String
getWALCellCodecClass(Configuration conf) {
-091return
conf.get(WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
-092 }
-093
-094 /**
-095 * Create and setup a {@link
WALCellCodec} from the {@code cellCodecClsName} and
-096 * CompressionContext, if {@code
cellCodecClsName} is specified.
-097 * Otherwise Cell Codec classname is
read from {@link Configuration}.
-098 * Fully prepares the codec for use.
-099 * @param conf {@link Configuration} to
read for the user-specified codec. If none is specified,
-100 * uses a {@link
WALCellCodec}.
-101 * @param cellCodecClsName name of
codec
-102 * @param compression compression the
codec should use
-103 * @return a {@link WALCellCodec} ready
for use.
-104 * @throws
UnsupportedOperationException if the codec cannot be instantiated
-105 */
-106
-107 public static WALCellCodec
create(Configuration conf, String cellCodecClsName,
-108 CompressionContext compression)
throws UnsupportedOperationException {
-109if (cellCodecClsName == null) {
-110 cellCodecClsName =
getWALCellCodecClass(conf);
-111}
-112return
ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, new Class[]
-113{ Configuration.class,
CompressionContext.class }, new Object[] { conf, compression });
-114 }
-115
-116 /**
-117 * Create and setup a {@link
WALCellCodec} from the
-118 * CompressionContext.
-119 * Cell Codec classname is read from
{@link Configuration}.
-120 * Fully prepares the codec for use.
-121 * @param conf {@link Configuration} to
read for the user-specified codec. If none is specified,
-122 * uses a {@link
WALCellCodec}.
-123 * @param compression compression the
codec should use
-124 * @return a {@link WALCellCodec} ready
for use.
-125 * @throws
UnsupportedOperationException if the codec cannot be instantiated
-126 */
-127 public static WALCellCodec
create(Configuration conf,
-128 CompressionContext compression)
throws UnsupportedOperationException {
-129String cellCodecClsName =
getWALCellCodecClass(conf);
-130return
ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, new Class[]
-131{ Configuration.class,
CompressionContext.class }, new Object[] { conf, compression });
-132 }
-133
-134
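WALCellCodec.create, shown above, loads the codec class named in the configuration and invokes its (Configuration, CompressionContext) constructor reflectively via ReflectionUtils.instantiateWithCustomCtor. A minimal sketch of what such a helper has to do; the helper body here is an assumption, not the HBase source:

import java.lang.reflect.Constructor;

final class CustomCtor {
  // Load className and invoke the constructor matching ctorTypes.
  static Object instantiate(String className, Class<?>[] ctorTypes,
      Object[] ctorArgs) {
    try {
      Class<?> clazz = Class.forName(className);
      Constructor<?> ctor = clazz.getDeclaredConstructor(ctorTypes);
      ctor.setAccessible(true);
      return ctor.newInstance(ctorArgs);
    } catch (ReflectiveOperationException e) {
      // Matches the documented contract above: instantiation failure
      // surfaces as UnsupportedOperationException.
      throw new UnsupportedOperationException(
          "Cannot instantiate " + className, e);
    }
  }
}
// Usage, mirroring the listing:
// WALCellCodec codec = (WALCellCodec) CustomCtor.instantiate(cellCodecClsName,
//     new Class[] { Configuration.class, CompressionContext.class },
//     new Object[] { conf, compression });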
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html
b/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html
index 49f4e5a..e1f22c3 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html
@@ -30,188 +30,191 @@
022import
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
023import
org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface;
024import
org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher.ServerOperation;
-025import
org.apache.hadoop.hbase.procedure2.Procedure;
-026import
org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-027import
org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
-028import
org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
-029import
org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
-030import
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation;
-031import
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
-032import
org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
-033import
org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable;
-034import
org.apache.yetus.audience.InterfaceAudience;
-035import org.slf4j.Logger;
-036import org.slf4j.LoggerFactory;
-037
-038import
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-039import
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationType;
-040import
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RefreshPeerParameter;
-041import
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RefreshPeerStateData;
-042
-043@InterfaceAudience.Private
-044public class RefreshPeerProcedure extends
Procedure<MasterProcedureEnv>
-045implements PeerProcedureInterface,
RemoteProcedure<MasterProcedureEnv, ServerName> {
-046
-047 private static final Logger LOG =
LoggerFactory.getLogger(RefreshPeerProcedure.class);
-048
-049 private String peerId;
-050
-051 private PeerOperationType type;
-052
-053
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value =
"IS2_INCONSISTENT_SYNC",
-054 justification = "Will never change
after construction")
-055 private ServerName targetServer;
-056
-057 private boolean dispatched;
-058
-059 private ProcedureEvent<?>
event;
-060
-061 private boolean succ;
-062
-063 public RefreshPeerProcedure() {
-064 }
-065
-066 public RefreshPeerProcedure(String
peerId, PeerOperationType type, ServerName targetServer) {
-067this.peerId = peerId;
-068this.type = type;
-069this.targetServer = targetServer;
-070 }
-071
-072 @Override
-073 public String getPeerId() {
-074return peerId;
-075 }
-076
-077 @Override
-078 public PeerOperationType
getPeerOperationType() {
-079return PeerOperationType.REFRESH;
-080 }
-081
-082 private static PeerModificationType
toPeerModificationType(PeerOperationType type) {
-083switch (type) {
-084 case ADD:
-085return
PeerModificationType.ADD_PEER;
-086 case REMOVE:
-087return
PeerModificationType.REMOVE_PEER;
-088 case ENABLE:
-089return
PeerModificationType.ENABLE_PEER;
-090 case DISABLE:
-091return
PeerModificationType.DISABLE_PEER;
-092 case UPDATE_CONFIG:
-093return
PeerModificationType.UPDATE_PEER_CONFIG;
-094 default:
-095throw new
IllegalArgumentException("Unknown type: " + type);
-096}
-097 }
-098
-099 private static PeerOperationType
toPeerOperationType(PeerModificationType type) {
-100switch (type) {
-101 case ADD_PEER:
-102return PeerOperationType.ADD;
-103 case REMOVE_PEER:
-104return
PeerOperationType.REMOVE;
-105 case ENABLE_PEER:
-106return
PeerOperationType.ENABLE;
-107 case DISABLE_PEER:
-108return
PeerOperationType.DISABLE;
-109 case UPDATE_PEER_CONFIG:
-110return
PeerOperationType.UPDATE_CONFIG;
-111 default:
-112throw new
IllegalArgumentException("Unknown type: " + type);
-113}
-114 }
-115
-116 @Override
-117 public RemoteOperation
remoteCallBuild(MasterProcedureEnv env, ServerName remote) {
-118assert targetServer.equals(remote);
-119return new ServerOperation(this,
getProcId(), RefreshPeerCallable.class,
-120
RefreshPeerParameter.newBuilder().setPeerId(peerId).setType(toPeerModificationType(type))
-121
.setTargetServer(ProtobufUtil.toServerName(remote)).build().toByteArray());
-122 }
-123
-124 private void
complete(MasterProcedureEnv
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/devapidocs/src-html/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.html
b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.html
index cbf76ed..1756883 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.html
@@ -103,93 +103,98 @@
095done.run(null);
096 }
097
-098
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest
-099convert(PrepareBulkLoadRequest
request)
-100 throws
org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException
{
-101byte [] bytes =
request.toByteArray();
-102
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest.Builder
-103 builder =
-104
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest.
-105newBuilder();
-106builder.mergeFrom(bytes);
-107return builder.build();
-108 }
-109
-110 @Override
-111 public void
cleanupBulkLoad(RpcController controller, CleanupBulkLoadRequest request,
-112
RpcCallbackCleanupBulkLoadResponse done) {
-113try {
-114 SecureBulkLoadManager
secureBulkLoadManager = this.rsServices.getSecureBulkLoadManager();
-115
secureBulkLoadManager.cleanupBulkLoad((HRegion) this.env.getRegion(),
convert(request));
-116
done.run(CleanupBulkLoadResponse.newBuilder().build());
-117} catch (IOException e) {
-118
CoprocessorRpcUtils.setControllerException(controller, e);
-119}
-120done.run(null);
-121 }
-122
-123 /**
-124 * Convert from CPEP protobuf 2.5 to
internal protobuf 3.3.
-125 * @throws
org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException
-126 */
-127
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest
-128 convert(CleanupBulkLoadRequest
request)
-129 throws
org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException
{
-130byte [] bytes =
request.toByteArray();
-131
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest.Builder
-132builder =
-133
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest.
-134 newBuilder();
-135builder.mergeFrom(bytes);
-136return builder.build();
-137 }
-138
-139 @Override
-140 public void
secureBulkLoadHFiles(RpcController controller, SecureBulkLoadHFilesRequest
request,
-141
RpcCallback<SecureBulkLoadHFilesResponse> done) {
-142boolean loaded = false;
-143Map<byte[], List<Path>>
map = null;
-144try {
-145 SecureBulkLoadManager
secureBulkLoadManager = this.rsServices.getSecureBulkLoadManager();
-146 BulkLoadHFileRequest
bulkLoadHFileRequest = ConvertSecureBulkLoadHFilesRequest(request);
-147 map =
secureBulkLoadManager.secureBulkLoadHFiles((HRegion) this.env.getRegion(),
-148
convert(bulkLoadHFileRequest));
-149 loaded = map != null &&
!map.isEmpty();
-150} catch (IOException e) {
-151
CoprocessorRpcUtils.setControllerException(controller, e);
-152}
-153
done.run(SecureBulkLoadHFilesResponse.newBuilder().setLoaded(loaded).build());
-154 }
-155
-156
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest
-157 convert(BulkLoadHFileRequest request)
-158 throws
org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException
{
-159byte [] bytes =
request.toByteArray();
-160
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest.Builder
-161builder =
-162
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest.
-163newBuilder();
-164builder.mergeFrom(bytes);
-165return builder.build();
-166 }
-167
-168 private BulkLoadHFileRequest
ConvertSecureBulkLoadHFilesRequest(
-169 SecureBulkLoadHFilesRequest
request) {
-170BulkLoadHFileRequest.Builder
bulkLoadHFileRequest = BulkLoadHFileRequest.newBuilder();
-171RegionSpecifier region =
-172
ProtobufUtil.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, this.env
-173
.getRegionInfo().getRegionName());
-174
bulkLoadHFileRequest.setRegion(region).setFsToken(request.getFsToken())
-175
.setBulkToken(request.getBulkToken()).setAssignSeqNum(request.getAssignSeqNum())
-176
.addAllFamilyPath(request.getFamilyPathList());
-177return
bulkLoadHFileRequest.build();
-178 }
-179
-180 @Override
-181 public Iterable<Service>
getServices() {
-182return Collections.singleton(this);
+098 /**
+099 *
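The convert(...) methods above all follow one idiom: a CPEP request arrives as a com.google.protobuf 2.5 message, the internal API wants the shaded protobuf 3.3 equivalent, and since both runtimes share the same wire format a byte round-trip converts between them. A sketch of that idiom, assuming HBase's generated stubs are on the classpath (the unshaded stub package is an assumption, and parseFrom stands in for the newBuilder()/mergeFrom()/build() sequence in the listing):

// Serialize under the 2.5 runtime, reparse under the shaded 3.3 runtime.
static org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest
    toShaded(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.PrepareBulkLoadRequest request)
    throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException {
  byte[] bytes = request.toByteArray(); // wire bytes are runtime-neutral
  return org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos
      .PrepareBulkLoadRequest.parseFrom(bytes);
}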
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/util/ByteBufferUtils.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/util/ByteBufferUtils.html
b/devapidocs/src-html/org/apache/hadoop/hbase/util/ByteBufferUtils.html
index efd186d..30a1259 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/ByteBufferUtils.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/ByteBufferUtils.html
@@ -58,1008 +58,1012 @@
050 public final static int NEXT_BIT_MASK =
1 << 7;
051 @VisibleForTesting
052 final static boolean UNSAFE_AVAIL =
UnsafeAvailChecker.isAvailable();
-053 @VisibleForTesting
-054 final static boolean UNSAFE_UNALIGNED =
UnsafeAvailChecker.unaligned();
-055
-056 private ByteBufferUtils() {
-057 }
-058
-059 /**
-060 * Similar to {@link
WritableUtils#writeVLong(java.io.DataOutput, long)},
-061 * but writes to a {@link
ByteBuffer}.
-062 */
-063 public static void
writeVLong(ByteBuffer out, long i) {
-064if (i >= -112 && i <=
127) {
-065 out.put((byte) i);
-066 return;
-067}
-068
-069int len = -112;
-070if (i < 0) {
-071 i ^= -1L; // take one's
complement
-072 len = -120;
-073}
-074
-075long tmp = i;
-076while (tmp != 0) {
-077 tmp = tmp >> 8;
-078 len--;
-079}
-080
-081out.put((byte) len);
-082
-083len = (len < -120) ? -(len + 120)
: -(len + 112);
-084
-085for (int idx = len; idx != 0; idx--)
{
-086 int shiftbits = (idx - 1) * 8;
-087 long mask = 0xFFL <<
shiftbits;
-088 out.put((byte) ((i & mask) >>
shiftbits));
-089}
-090 }
-091
-092 /**
-093 * Similar to {@link
WritableUtils#readVLong(DataInput)} but reads from a
-094 * {@link ByteBuffer}.
-095 */
-096 public static long readVLong(ByteBuffer
in) {
-097byte firstByte = in.get();
-098int len =
WritableUtils.decodeVIntSize(firstByte);
-099if (len == 1) {
-100 return firstByte;
-101}
-102long i = 0;
-103for (int idx = 0; idx < len-1;
idx++) {
-104 byte b = in.get();
-105 i = i << 8;
-106 i = i | (b & 0xFF);
-107}
-108return
(WritableUtils.isNegativeVInt(firstByte) ? (i ^ -1L) : i);
-109 }
+053 public final static boolean
UNSAFE_UNALIGNED = UnsafeAvailChecker.unaligned();
+054
+055 private ByteBufferUtils() {
+056 }
+057
+058 /**
+059 * Similar to {@link
WritableUtils#writeVLong(java.io.DataOutput, long)},
+060 * but writes to a {@link
ByteBuffer}.
+061 */
+062 public static void
writeVLong(ByteBuffer out, long i) {
+063if (i >= -112 && i <=
127) {
+064 out.put((byte) i);
+065 return;
+066}
+067
+068int len = -112;
+069if (i < 0) {
+070 i ^= -1L; // take one's
complement
+071 len = -120;
+072}
+073
+074long tmp = i;
+075while (tmp != 0) {
+076 tmp = tmp >> 8;
+077 len--;
+078}
+079
+080out.put((byte) len);
+081
+082len = (len < -120) ? -(len + 120)
: -(len + 112);
+083
+084for (int idx = len; idx != 0; idx--)
{
+085 int shiftbits = (idx - 1) * 8;
+086 long mask = 0xFFL <<
shiftbits;
+087 out.put((byte) ((i & mask) >>
shiftbits));
+088}
+089 }
+090
+091 /**
+092 * Similar to {@link
WritableUtils#readVLong(DataInput)} but reads from a
+093 * {@link ByteBuffer}.
+094 */
+095 public static long readVLong(ByteBuffer
in) {
+096byte firstByte = in.get();
+097int len =
WritableUtils.decodeVIntSize(firstByte);
+098if (len == 1) {
+099 return firstByte;
+100}
+101long i = 0;
+102for (int idx = 0; idx < len-1;
idx++) {
+103 byte b = in.get();
+104 i = i << 8;
+105 i = i | (b & 0xFF);
+106}
+107return
(WritableUtils.isNegativeVInt(firstByte) ? (i ^ -1L) : i);
+108 }
+109
110
-111
-112 /**
-113 * Put in buffer integer using 7 bit
encoding. For each written byte:
-114 * 7 bits are used to store value
-115 * 1 bit is used to indicate whether
there is next bit.
-116 * @param value Int to be compressed.
-117 * @param out Where to put compressed
data
-118 * @return Number of bytes written.
-119 * @throws IOException on stream
error
-120 */
-121 public static int
putCompressedInt(OutputStream out, final int value)
-122 throws IOException {
-123int i = 0;
-124int tmpvalue = value;
-125do {
-126 byte b = (byte) (tmpvalue &
VALUE_MASK);
-127 tmpvalue >>>=
NEXT_BIT_SHIFT;
-128 if (tmpvalue != 0) {
-129b |= (byte) NEXT_BIT_MASK;
-130 }
-131 out.write(b);
-132 i++;
-133} while (tmpvalue != 0);
-134return i;
-135 }
-136
-137 /**
-138* Put in output stream 32 bit integer
(Big Endian byte order).
-139* @param out Where to put integer.
-140* @param value Value of integer.
-141* @throws IOException On stream
error.
-142*/
-143 public static void putInt(OutputStream
out, final int value)
-144
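putCompressedInt, shown above, writes an int with 7 value bits per byte and uses the high bit as a continuation flag. A self-contained sketch of that encoding together with a matching decoder; the decoder is an assumption, since this hunk only shows the encoder:

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

final class VarInt7 {
  static final int VALUE_MASK = 0x7F;       // low 7 bits hold data
  static final int NEXT_BIT_MASK = 1 << 7;  // high bit = continuation
  static final int NEXT_BIT_SHIFT = 7;

  // Write value 7 bits at a time, low-order group first; returns bytes written.
  static int put(OutputStream out, int value) throws IOException {
    int written = 0;
    int tmp = value;
    do {
      byte b = (byte) (tmp & VALUE_MASK);
      tmp >>>= NEXT_BIT_SHIFT;
      if (tmp != 0) {
        b |= (byte) NEXT_BIT_MASK; // more bytes follow
      }
      out.write(b);
      written++;
    } while (tmp != 0);
    return written;
  }

  // Read groups until a byte arrives with the continuation bit clear.
  static int get(InputStream in) throws IOException {
    int result = 0;
    int shift = 0;
    int b;
    do {
      b = in.read();
      result |= (b & VALUE_MASK) << shift;
      shift += NEXT_BIT_SHIFT;
    } while ((b & NEXT_BIT_MASK) != 0);
    return result;
  }
}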
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
--
diff --git
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
index 3f8844b..cdb9398 100644
---
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
+++
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
@@ -140,2712 +140,2713 @@
132public class PerformanceEvaluation
extends Configured implements Tool {
133 static final String RANDOM_SEEK_SCAN =
"randomSeekScan";
134 static final String RANDOM_READ =
"randomRead";
-135 private static final Logger LOG =
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136 private static final ObjectMapper
MAPPER = new ObjectMapper();
-137 static {
-138
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139 }
-140
-141 public static final String TABLE_NAME =
"TestTable";
-142 public static final String
FAMILY_NAME_BASE = "info";
-143 public static final byte[] FAMILY_ZERO
= Bytes.toBytes("info0");
-144 public static final byte[] COLUMN_ZERO
= Bytes.toBytes("" + 0);
-145 public static final int
DEFAULT_VALUE_LENGTH = 1000;
-146 public static final int ROW_LENGTH =
26;
-147
-148 private static final int ONE_GB = 1024
* 1024 * 1000;
-149 private static final int
DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150 // TODO : should we make this
configurable
-151 private static final int TAG_LENGTH =
256;
-152 private static final DecimalFormat FMT
= new DecimalFormat("0.##");
-153 private static final MathContext CXT =
MathContext.DECIMAL64;
-154 private static final BigDecimal
MS_PER_SEC = BigDecimal.valueOf(1000);
-155 private static final BigDecimal
BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156 private static final TestOptions
DEFAULT_OPTS = new TestOptions();
-157
-158 private static Map<String,
CmdDescriptor> COMMANDS = new TreeMap<>();
-159 private static final Path PERF_EVAL_DIR
= new Path("performance_evaluation");
-160
-161 static {
-162
addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163"Run async random read test");
-164
addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165"Run async random write test");
-166
addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167"Run async sequential read
test");
-168
addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169"Run async sequential write
test");
-170
addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171"Run async scan test (read every
row)");
-172
addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173 "Run random read test");
-174
addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175 "Run random seek and scan 100
test");
-176
addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177 "Run random seek scan with both
start and stop row (max 10 rows)");
-178
addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179 "Run random seek scan with both
start and stop row (max 100 rows)");
-180
addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181 "Run random seek scan with both
start and stop row (max 1000 rows)");
-182
addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183 "Run random seek scan with both
start and stop row (max 1 rows)");
-184
addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185 "Run random write test");
-186
addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187 "Run sequential read test");
-188
addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189 "Run sequential write test");
-190addCommandDescriptor(ScanTest.class,
"scan",
-191 "Run scan test (read every
row)");
-192
addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193 "Run scan test using a filter to
find a specific row based on it's value " +
-194 "(make sure to use --rows=20)");
-195
addCommandDescriptor(IncrementTest.class, "increment",
-196 "Increment on each row; clients
overlap on keyspace so some concurrent operations");
-197
addCommandDescriptor(AppendTest.class, "append",
-198 "Append on each row; clients
overlap on keyspace so some concurrent operations");
-199
addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200 "CheckAndMutate on each row;
clients overlap on keyspace so some concurrent
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
index 4c42811..0bc3ddb 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
@@ -563,381 +563,390 @@
555// If this is first time we've
been put off, then emit a log message.
556if (fqe.getRequeueCount() <=
0) {
557 // Note: We don't impose
blockingStoreFiles constraint on meta regions
-558 LOG.warn("Region " +
region.getRegionInfo().getEncodedName() + " has too many " +
-559"store files; delaying flush
up to " + this.blockingWaitTime + "ms");
-560 if
(!this.server.compactSplitThread.requestSplit(region)) {
-561try {
-562
this.server.compactSplitThread.requestSystemCompaction(region,
-563
Thread.currentThread().getName());
-564} catch (IOException e) {
-565 e = e instanceof
RemoteException ?
-566
((RemoteException)e).unwrapRemoteException() : e;
-567 LOG.error("Cache flush
failed for region " +
-568
Bytes.toStringBinary(region.getRegionInfo().getRegionName()), e);
-569}
-570 }
-571}
-572
-573// Put back on the queue. Have
it come back out of the queue
-574// after a delay of
this.blockingWaitTime / 100 ms.
-575
this.flushQueue.add(fqe.requeue(this.blockingWaitTime / 100));
-576// Tell a lie, it's not flushed
but it's ok
-577return true;
-578 }
-579}
-580return flushRegion(region, false,
fqe.isForceFlushAllStores(), fqe.getTracker());
-581 }
-582
-583 /**
-584 * Flush a region.
-585 * @param region Region to flush.
-586 * @param emergencyFlush Set if we are
being force flushed. If true the region
-587 * needs to be removed from the flush
queue. If false, when we were called
-588 * from the main flusher run loop and
we got the entry to flush by calling
-589 * poll on the flush queue (which
removed it).
-590 * @param forceFlushAllStores whether
we want to flush all store.
-591 * @return true if the region was
successfully flushed, false otherwise. If
-592 * false, there will be accompanying
log messages explaining why the region was
-593 * not flushed.
-594 */
-595 private boolean flushRegion(HRegion
region, boolean emergencyFlush, boolean forceFlushAllStores,
-596 FlushLifeCycleTracker tracker) {
-597synchronized (this.regionsInQueue)
{
-598 FlushRegionEntry fqe =
this.regionsInQueue.remove(region);
-599 // Use the start time of the
FlushRegionEntry if available
-600 if (fqe != null &&
emergencyFlush) {
-601// Need to remove from region
from delay queue. When NOT an
-602// emergencyFlush, then item was
removed via a flushQueue.poll.
-603flushQueue.remove(fqe);
-604 }
-605}
-606
-607tracker.beforeExecution();
-608lock.readLock().lock();
-609try {
-610 notifyFlushRequest(region,
emergencyFlush);
-611 FlushResult flushResult =
region.flushcache(forceFlushAllStores, false, tracker);
-612 boolean shouldCompact =
flushResult.isCompactionNeeded();
-613 // We just want to check the size
-614 boolean shouldSplit =
region.checkSplit() != null;
-615 if (shouldSplit) {
-616
this.server.compactSplitThread.requestSplit(region);
-617 } else if (shouldCompact) {
-618
server.compactSplitThread.requestSystemCompaction(region,
Thread.currentThread().getName());
-619 }
-620} catch (DroppedSnapshotException ex)
{
-621 // Cache flush can fail in a few
places. If it fails in a critical
-622 // section, we get a
DroppedSnapshotException and a replay of wal
-623 // is required. Currently the only
way to do this is a restart of
-624 // the server. Abort because hdfs
is probably bad (HBASE-644 is a case
-625 // where hdfs was bad but passed
the hdfs check).
-626 server.abort("Replay of WAL
required. Forcing server shutdown", ex);
-627 return false;
-628} catch (IOException ex) {
-629 ex = ex instanceof RemoteException
? ((RemoteException) ex).unwrapRemoteException() : ex;
-630 LOG.error(
-631"Cache flush failed"
-632+ (region != null ? (" for
region " +
-633
Bytes.toStringBinary(region.getRegionInfo().getRegionName()))
-634 : ""), ex);
-635 if (!server.checkFileSystem()) {
-636return false;
-637
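The hunk above implements a delayed requeue: when a region has too many store files, the entry goes back on the flush queue with a delay of blockingWaitTime/100 ms and the call reports success so the caller moves on. A minimal sketch of the same pattern, assuming a DelayQueue-backed flush queue; FlushEntry here is an illustrative stand-in, not the HBase class:

  import java.util.concurrent.Delayed;
  import java.util.concurrent.TimeUnit;

  // Entries become visible to DelayQueue.poll() only after their delay elapses.
  class FlushEntry implements Delayed {
    private final String region;
    private long whenNanos = System.nanoTime(); // absolute eligibility time
    private int requeueCount;

    FlushEntry(String region) {
      this.region = region;
    }

    // Mirrors FlushRegionEntry.requeue(ms): push eligibility into the future.
    FlushEntry requeue(long delayMs) {
      this.whenNanos = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(delayMs);
      this.requeueCount++;
      return this;
    }

    int getRequeueCount() { return requeueCount; }

    @Override
    public long getDelay(TimeUnit unit) {
      return unit.convert(whenNanos - System.nanoTime(), TimeUnit.NANOSECONDS);
    }

    @Override
    public int compareTo(Delayed other) {
      return Long.compare(getDelay(TimeUnit.NANOSECONDS),
          other.getDelay(TimeUnit.NANOSECONDS));
    }
  }

With a java.util.concurrent.DelayQueue<FlushEntry> as the flush queue, a requeued entry only comes back out of poll() after its delay, so the blockingStoreFiles check above is retried periodically until blockingWaitTime has elapsed.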
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html
--
diff --git
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html
index 2510283..418c60c 100644
---
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html
+++
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html
@@ -77,77 +77,77 @@
069import
org.apache.hadoop.hbase.client.RowMutations;
070import
org.apache.hadoop.hbase.client.Scan;
071import
org.apache.hadoop.hbase.client.Table;
-072import
org.apache.hadoop.hbase.filter.BinaryComparator;
-073import
org.apache.hadoop.hbase.filter.Filter;
-074import
org.apache.hadoop.hbase.filter.FilterAllFilter;
-075import
org.apache.hadoop.hbase.filter.FilterList;
-076import
org.apache.hadoop.hbase.filter.PageFilter;
-077import
org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-078import
org.apache.hadoop.hbase.filter.WhileMatchFilter;
-079import
org.apache.hadoop.hbase.io.compress.Compression;
-080import
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-081import
org.apache.hadoop.hbase.io.hfile.RandomDistribution;
-082import
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-083import
org.apache.hadoop.hbase.regionserver.BloomType;
-084import
org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-085import
org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
-086import
org.apache.hadoop.hbase.trace.SpanReceiverHost;
-087import
org.apache.hadoop.hbase.trace.TraceUtil;
-088import
org.apache.hadoop.hbase.util.ByteArrayHashKey;
-089import
org.apache.hadoop.hbase.util.Bytes;
-090import
org.apache.hadoop.hbase.util.Hash;
-091import
org.apache.hadoop.hbase.util.MurmurHash;
-092import
org.apache.hadoop.hbase.util.Pair;
-093import
org.apache.hadoop.hbase.util.YammerHistogramUtils;
-094import
org.apache.hadoop.io.LongWritable;
-095import org.apache.hadoop.io.Text;
-096import org.apache.hadoop.mapreduce.Job;
-097import
org.apache.hadoop.mapreduce.Mapper;
-098import
org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-099import
org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-100import
org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
-101import org.apache.hadoop.util.Tool;
-102import
org.apache.hadoop.util.ToolRunner;
-103import
org.apache.htrace.core.ProbabilitySampler;
-104import org.apache.htrace.core.Sampler;
-105import
org.apache.htrace.core.TraceScope;
-106import
org.apache.yetus.audience.InterfaceAudience;
-107import org.slf4j.Logger;
-108import org.slf4j.LoggerFactory;
-109import
org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
-110import
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-111
-112/**
-113 * Script used to evaluate HBase
performance and scalability. Runs an HBase
-114 * client that steps through one of a set
of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random
writes test, etc.). Pass on the
-116 * command-line which test to run and how
many clients are participating in
-117 * this experiment. Run {@code
PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * <p>This class sets up and runs
the evaluation programs described in
-120 * Section 7, <i>Performance
Evaluation</i>, of the <a
-121 *
href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
-122 * paper, pages 8-10.
-123 *
-124 * <p>By default, runs as a
mapreduce job where each mapper runs a single test
-125 * client. Can also run as a
non-mapreduce, multithreaded application by
-126 * specifying {@code --nomapred}. Each
client does about 1GB of data, unless
-127 * specified otherwise.
-128 */
-129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-130public class PerformanceEvaluation
extends Configured implements Tool {
-131 static final String RANDOM_SEEK_SCAN =
"randomSeekScan";
-132 static final String RANDOM_READ =
"randomRead";
-133 private static final Logger LOG =
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-134 private static final ObjectMapper
MAPPER = new ObjectMapper();
-135 static {
-136
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-137 }
-138
-139 public static final String TABLE_NAME =
"TestTable";
-140 public static final byte[] FAMILY_NAME
= Bytes.toBytes("info");
-141 public static final byte [] COLUMN_ZERO
= Bytes.toBytes("" + 0);
-142 public static final byte []
QUALIFIER_NAME = COLUMN_ZERO;
+072import
org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+073import
org.apache.hadoop.hbase.filter.BinaryComparator;
+074import
org.apache.hadoop.hbase.filter.Filter;
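Since PerformanceEvaluation extends Configured and implements Tool (per the source above), it can also be launched programmatically. A hedged sketch, assuming the Configuration-taking constructor and the `--nomapred <test> <nclients>` argument order implied by the javadoc; the test name "randomRead" comes from the RANDOM_READ constant shown above:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.PerformanceEvaluation;
  import org.apache.hadoop.util.ToolRunner;

  public class PeLauncher {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Run the multithreaded (non-MapReduce) random-read test with one client.
      int rc = ToolRunner.run(conf, new PerformanceEvaluation(conf),
          new String[] { "--nomapred", "randomRead", "1" });
      System.exit(rc);
    }
  }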
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.html
--
diff --git
a/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.html
b/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.html
index 386285f..ae1e807 100644
---
a/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.html
+++
b/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.html
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
PrevClass
-NextClass
+NextClass
Frames
@@ -74,14 +74,14 @@ var activeTableTab = "activeTableTab";
Summary:
-Nested|
-Field|
+Nested|
+Field|
Constr|
Method
Detail:
-Field|
+Field|
Constr|
Method
@@ -142,7 +142,7 @@ var activeTableTab = "activeTableTab";
-public static class TestReplicator.FailureInjectingReplicationEndpointForTest
+public static class TestReplicator.FailureInjectingReplicationEndpointForTest
extends TestReplicator.ReplicationEndpointForTest
@@ -156,35 +156,6 @@ extends
-Nested Classes
-
-Modifier and Type
-Class and Description
-
-
-(package private) static class
-TestReplicator.FailureInjectingReplicationEndpointForTest.FailureInjectingBlockingInterface
-
-
-class
-TestReplicator.FailureInjectingReplicationEndpointForTest.FailureInjectingReplicatorForTest
-
-
-
-
-
-
-Nested classes/interfaces inherited from
classorg.apache.hadoop.hbase.replication.regionserver.TestReplicator.ReplicationEndpointForTest
-TestReplicator.ReplicationEndpointForTest.ReplicatorForTest
-
-
-
-
-
-Nested classes/interfaces inherited from
classorg.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint
-org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.Replicator
-
@@ -214,6 +185,24 @@ extends
+Fields
+
+Modifier and Type
+Field and Description
+
+
+private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true;
title="class or interface in
java.util.concurrent.atomic">AtomicBoolean
+failNext
+
+
+
+
+
+
+Fields inherited from
classorg.apache.hadoop.hbase.replication.regionserver.TestReplicator.ReplicationEndpointForTest
+batchCount,
entriesCount
+
@@ -253,7 +242,7 @@ extends Method and Description
-protected
org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.Replicator
+protected https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true;
title="class or interface in java.util.concurrent">Callablehttps://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
title="class or interface in java.lang">Integer
createReplicator(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in
java.util">Listorg.apache.hadoop.hbase.wal.WAL.Entryentries,
intordinal)
@@ -270,7 +259,7 @@ extends
@@ -315,6 +304,23 @@ extends
+
+
+
+
+
+Field Detail
+
+
+
+
+
+failNext
+private finalhttps://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true;
title="class or interface in java.util.concurrent.atomic">AtomicBoolean failNext
+
+
+
+
@@ -327,7 +333,7 @@ extends
FailureInjectingReplicationEndpointForTest
-publicFailureInjectingReplicationEndpointForTest()
+publicFailureInjectingReplicationEndpointForTest()
@@ -344,8 +350,8 @@ extends
createReplicator
-protectedorg.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.ReplicatorcreateReplicator(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in
java.util">Listorg.apache.hadoop.hbase.wal.WAL.Entryentries,
-
intordinal)
+protectedhttps://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true;
title="class or interface in java.util.concurrent">Callablehttps://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
title="class or interface in java.lang">IntegercreateReplicator(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in
java.util">Listorg.apache.hadoop.hbase.wal.WAL.Entryentries,
+ intordinal)
Overrides:
createReplicatorin
classTestReplicator.ReplicationEndpointForTest
@@ -381,7 +387,7 @@ extends
PrevClass
-NextClass
+NextClass
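The signature change in this hunk replaces the dedicated HBaseInterClusterReplicationEndpoint.Replicator type with a plain java.util.concurrent.Callable<Integer>. A sketch of what a failure-injecting override can look like under the new contract; everything other than the createReplicator shape and the failNext flag is illustrative (List<Object> stands in for List<WAL.Entry>):

  import java.io.IOException;
  import java.util.List;
  import java.util.concurrent.Callable;
  import java.util.concurrent.atomic.AtomicBoolean;

  abstract class FailureInjectingEndpointSketch {
    private final AtomicBoolean failNext = new AtomicBoolean(false);

    void injectFailureOnNextBatch() { failNext.set(true); }

    protected Callable<Integer> createReplicator(List<Object> entries, int ordinal) {
      return () -> {
        if (failNext.compareAndSet(true, false)) {
          // Simulate a transient shipping failure for exactly one batch.
          throw new IOException("injected failure for batch " + ordinal);
        }
        replicate(entries);  // delegate to the real shipping logic
        return ordinal;      // report which batch completed
      };
    }

    abstract void replicate(List<Object> entries) throws IOException;
  }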
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
index 8302e28..c370eb9 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
@@ -2113,3031 +2113,3033 @@
2105
errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
2106tableName + " unable to
delete dangling table state " + tableState);
2107 }
-2108} else {
-2109
errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
-2110 tableName + " has dangling
table state " + tableState);
-2111}
-2112 }
-2113}
-2114// check that all tables have
states
-2115for (TableName tableName :
tablesInfo.keySet()) {
-2116 if (isTableIncluded(tableName)
&& !tableStates.containsKey(tableName)) {
-2117if (fixMeta) {
-2118
MetaTableAccessor.updateTableState(connection, tableName,
TableState.State.ENABLED);
-2119 TableState newState =
MetaTableAccessor.getTableState(connection, tableName);
-2120 if (newState == null) {
-2121
errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2122"Unable to change state
for table " + tableName + " in meta ");
-2123 }
-2124} else {
-2125
errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2126 tableName + " has no state
in meta ");
-2127}
-2128 }
-2129}
-2130 }
-2131
-2132 private void preCheckPermission()
throws IOException, AccessDeniedException {
-2133if
(shouldIgnorePreCheckPermission()) {
-2134 return;
-2135}
-2136
-2137Path hbaseDir =
FSUtils.getRootDir(getConf());
-2138FileSystem fs =
hbaseDir.getFileSystem(getConf());
-2139UserProvider userProvider =
UserProvider.instantiate(getConf());
-2140UserGroupInformation ugi =
userProvider.getCurrent().getUGI();
-2141FileStatus[] files =
fs.listStatus(hbaseDir);
-2142for (FileStatus file : files) {
-2143 try {
-2144FSUtils.checkAccess(ugi, file,
FsAction.WRITE);
-2145 } catch (AccessDeniedException
ace) {
-2146LOG.warn("Got
AccessDeniedException when preCheckPermission ", ace);
-2147
errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " +
ugi.getUserName()
-2148 + " does not have write perms
to " + file.getPath()
-2149 + ". Please rerun hbck as hdfs
user " + file.getOwner());
-2150throw ace;
-2151 }
-2152}
-2153 }
-2154
-2155 /**
-2156 * Deletes region from meta table
-2157 */
-2158 private void deleteMetaRegion(HbckInfo
hi) throws IOException {
-2159
deleteMetaRegion(hi.metaEntry.getRegionName());
-2160 }
-2161
-2162 /**
-2163 * Deletes region from meta table
-2164 */
-2165 private void deleteMetaRegion(byte[]
metaKey) throws IOException {
-2166Delete d = new Delete(metaKey);
-2167meta.delete(d);
-2168LOG.info("Deleted " +
Bytes.toString(metaKey) + " from META" );
-2169 }
-2170
-2171 /**
-2172 * Reset the split parent region info
in meta table
-2173 */
-2174 private void resetSplitParent(HbckInfo
hi) throws IOException {
-2175RowMutations mutations = new
RowMutations(hi.metaEntry.getRegionName());
-2176Delete d = new
Delete(hi.metaEntry.getRegionName());
-2177
d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
-2178
d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
-2179mutations.add(d);
-2180
-2181RegionInfo hri =
RegionInfoBuilder.newBuilder(hi.metaEntry)
-2182.setOffline(false)
-2183.setSplit(false)
-2184.build();
-2185Put p =
MetaTableAccessor.makePutFromRegionInfo(hri,
EnvironmentEdgeManager.currentTime());
-2186mutations.add(p);
-2187
-2188meta.mutateRow(mutations);
-2189LOG.info("Reset split parent " +
hi.metaEntry.getRegionNameAsString() + " in META" );
-2190 }
-2191
-2192 /**
-2193 * This backwards-compatibility
wrapper for permanently offlining a region
-2194 * that should not be alive. If the
region server does not support the
-2195 * "offline" method, it will use the
closest unassign method instead. This
-2196 * will basically work until one
attempts to disable or delete the affected
-2197 * table. The problem has to do with
in-memory only master state, so
-2198 * restarting the HMaster or failing
over to another should fix this.
-2199 */
-2200 private void offline(byte[]
regionName) throws IOException {
-2201String regionString =
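The resetSplitParent code above relies on RowMutations to apply a Delete (clearing the SPLITA/SPLITB qualifiers) and a Put (the rewritten RegionInfo) atomically against a single meta row. A condensed sketch of that pattern; the caller is assumed to supply the Table handle and the prepared Put:

  import java.io.IOException;
  import org.apache.hadoop.hbase.HConstants;
  import org.apache.hadoop.hbase.client.Delete;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.RowMutations;
  import org.apache.hadoop.hbase.client.Table;

  final class SplitParentResetSketch {
    static void clearSplitColumns(Table meta, byte[] regionName, Put regionInfoPut)
        throws IOException {
      RowMutations mutations = new RowMutations(regionName);
      Delete d = new Delete(regionName);
      d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
      d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
      mutations.add(d);
      mutations.add(regionInfoPut);  // RegionInfo rewritten with split=false
      meta.mutateRow(mutations);     // both mutations commit atomically on the row
    }
  }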
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index 79bf967..c8b113b 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -115,3514 +115,3517 @@
107import
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
108import
org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
109import
org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
-110import
org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-111import
org.apache.hadoop.hbase.master.cleaner.LogCleaner;
-112import
org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
-113import
org.apache.hadoop.hbase.master.locking.LockManager;
-114import
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-115import
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
-116import
org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-117import
org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
-118import
org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
-119import
org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
-120import
org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure;
-121import
org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
-122import
org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
-123import
org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
-124import
org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
-125import
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-126import
org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
-127import
org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
-128import
org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
-129import
org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
-130import
org.apache.hadoop.hbase.master.procedure.RecoverMetaProcedure;
-131import
org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
-132import
org.apache.hadoop.hbase.master.replication.AddPeerProcedure;
-133import
org.apache.hadoop.hbase.master.replication.DisablePeerProcedure;
-134import
org.apache.hadoop.hbase.master.replication.EnablePeerProcedure;
-135import
org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure;
-136import
org.apache.hadoop.hbase.master.replication.RemovePeerProcedure;
-137import
org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
-138import
org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure;
-139import
org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-140import
org.apache.hadoop.hbase.mob.MobConstants;
-141import
org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
-142import
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-143import
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-144import
org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
-145import
org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
-146import
org.apache.hadoop.hbase.procedure2.LockedResource;
-147import
org.apache.hadoop.hbase.procedure2.Procedure;
-148import
org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-149import
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-150import
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
-151import
org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
-152import
org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-153import
org.apache.hadoop.hbase.quotas.MasterQuotaManager;
-154import
org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver;
-155import
org.apache.hadoop.hbase.quotas.QuotaObserverChore;
-156import
org.apache.hadoop.hbase.quotas.QuotaUtil;
-157import
org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore;
-158import
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier;
-159import
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory;
-160import
org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
-161import
org.apache.hadoop.hbase.regionserver.HRegionServer;
-162import
org.apache.hadoop.hbase.regionserver.HStore;
-163import
org.apache.hadoop.hbase.regionserver.RSRpcServices;
-164import
org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
-165import
org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
-166import
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
-167import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/org/apache/hadoop/hbase/client/AsyncTableImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncTableImpl.html
b/devapidocs/org/apache/hadoop/hbase/client/AsyncTableImpl.html
index 1643124..a0f8712 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncTableImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncTableImpl.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
@InterfaceAudience.Private
-class AsyncTableImpl
+class AsyncTableImpl
extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
implements AsyncTableScanResultConsumer
Just a wrapper of RawAsyncTableImpl. The
difference is that users need to provide a
@@ -384,7 +384,7 @@ implements
rawTable
-private finalAsyncTableAdvancedScanResultConsumer rawTable
+private finalAsyncTableAdvancedScanResultConsumer rawTable
@@ -393,7 +393,7 @@ implements
pool
-private finalhttps://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
title="class or interface in java.util.concurrent">ExecutorService pool
+private finalhttps://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
title="class or interface in java.util.concurrent">ExecutorService pool
@@ -410,7 +410,7 @@ implements
AsyncTableImpl
-AsyncTableImpl(AsyncConnectionImplconn,
+AsyncTableImpl(AsyncConnectionImplconn,
AsyncTableAdvancedScanResultConsumerrawTable,
https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
title="class or interface in
java.util.concurrent">ExecutorServicepool)
@@ -429,7 +429,7 @@ implements
getName
-publicTableNamegetName()
+publicTableNamegetName()
Description copied from
interface:AsyncTable
Gets the fully qualified table name instance of this
table.
@@ -444,7 +444,7 @@ implements
getConfiguration
-publicorg.apache.hadoop.conf.ConfigurationgetConfiguration()
+publicorg.apache.hadoop.conf.ConfigurationgetConfiguration()
Description copied from
interface:AsyncTable
Returns the Configuration object used by this
instance.
@@ -461,7 +461,7 @@ implements
getRpcTimeout
-publiclonggetRpcTimeout(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
title="class or interface in
java.util.concurrent">TimeUnitunit)
+publiclonggetRpcTimeout(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
title="class or interface in
java.util.concurrent">TimeUnitunit)
Description copied from
interface:AsyncTable
Get timeout of each rpc request in this Table instance. It
will be overridden by a more
specific rpc timeout config such as readRpcTimeout or writeRpcTimeout.
@@ -484,7 +484,7 @@ implements
getReadRpcTimeout
-publiclonggetReadRpcTimeout(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
title="class or interface in
java.util.concurrent">TimeUnitunit)
+publiclonggetReadRpcTimeout(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
title="class or interface in
java.util.concurrent">TimeUnitunit)
Description copied from
interface:AsyncTable
Get timeout of each rpc read request in this Table
instance.
@@ -503,7 +503,7 @@ implements
getWriteRpcTimeout
-publiclonggetWriteRpcTimeout(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
title="class or interface in
java.util.concurrent">TimeUnitunit)
+publiclonggetWriteRpcTimeout(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
title="class or interface in
java.util.concurrent">TimeUnitunit)
Description copied from
interface:AsyncTable
Get timeout of each rpc write request in this Table
instance.
@@ -522,7 +522,7 @@ implements
getOperationTimeout
-publiclonggetOperationTimeout(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
title="class or interface in
java.util.concurrent">TimeUnitunit)
+publiclonggetOperationTimeout(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
title="class or interface in
java.util.concurrent">TimeUnitunit)
Description copied from
interface:AsyncTable
Get timeout of each operation in Table instance.
@@ -541,7 +541,7 @@ implements
getScanTimeout
-publiclonggetScanTimeout(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
title="class or interface in
java.util.concurrent">TimeUnitunit)
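AsyncTableImpl, per the hunk above, wraps RawAsyncTableImpl together with a user-supplied ExecutorService so completions are delivered on the pool rather than on internal RPC threads. A minimal sketch of that wrapping trick (names here are illustrative, not the HBase internals):

  import java.util.concurrent.CompletableFuture;
  import java.util.concurrent.ExecutorService;

  final class PoolWrappingTable {
    private final ExecutorService pool;

    PoolWrappingTable(ExecutorService pool) {
      this.pool = pool;
    }

    // Re-complete every future from the raw table on the user's pool, so
    // user callbacks chained on the result never block RPC event loops.
    <T> CompletableFuture<T> wrap(CompletableFuture<T> rawFuture) {
      CompletableFuture<T> result = new CompletableFuture<>();
      rawFuture.whenComplete((value, error) -> pool.execute(() -> {
        if (error != null) {
          result.completeExceptionally(error);
        } else {
          result.complete(value);
        }
      }));
      return result;
    }
  }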
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
index ef30022..abeccf1 100644
---
a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
+++
b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
@@ -162,11 +162,11 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
ImmutableBytesWritable
-TableSnapshotInputFormat.TableSnapshotRecordReader.createKey()
+TableRecordReader.createKey()
ImmutableBytesWritable
-TableRecordReader.createKey()
+TableSnapshotInputFormat.TableSnapshotRecordReader.createKey()
ImmutableBytesWritable
@@ -183,9 +183,11 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
org.apache.hadoop.mapred.RecordReaderImmutableBytesWritable,Result
-TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplitsplit,
+TableInputFormatBase.getRecordReader(org.apache.hadoop.mapred.InputSplitsplit,
org.apache.hadoop.mapred.JobConfjob,
-
org.apache.hadoop.mapred.Reporterreporter)
+ org.apache.hadoop.mapred.Reporterreporter)
+Builds a TableRecordReader.
+
org.apache.hadoop.mapred.RecordReaderImmutableBytesWritable,Result
@@ -195,11 +197,9 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
org.apache.hadoop.mapred.RecordReaderImmutableBytesWritable,Result
-TableInputFormatBase.getRecordReader(org.apache.hadoop.mapred.InputSplitsplit,
+TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplitsplit,
org.apache.hadoop.mapred.JobConfjob,
- org.apache.hadoop.mapred.Reporterreporter)
-Builds a TableRecordReader.
-
+
org.apache.hadoop.mapred.Reporterreporter)
@@ -218,10 +218,12 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
void
-RowCounter.RowCounterMapper.map(ImmutableBytesWritablerow,
- Resultvalues,
+IdentityTableMap.map(ImmutableBytesWritablekey,
+ Resultvalue,
org.apache.hadoop.mapred.OutputCollectorImmutableBytesWritable,Resultoutput,
- org.apache.hadoop.mapred.Reporterreporter)
+ org.apache.hadoop.mapred.Reporterreporter)
+Pass the key, value to reduce
+
void
@@ -234,21 +236,19 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
void
-IdentityTableMap.map(ImmutableBytesWritablekey,
- Resultvalue,
+RowCounter.RowCounterMapper.map(ImmutableBytesWritablerow,
+ Resultvalues,
org.apache.hadoop.mapred.OutputCollectorImmutableBytesWritable,Resultoutput,
- org.apache.hadoop.mapred.Reporterreporter)
-Pass the key, value to reduce
-
+ org.apache.hadoop.mapred.Reporterreporter)
boolean
-TableSnapshotInputFormat.TableSnapshotRecordReader.next(ImmutableBytesWritablekey,
+TableRecordReader.next(ImmutableBytesWritablekey,
Resultvalue)
boolean
-TableRecordReader.next(ImmutableBytesWritablekey,
+TableSnapshotInputFormat.TableSnapshotRecordReader.next(ImmutableBytesWritablekey,
Resultvalue)
@@ -281,10 +281,12 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
void
-RowCounter.RowCounterMapper.map(ImmutableBytesWritablerow,
- Resultvalues,
+IdentityTableMap.map(ImmutableBytesWritablekey,
+ Resultvalue,
org.apache.hadoop.mapred.OutputCollectorImmutableBytesWritable,Resultoutput,
- org.apache.hadoop.mapred.Reporterreporter)
+ org.apache.hadoop.mapred.Reporterreporter)
+Pass the key, value to reduce
+
void
@@ -297,12 +299,10 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
void
-IdentityTableMap.map(ImmutableBytesWritablekey,
- Resultvalue,
+RowCounter.RowCounterMapper.map(ImmutableBytesWritablerow,
+ Resultvalues,
org.apache.hadoop.mapred.OutputCollectorImmutableBytesWritable,Resultoutput,
- org.apache.hadoop.mapred.Reporterreporter)
-Pass the key, value to reduce
-
+ org.apache.hadoop.mapred.Reporterreporter)
void
@@ -349,7 +349,7 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
private ImmutableBytesWritable
-TableRecordReaderImpl.key
+MultithreadedTableMapper.SubMapRecordReader.key
private ImmutableBytesWritable
@@ -357,7 +357,7 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
private ImmutableBytesWritable
-MultithreadedTableMapper.SubMapRecordReader.key
+TableRecordReaderImpl.key
(package private) ImmutableBytesWritable
@@ -427,33 +427,33 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
ImmutableBytesWritable
-TableSnapshotInputFormat.TableSnapshotRegionRecordReader.getCurrentKey()
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
index 09c0b9c..0e0fe7d 100644
---
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
+++
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
@@ -113,7 +113,8 @@ var activeTableTab = "activeTableTab";
-public class WriteHeavyIncrementObserver
+@InterfaceAudience.Private
+public class WriteHeavyIncrementObserver
extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
implements RegionCoprocessor, RegionObserver
An example of implementing a counter whose reads are much
less frequent than its writes, i.e., write heavy.
@@ -360,7 +361,7 @@ implements
mask
-private finalint mask
+private finalint mask
@@ -369,7 +370,7 @@ implements
lastTimestamps
-private finalorg.apache.commons.lang3.mutable.MutableLong[] lastTimestamps
+private finalorg.apache.commons.lang3.mutable.MutableLong[] lastTimestamps
@@ -386,7 +387,7 @@ implements
WriteHeavyIncrementObserver
-publicWriteHeavyIncrementObserver()
+publicWriteHeavyIncrementObserver()
@@ -403,7 +404,7 @@ implements
getRegionObserver
-publichttps://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
title="class or interface in java.util">OptionalRegionObservergetRegionObserver()
+publichttps://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
title="class or interface in java.util">OptionalRegionObservergetRegionObserver()
Specified by:
getRegionObserverin
interfaceRegionCoprocessor
@@ -416,7 +417,7 @@ implements
preFlushScannerOpen
-publicvoidpreFlushScannerOpen(ObserverContextRegionCoprocessorEnvironmentc,
+publicvoidpreFlushScannerOpen(ObserverContextRegionCoprocessorEnvironmentc,
Storestore,
ScanOptionsoptions,
FlushLifeCycleTrackertracker)
@@ -442,7 +443,7 @@ implements
createCell
-privateCellcreateCell(byte[]row,
+privateCellcreateCell(byte[]row,
byte[]family,
byte[]qualifier,
longts,
@@ -455,7 +456,7 @@ implements
wrap
-privateInternalScannerwrap(byte[]family,
+privateInternalScannerwrap(byte[]family,
InternalScannerscanner)
@@ -465,7 +466,7 @@ implements
preFlush
-publicInternalScannerpreFlush(ObserverContextRegionCoprocessorEnvironmentc,
+publicInternalScannerpreFlush(ObserverContextRegionCoprocessorEnvironmentc,
Storestore,
InternalScannerscanner,
FlushLifeCycleTrackertracker)
@@ -494,7 +495,7 @@ implements
preCompactScannerOpen
-publicvoidpreCompactScannerOpen(ObserverContextRegionCoprocessorEnvironmentc,
+publicvoidpreCompactScannerOpen(ObserverContextRegionCoprocessorEnvironmentc,
Storestore,
ScanTypescanType,
ScanOptionsoptions,
@@ -525,7 +526,7 @@ implements
preCompact
-publicInternalScannerpreCompact(ObserverContextRegionCoprocessorEnvironmentc,
+publicInternalScannerpreCompact(ObserverContextRegionCoprocessorEnvironmentc,
Storestore,
InternalScannerscanner,
ScanTypescanType,
@@ -564,7 +565,7 @@ implements
preMemStoreCompactionCompactScannerOpen
-publicvoidpreMemStoreCompactionCompactScannerOpen(ObserverContextRegionCoprocessorEnvironmentc,
+publicvoidpreMemStoreCompactionCompactScannerOpen(ObserverContextRegionCoprocessorEnvironmentc,
Storestore,
ScanOptionsoptions)
throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
title="class or interface in java.io">IOException
@@ -591,7 +592,7 @@ implements
preMemStoreCompactionCompact
-publicInternalScannerpreMemStoreCompactionCompact(ObserverContextRegionCoprocessorEnvironmentc,
+publicInternalScannerpreMemStoreCompactionCompact(ObserverContextRegionCoprocessorEnvironmentc,
Storestore,
InternalScannerscanner)
throws
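The observer above intercepts flush and compaction scanners so that the many small delta cells written for one counter column collapse into a single cell holding their sum. A sketch of just the summing step, assuming (as this coprocessor does for its own column) that each cell value is an 8-byte long delta:

  import java.util.List;
  import org.apache.hadoop.hbase.Cell;
  import org.apache.hadoop.hbase.CellUtil;
  import org.apache.hadoop.hbase.util.Bytes;

  final class DeltaFolder {
    // Fold a run of delta cells for one row/qualifier into a single total.
    static long sumDeltas(List<Cell> deltas) {
      long sum = 0;
      for (Cell c : deltas) {
        sum += Bytes.toLong(CellUtil.cloneValue(c));
      }
      return sum;
    }
  }

At flush time the wrapped scanner would emit one cell carrying sumDeltas(...) in place of the whole run, which is what makes increments cheap to write and still cheap enough to read.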
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/FSHLog.SyncRunner.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/FSHLog.SyncRunner.html
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/FSHLog.SyncRunner.html
index 9971079..03c8b000 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/FSHLog.SyncRunner.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/FSHLog.SyncRunner.html
@@ -49,1067 +49,1082 @@
041import org.apache.hadoop.fs.Path;
042import
org.apache.hadoop.hbase.HConstants;
043import
org.apache.hadoop.hbase.client.RegionInfo;
-044import
org.apache.hadoop.hbase.trace.TraceUtil;
-045import
org.apache.hadoop.hbase.util.Bytes;
-046import
org.apache.hadoop.hbase.util.ClassSize;
-047import
org.apache.hadoop.hbase.util.FSUtils;
-048import
org.apache.hadoop.hbase.util.HasThread;
-049import
org.apache.hadoop.hbase.util.Threads;
-050import
org.apache.hadoop.hbase.wal.FSHLogProvider;
-051import
org.apache.hadoop.hbase.wal.WALEdit;
-052import
org.apache.hadoop.hbase.wal.WALKeyImpl;
-053import
org.apache.hadoop.hbase.wal.WALProvider.Writer;
-054import
org.apache.hadoop.hdfs.DFSOutputStream;
-055import
org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-056import
org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-057import
org.apache.htrace.core.TraceScope;
-058import
org.apache.yetus.audience.InterfaceAudience;
-059import org.slf4j.Logger;
-060import org.slf4j.LoggerFactory;
-061import
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-062
-063/**
-064 * The default implementation of FSWAL.
-065 */
-066@InterfaceAudience.Private
-067public class FSHLog extends
AbstractFSWAL<Writer> {
-068 // IMPLEMENTATION NOTES:
-069 //
-070 // At the core is a ring buffer. Our
ring buffer is the LMAX Disruptor. It tries to
-071 // minimize synchronizations and
volatile writes when multiple contending threads as is the case
-072 // here appending and syncing on a
single WAL. The Disruptor is configured to handle multiple
-073 // producers but it has one consumer
only (the producers in HBase are IPC Handlers calling append
-074 // and then sync). The single
consumer/writer pulls the appends and syncs off the ring buffer.
-075 // When a handler calls sync, it is
given back a future. The producer 'blocks' on the future so
-076 // it does not return until the sync
completes. The future is passed over the ring buffer from
-077 // the producer/handler to the consumer
thread where it does its best to batch up the producer
-078 // syncs so one WAL sync actually spans
multiple producer sync invocations. How well the
-079 // batching works depends on the write
rate; i.e. we tend to batch more in times of
-080 // high writes/syncs.
-081 //
-082 // Calls to append now also wait until
the append has been done on the consumer side of the
-083 // disruptor. We used to not wait but
it makes the implementation easier to grok if we have
-084 // the region edit/sequence id after
the append returns.
-085 //
-086 // TODO: Handlers need to coordinate
appending AND syncing. Can we have the threads contend
-087 // once only? Probably hard given syncs
take way longer than an append.
-088 //
-089 // The consumer threads pass the syncs
off to multiple syncing threads in a round robin fashion
-090 // to ensure we keep up back-to-back FS
sync calls (FS sync calls are the long poll writing the
-091 // WAL). The consumer thread passes the
futures to the sync threads for it to complete
-092 // the futures when done.
-093 //
-094 // The 'sequence' in the below is the
sequence of the append/sync on the ringbuffer. It
-095 // acts as a sort-of transaction id. It
is always incrementing.
-096 //
-097 // The RingBufferEventHandler class
hosts the ring buffer consuming code. The threads that
-098 // do the actual FS sync are
implementations of SyncRunner. SafePointZigZagLatch is a
-099 // synchronization class used to halt
the consumer at a safe point -- just after all outstanding
-100 // syncs and appends have completed --
so the log roller can swap the WAL out under it.
-101 //
-102 // We use ring buffer sequence as txid
of FSWALEntry and SyncFuture.
-103 private static final Logger LOG =
LoggerFactory.getLogger(FSHLog.class);
-104
-105 /**
-106 * The nexus at which all incoming
handlers meet. Does appends and sync with an ordering. Appends
-107 * and syncs are each put on the ring
which means handlers need to smash up against the ring twice
-108 * (can we make it once only? ... maybe
not since time to append is so different from time to sync
-109 * and sometimes we don't want to sync
or we want to async the sync). The ring is where we make
-110 * sure of our ordering and it is also
where we do batching up of
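The implementation notes above describe sync batching: handlers enqueue one future per sync request, and a consumer thread completes a whole batch with a single filesystem sync. A simplified sketch of that idea using a plain queue in place of the LMAX Disruptor ring buffer (the real code also round-robins across multiple SyncRunner threads, omitted here):

  import java.util.ArrayList;
  import java.util.List;
  import java.util.concurrent.BlockingQueue;
  import java.util.concurrent.CompletableFuture;
  import java.util.concurrent.LinkedBlockingQueue;

  final class SyncBatcher {
    private final BlockingQueue<CompletableFuture<Void>> pending =
        new LinkedBlockingQueue<>();

    // Called by a handler thread; it blocks on (or chains off) the future.
    CompletableFuture<Void> requestSync() {
      CompletableFuture<Void> f = new CompletableFuture<>();
      pending.add(f);
      return f;
    }

    // Single consumer: one expensive FS sync covers every request queued
    // since the last pass, so batching improves as write pressure rises.
    void syncLoop(Runnable fsSync) throws InterruptedException {
      List<CompletableFuture<Void>> batch = new ArrayList<>();
      while (true) {
        batch.add(pending.take());   // wait for at least one request
        pending.drainTo(batch);      // grab everything queued meanwhile
        fsSync.run();
        batch.forEach(f -> f.complete(null));
        batch.clear();
      }
    }
  }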
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
index 3bc66bb..97aa79c 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
@@ -1435,459 +1435,460 @@
1427 */
1428 private void execProcedure(final
RootProcedureState procStack,
1429 final
Procedure<TEnvironment> procedure) {
-1430
Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE);
-1431
-1432// Procedures can suspend
themselves. They skip out by throwing a ProcedureSuspendedException.
-1433// The exception is caught below and
then we hurry to the exit without disturbing state. The
-1434// idea is that the processing of
this procedure will be unsuspended later by an external event
-1435// such the report of a region open.
TODO: Currently, its possible for two worker threads
-1436// to be working on the same
procedure concurrently (locking in procedures is NOT about
-1437// concurrency but about tying an
entity to a procedure; i.e. a region to a particular
-1438// procedure instance). This can
make for issues if both threads are changing state.
-1439// See
env.getProcedureScheduler().wakeEvent(regionNode.getProcedureEvent());
-1440// in
RegionTransitionProcedure#reportTransition for example of Procedure putting
-1441// itself back on the scheduler
making it possible for two threads running against
-1442// the one Procedure. Might be ok if
they are both doing different, idempotent sections.
-1443boolean suspended = false;
-1444
-1445// Whether to 're-' -execute; run
through the loop again.
-1446boolean reExecute = false;
-1447
-1448Procedure<TEnvironment>[]
subprocs = null;
-1449do {
-1450 reExecute = false;
-1451 try {
-1452subprocs =
procedure.doExecute(getEnvironment());
-1453if (subprocs != null &&
subprocs.length == 0) {
-1454 subprocs = null;
-1455}
-1456 } catch
(ProcedureSuspendedException e) {
-1457if (LOG.isTraceEnabled()) {
-1458 LOG.trace("Suspend " +
procedure);
-1459}
-1460suspended = true;
-1461 } catch (ProcedureYieldException
e) {
-1462if (LOG.isTraceEnabled()) {
-1463 LOG.trace("Yield " + procedure
+ ": " + e.getMessage(), e);
-1464}
-1465scheduler.yield(procedure);
-1466return;
-1467 } catch (InterruptedException e)
{
-1468if (LOG.isTraceEnabled()) {
-1469 LOG.trace("Yield interrupt " +
procedure + ": " + e.getMessage(), e);
-1470}
-1471
handleInterruptedException(procedure, e);
-1472scheduler.yield(procedure);
-1473return;
-1474 } catch (Throwable e) {
-1475// Catch NullPointerExceptions
or similar errors...
-1476String msg = "CODE-BUG: Uncaught
runtime exception: " + procedure;
-1477LOG.error(msg, e);
-1478procedure.setFailure(new
RemoteProcedureException(msg, e));
-1479 }
-1480
-1481 if (!procedure.isFailed()) {
-1482if (subprocs != null) {
-1483 if (subprocs.length == 1 &&
subprocs[0] == procedure) {
-1484// Procedure returned
itself. Quick-shortcut for a state machine-like procedure;
-1485// i.e. we go around this
loop again rather than go back out on the scheduler queue.
-1486subprocs = null;
-1487reExecute = true;
-1488if (LOG.isTraceEnabled())
{
-1489 LOG.trace("Short-circuit
to next step on pid=" + procedure.getProcId());
-1490}
-1491 } else {
-1492// Yield the current
procedure, and make the subprocedure runnable
-1493// subprocs may come back
'null'.
-1494subprocs =
initializeChildren(procStack, procedure, subprocs);
-1495LOG.info("Initialized
subprocedures=" +
-1496 (subprocs == null? null:
-1497
Stream.of(subprocs).map(e -> "{" + e.toString() + "}").
-1498
collect(Collectors.toList()).toString()));
-1499 }
-1500} else if (procedure.getState()
== ProcedureState.WAITING_TIMEOUT) {
-1501 if (LOG.isTraceEnabled()) {
-1502LOG.trace("Added to
timeoutExecutor " + procedure);
-1503 }
-1504
timeoutExecutor.add(procedure);
-1505} else if (!suspended) {
-1506 // No subtask, so we are
done
-1507
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/util/FSTableDescriptors.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/util/FSTableDescriptors.html
b/devapidocs/src-html/org/apache/hadoop/hbase/util/FSTableDescriptors.html
index 7226d18..f065ddb 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/FSTableDescriptors.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/FSTableDescriptors.html
@@ -45,733 +45,736 @@
037import org.apache.hadoop.fs.FileSystem;
038import org.apache.hadoop.fs.Path;
039import org.apache.hadoop.fs.PathFilter;
-040import
org.apache.yetus.audience.InterfaceAudience;
-041import org.slf4j.Logger;
-042import org.slf4j.LoggerFactory;
-043import
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-044import
org.apache.hadoop.hbase.client.TableDescriptor;
-045import
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-046import
org.apache.hadoop.hbase.Coprocessor;
-047import
org.apache.hadoop.hbase.exceptions.DeserializationException;
-048import
org.apache.hadoop.hbase.HConstants;
-049import
org.apache.hadoop.hbase.regionserver.BloomType;
-050import
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-051import
org.apache.hbase.thirdparty.com.google.common.primitives.Ints;
-052import
org.apache.hadoop.hbase.TableDescriptors;
-053import
org.apache.hadoop.hbase.TableInfoMissingException;
-054import
org.apache.hadoop.hbase.TableName;
-055
-056/**
-057 * Implementation of {@link
TableDescriptors} that reads descriptors from the
-058 * passed filesystem. It expects
descriptors to be in a file in the
-059 * {@link #TABLEINFO_DIR} subdir of the
table's directory in FS. Can be read-only
-060 * -- i.e. does not modify the
filesystem or can be read and write.
-061 *
-062 * <p>Also has utility for keeping
up the table descriptors tableinfo file.
-063 * The table schema file is kept in the
{@link #TABLEINFO_DIR} subdir
-064 * of the table directory in the
filesystem.
-065 * It has a {@link
#TABLEINFO_FILE_PREFIX} and then a suffix that is the
-066 * edit sequenceid: e.g.
<code>.tableinfo.0000000003</code>. This sequenceid
-067 * is always increasing. It starts at
zero. The table schema file with the
-068 * highest sequenceid has the most recent
schema edit. Usually there is one file
-069 * only, the most recent but there may be
short periods where there are more
-070 * than one file. Old files are
eventually cleaned. Presumption is that there
-071 * will not be lots of concurrent clients
making table schema edits. If so,
-072 * the below needs a bit of a reworking
and perhaps some supporting api in hdfs.
-073 */
-074@InterfaceAudience.Private
-075public class FSTableDescriptors
implements TableDescriptors {
-076 private static final Logger LOG =
LoggerFactory.getLogger(FSTableDescriptors.class);
-077 private final FileSystem fs;
-078 private final Path rootdir;
-079 private final boolean fsreadonly;
-080 private volatile boolean usecache;
-081 private volatile boolean fsvisited;
-082
-083 @VisibleForTesting
-084 long cachehits = 0;
+040import
org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
+041import
org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
+042import
org.apache.yetus.audience.InterfaceAudience;
+043import org.slf4j.Logger;
+044import org.slf4j.LoggerFactory;
+045import
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+046import
org.apache.hadoop.hbase.client.TableDescriptor;
+047import
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+048import
org.apache.hadoop.hbase.Coprocessor;
+049import
org.apache.hadoop.hbase.exceptions.DeserializationException;
+050import
org.apache.hadoop.hbase.HConstants;
+051import
org.apache.hadoop.hbase.regionserver.BloomType;
+052import
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+053import
org.apache.hbase.thirdparty.com.google.common.primitives.Ints;
+054import
org.apache.hadoop.hbase.TableDescriptors;
+055import
org.apache.hadoop.hbase.TableInfoMissingException;
+056import
org.apache.hadoop.hbase.TableName;
+057
+058/**
+059 * Implementation of {@link
TableDescriptors} that reads descriptors from the
+060 * passed filesystem. It expects
descriptors to be in a file in the
+061 * {@link #TABLEINFO_DIR} subdir of the
table's directory in FS. Can be read-only
+062 * -- i.e. does not modify the
filesystem or can be read and write.
+063 *
+064 * <p>Also has utility for keeping
up the table descriptors tableinfo file.
+065 * The table schema file is kept in the
{@link #TABLEINFO_DIR} subdir
+066 * of the table directory in the
filesystem.
+067 * It has a {@link
#TABLEINFO_FILE_PREFIX} and then a suffix that is the
+068 * edit sequenceid: e.g.
<code>.tableinfo.0000000003</code>. This sequenceid
+069 * is always increasing. It
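Given the naming scheme in this javadoc -- a zero-padded, strictly increasing sequence id suffix on each tableinfo file, with the highest suffix holding the current schema -- picking the most recent descriptor reduces to parsing and comparing suffixes. A sketch; the ten-digit padding width is an assumption taken from the example above:

  import java.util.regex.Matcher;
  import java.util.regex.Pattern;

  final class TableInfoName {
    private static final Pattern SUFFIX =
        Pattern.compile("^\\.tableinfo\\.(\\d{10})$");

    // Extract the edit sequence id from a tableinfo file name.
    static int sequenceIdOf(String fileName) {
      Matcher m = SUFFIX.matcher(fileName);
      if (!m.matches()) {
        throw new IllegalArgumentException("not a tableinfo file: " + fileName);
      }
      return Integer.parseInt(m.group(1));
    }

    // Format the next file name after a schema edit.
    static String nameFor(int sequenceId) {
      return String.format(".tableinfo.%010d", sequenceId);
    }
  }

A reader scans the TABLEINFO_DIR listing, maps names through sequenceIdOf, and keeps the maximum; old files are cleaned up eventually, as the javadoc notes.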
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
index 0f17e58..7ae1c82 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -18,7 +18,7 @@
catch(err) {
}
//-->
-var methods =
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":9,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":9,"i35":10,"i36":9,"i37":9,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":41,"i93":41,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10};
+var methods =
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":9,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":9,"i35":10,"i36":9,"i37":9,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":41,"i94":41,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10};
var tabs = {65535:["t0","All Methods"],1:["t1","Static
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete
Methods"],32:["t6","Deprecated Methods"]};
var altColor = "altColor";
var rowColor = "rowColor";
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
@InterfaceAudience.Private
-public class HStore
+public class HStore
extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
implements Store, HeapSize, StoreConfigInformation, PropagatingConfigurationObserver
A Store holds a column family in a Region. It's a memstore
and a set of zero
@@ -242,103 +242,111 @@ implements cryptoContext
+private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true;
title="class or interface in
java.util.concurrent.atomic">AtomicInteger
+currentParallelPutCount
+
+
private HFileDataBlockEncoder
dataBlockEncoder
-
+
static long
DEEP_OVERHEAD
-
+
static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">String
DEFAULT_BLOCK_STORAGE_POLICY
-
+
static int
DEFAULT_BLOCKING_STOREFILE_COUNT
-
+
static int
DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER
-
+
private static int
DEFAULT_FLUSH_RETRIES_NUMBER
-
+
private ColumnFamilyDescriptor
family
-
+
private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListHStoreFile
filesCompacting
-
+
static long
FIXED_OVERHEAD
-
+
private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
title="class or interface in
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
index c7d05d1..abcb738 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
@@ -143,18 +143,18 @@
void
-HFileDataBlockEncoderImpl.saveMetadata(HFile.Writerwriter)
-
-
-void
NoOpDataBlockEncoder.saveMetadata(HFile.Writerwriter)
-
+
void
HFileDataBlockEncoder.saveMetadata(HFile.Writerwriter)
Save metadata in HFile which will be written to disk
+
+void
+HFileDataBlockEncoderImpl.saveMetadata(HFile.Writerwriter)
+
@@ -203,18 +203,18 @@
-void
-RowColBloomContext.addLastBloomKey(HFile.Writerwriter)
+abstract void
+BloomContext.addLastBloomKey(HFile.Writerwriter)
+Adds the last bloom key to the HFile Writer as part of
StorefileWriter close.
+
void
RowBloomContext.addLastBloomKey(HFile.Writerwriter)
-abstract void
-BloomContext.addLastBloomKey(HFile.Writerwriter)
-Adds the last bloom key to the HFile Writer as part of
StorefileWriter close.
-
+void
+RowColBloomContext.addLastBloomKey(HFile.Writerwriter)
static BloomFilterWriter
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
index b55ecd8..e1139cc 100644
---
a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
+++
b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
@@ -106,15 +106,15 @@
+private HFileBlock.Writer
+HFileBlockIndex.BlockIndexWriter.blockWriter
+
+
protected HFileBlock.Writer
HFileWriterImpl.blockWriter
block writer
-
-private HFileBlock.Writer
-HFileBlockIndex.BlockIndexWriter.blockWriter
-
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
index 29c8b1e..fabd03f 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
@@ -136,15 +136,15 @@
HFileContext
-HFileBlockDecodingContext.getHFileContext()
+HFileBlockEncodingContext.getHFileContext()
HFileContext
-HFileBlockDefaultDecodingContext.getHFileContext()
+HFileBlockDecodingContext.getHFileContext()
HFileContext
-HFileBlockEncodingContext.getHFileContext()
+HFileBlockDefaultDecodingContext.getHFileContext()
HFileContext
@@ -224,23 +224,23 @@
private HFileContext
+HFile.WriterFactory.fileContext
+
+
+private HFileContext
HFileBlock.fileContext
Meta data that holds meta information on the
hfileblock.
-
+
private HFileContext
HFileBlock.Writer.fileContext
Meta data that holds information about the hfileblock
-
-private HFileContext
-HFileBlock.FSReaderImpl.fileContext
-
private HFileContext
-HFile.WriterFactory.fileContext
+HFileBlock.FSReaderImpl.fileContext
private HFileContext
@@ -277,20 +277,20 @@
HFileContext
-HFileWriterImpl.getFileContext()
-
-
-HFileContext
HFile.Writer.getFileContext()
Return the file context for the HFile this writer belongs
to
-
+
HFileContext
HFile.Reader.getFileContext()
Return the file context of the HFile this reader belongs
to
+
+HFileContext
+HFileWriterImpl.getFileContext()
+
HFileContext
HFileReaderImpl.getFileContext()
@@ -323,35 +323,35 @@
HFileBlockDecodingContext
-HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContextfileContext)
-
-
-HFileBlockDecodingContext
NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContextmeta)
-
+
HFileBlockDecodingContext
HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContextfileContext)
create a encoder specific decoding context for
reading.
-
-HFileBlockEncodingContext
-HFileDataBlockEncoderImpl.newDataBlockEncodingContext(byte[]dummyHeader,
- HFileContextfileContext)
-
+HFileBlockDecodingContext
+HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContextfileContext)
+
+
HFileBlockEncodingContext
NoOpDataBlockEncoder.newDataBlockEncodingContext(byte[]dummyHeader,
HFileContextmeta)
-
+
HFileBlockEncodingContext
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
index abeccf1..ef30022 100644
---
a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
+++
b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
@@ -162,11 +162,11 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
ImmutableBytesWritable
-TableRecordReader.createKey()
+TableSnapshotInputFormat.TableSnapshotRecordReader.createKey()
ImmutableBytesWritable
-TableSnapshotInputFormat.TableSnapshotRecordReader.createKey()
+TableRecordReader.createKey()
ImmutableBytesWritable
@@ -183,11 +183,9 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
org.apache.hadoop.mapred.RecordReaderImmutableBytesWritable,Result
-TableInputFormatBase.getRecordReader(org.apache.hadoop.mapred.InputSplitsplit,
+TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplitsplit,
org.apache.hadoop.mapred.JobConfjob,
- org.apache.hadoop.mapred.Reporterreporter)
-Builds a TableRecordReader.
-
+
org.apache.hadoop.mapred.Reporterreporter)
org.apache.hadoop.mapred.RecordReaderImmutableBytesWritable,Result
@@ -197,9 +195,11 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
org.apache.hadoop.mapred.RecordReaderImmutableBytesWritable,Result
-TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplitsplit,
+TableInputFormatBase.getRecordReader(org.apache.hadoop.mapred.InputSplitsplit,
org.apache.hadoop.mapred.JobConfjob,
-
org.apache.hadoop.mapred.Reporterreporter)
+ org.apache.hadoop.mapred.Reporterreporter)
+Builds a TableRecordReader.
+
@@ -218,12 +218,10 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
void
-IdentityTableMap.map(ImmutableBytesWritablekey,
- Resultvalue,
+RowCounter.RowCounterMapper.map(ImmutableBytesWritablerow,
+ Resultvalues,
org.apache.hadoop.mapred.OutputCollectorImmutableBytesWritable,Resultoutput,
- org.apache.hadoop.mapred.Reporterreporter)
-Pass the key, value to reduce
-
+ org.apache.hadoop.mapred.Reporterreporter)
void
@@ -236,19 +234,21 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
void
-RowCounter.RowCounterMapper.map(ImmutableBytesWritablerow,
- Resultvalues,
+IdentityTableMap.map(ImmutableBytesWritablekey,
+ Resultvalue,
org.apache.hadoop.mapred.OutputCollectorImmutableBytesWritable,Resultoutput,
- org.apache.hadoop.mapred.Reporterreporter)
+ org.apache.hadoop.mapred.Reporterreporter)
+Pass the key, value to reduce
+
boolean
-TableRecordReader.next(ImmutableBytesWritablekey,
+TableSnapshotInputFormat.TableSnapshotRecordReader.next(ImmutableBytesWritablekey,
Resultvalue)
boolean
-TableSnapshotInputFormat.TableSnapshotRecordReader.next(ImmutableBytesWritablekey,
+TableRecordReader.next(ImmutableBytesWritablekey,
Resultvalue)
@@ -281,12 +281,10 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
void
-IdentityTableMap.map(ImmutableBytesWritablekey,
- Resultvalue,
+RowCounter.RowCounterMapper.map(ImmutableBytesWritablerow,
+ Resultvalues,
org.apache.hadoop.mapred.OutputCollectorImmutableBytesWritable,Resultoutput,
- org.apache.hadoop.mapred.Reporterreporter)
-Pass the key, value to reduce
-
+ org.apache.hadoop.mapred.Reporterreporter)
void
@@ -299,10 +297,12 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
void
-RowCounter.RowCounterMapper.map(ImmutableBytesWritablerow,
- Resultvalues,
+IdentityTableMap.map(ImmutableBytesWritablekey,
+ Resultvalue,
org.apache.hadoop.mapred.OutputCollectorImmutableBytesWritable,Resultoutput,
- org.apache.hadoop.mapred.Reporterreporter)
+ org.apache.hadoop.mapred.Reporterreporter)
+Pass the key, value to reduce
+
void
@@ -349,7 +349,7 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
private ImmutableBytesWritable
-MultithreadedTableMapper.SubMapRecordReader.key
+TableRecordReaderImpl.key
private ImmutableBytesWritable
@@ -357,7 +357,7 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
private ImmutableBytesWritable
-TableRecordReaderImpl.key
+MultithreadedTableMapper.SubMapRecordReader.key
(package private) ImmutableBytesWritable
@@ -427,33 +427,33 @@ Input/OutputFormats, a table indexing MapReduce job, and
utility methods.
ImmutableBytesWritable
-MultithreadedTableMapper.SubMapRecordReader.getCurrentKey()
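All the mapred-side churn above reorders the same contract: createKey()/next() on the record readers and map(key, value, output, reporter) on the mappers. A hedged sketch of the map() body in the IdentityTableMap style ("Pass the key, value to reduce"); the body is illustrative, not the shipped code:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// As it would appear inside a TableMap implementation.
public void map(ImmutableBytesWritable key, Result value,
    OutputCollector<ImmutableBytesWritable, Result> output,
    Reporter reporter) throws IOException {
  output.collect(key, value); // identity: emit each row unchanged
}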
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index 8e07c04..8c36b0f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
008@InterfaceAudience.Private
009public class Version {
010 public static final String version =
"3.0.0-SNAPSHOT";
-011 public static final String revision =
"485af49e53cb38e2af4635f2c3bc0b33e15ba0a1";
+011 public static final String revision =
"b7b86839250bf9b295ebc1948826f43a88736d6c";
012 public static final String user =
"jenkins";
-013 public static final String date = "Sun
Mar 4 05:45:33 UTC 2018";
+013 public static final String date = "Mon
Mar 5 14:41:23 UTC 2018";
014 public static final String url =
"git://asf920.gq1.ygridcore.net/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015 public static final String srcChecksum
= "ae056300970e553626dc6912e61066bd";
+015 public static final String srcChecksum
= "a96590ff72b740efcf08fb6bef3bd5d5";
016}
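The Version class above is regenerated on every site build, which is why its revision, date, and checksum churn in otherwise unrelated commits. A trivial sketch that surfaces those constants (note the class is @InterfaceAudience.Private, so this is for inspection only):

public final class PrintBuildInfo {
  public static void main(String[] args) {
    // fields exactly as generated in the hunk above
    System.out.println("version     = " + org.apache.hadoop.hbase.Version.version);
    System.out.println("revision    = " + org.apache.hadoop.hbase.Version.revision);
    System.out.println("date        = " + org.apache.hadoop.hbase.Version.date);
    System.out.println("srcChecksum = " + org.apache.hadoop.hbase.Version.srcChecksum);
  }
}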
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptor.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptor.html
b/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptor.html
index 1538cfd..b437410 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptor.html
@@ -240,69 +240,75 @@
232 boolean
hasRegionMemStoreReplication();
233
234 /**
-235 * Check if the compaction enable flag of the table is true. If the flag is false,
-236 * no minor/major compactions will actually be done.
-237 *
-238 * @return true if table compaction is enabled
-239 */
-240 boolean isCompactionEnabled();
-241
-242 /**
-243 * Checks if this table is the <code>hbase:meta</code> region.
-244 *
-245 * @return true if this table is the <code>hbase:meta</code> region
-246 */
-247 boolean isMetaRegion();
-248
-249 /**
-250 * Checks if the table is a <code>hbase:meta</code> table.
-251 *
-252 * @return true if the table is the <code>hbase:meta</code> region.
-253 */
-254 boolean isMetaTable();
-255
-256 /**
-257 * Check if the normalization enable flag of the table is true. If the flag is false,
-258 * the region normalizer won't attempt to normalize this table.
-259 *
-260 * @return true if region normalization is enabled for this table
-261 */
-262 boolean isNormalizationEnabled();
-263
-264 /**
-265 * Check if the readOnly flag of the
table is set. If the readOnly flag is set
-266 * then the contents of the table can
only be read from but not modified.
-267 *
-268 * @return true if all columns in the
table should be read only
-269 */
-270 boolean isReadOnly();
-271
-272 /**
-273 * Check if the replication scope of the table's column families matches the given replication state
-274 * @param enabled replication state
-275 * @return true if matched, otherwise false
-276 */
-277 default boolean
matchReplicationScope(boolean enabled) {
-278boolean hasEnabled = false;
-279boolean hasDisabled = false;
-280
-281for (ColumnFamilyDescriptor cf :
getColumnFamilies()) {
-282 if (cf.getScope() !=
HConstants.REPLICATION_SCOPE_GLOBAL) {
-283hasDisabled = true;
-284 } else {
-285hasEnabled = true;
-286 }
-287}
-288
-289if (hasEnabled && hasDisabled) {
-290 return false;
-291}
-292if (hasEnabled) {
-293 return enabled;
-294}
-295return !enabled;
-296 }
-297}
+235 * @return true if there is at least one column family whose replication scope is serial.
+236 */
+237 boolean hasSerialReplicationScope();
+238
+239 /**
+240 * Check if the compaction enable flag of the table is true. If the flag is false,
+241 * no minor/major compactions will actually be done.
+242 *
+243 * @return true if table compaction is enabled
+244 */
+245 boolean isCompactionEnabled();
+246
+247 /**
+248 * Checks if this table is the <code>hbase:meta</code> region.
+249 *
+250 * @return true if this table is the <code>hbase:meta</code> region
+251 */
+252 boolean isMetaRegion();
+253
+254 /**
+255 * Checks if the table is a <code>hbase:meta</code> table.
+256 *
+257 * @return true if the table is the <code>hbase:meta</code> region.
+258 */
+259 boolean isMetaTable();
+260
+261 /**
+262 * Check if the normalization enable flag of the table is true. If the flag is false,
+263 * the region normalizer won't attempt to normalize this table.
+264 *
+265 * @return true if region normalization is
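The matchReplicationScope() default method removed and re-added above is small enough to restate standalone. A sketch with the HBase types reduced to plain ints (HConstants.REPLICATION_SCOPE_GLOBAL is 1), purely to make the three-way return explicit:

// cfScopes stands in for the per-column-family replication scopes.
static boolean matchReplicationScope(int[] cfScopes, boolean enabled) {
  boolean hasEnabled = false;
  boolean hasDisabled = false;
  for (int scope : cfScopes) {
    if (scope != 1) {          // 1 == HConstants.REPLICATION_SCOPE_GLOBAL
      hasDisabled = true;
    } else {
      hasEnabled = true;
    }
  }
  if (hasEnabled && hasDisabled) {
    return false;              // mixed scopes never match either state
  }
  if (hasEnabled) {
    return enabled;            // every family global: matches "enabled"
  }
  return !enabled;             // no family global: matches "disabled"
}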
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/RetriesExhaustedException.html
--
diff --git
a/apidocs/org/apache/hadoop/hbase/client/RetriesExhaustedException.html
b/apidocs/org/apache/hadoop/hbase/client/RetriesExhaustedException.html
index f98f198..dcee4f2 100644
--- a/apidocs/org/apache/hadoop/hbase/client/RetriesExhaustedException.html
+++ b/apidocs/org/apache/hadoop/hbase/client/RetriesExhaustedException.html
@@ -91,16 +91,16 @@
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">java.lang.Object
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">java.lang.Object
-http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
title="class or interface in java.lang">java.lang.Throwable
+https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
title="class or interface in java.lang">java.lang.Throwable
-http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
title="class or interface in java.lang">java.lang.Exception
+https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
title="class or interface in java.lang">java.lang.Exception
-http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
title="class or interface in java.io">java.io.IOException
+https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
title="class or interface in java.io">java.io.IOException
org.apache.hadoop.hbase.client.RetriesExhaustedException
@@ -118,7 +118,7 @@
All Implemented Interfaces:
-http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
title="class or interface in java.io">Serializable
+https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
title="class or interface in java.io">Serializable
Direct Known Subclasses:
@@ -128,7 +128,7 @@
@InterfaceAudience.Public
public class RetriesExhaustedException
-extends http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
title="class or interface in java.io">IOException
+extends https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
title="class or interface in java.io">IOException
Exception thrown by HTable methods when an attempt to do
something (like
commit changes) fails after a bunch of retries.
@@ -154,23 +154,23 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/IOException.ht
RetriesExhaustedException(intnumRetries,
- http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in
java.util">Listorg.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContextexceptions)
+ https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in
java.util">Listorg.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContextexceptions)
Create a new RetriesExhaustedException from the list of
prior failures.
-RetriesExhaustedException(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">Stringmsg)
+RetriesExhaustedException(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">Stringmsg)
-RetriesExhaustedException(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">StringcallableVitals,
+RetriesExhaustedException(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">StringcallableVitals,
intnumTries,
- http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
title="class or interface in
java.lang">Throwableexceptions)
+ https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">Listhttps://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
title="class or interface in
java.lang">Throwableexceptions)
Create a new RetriesExhaustedException from the list of
prior failures.
-RetriesExhaustedException(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">Stringmsg,
-
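The constructor summary above is unchanged apart from the http-to-https link swap. A hedged sketch of the (callableVitals, numTries, exceptions) form; the message and failure list are invented for illustration:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;

List<Throwable> failures = new ArrayList<>();
failures.add(new IOException("connection reset by peer")); // illustrative
throw new RetriesExhaustedException("get against table 't1'", 10, failures);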
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/src-html/org/apache/hadoop/hbase/client/MasterCallable.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/client/MasterCallable.html
b/devapidocs/src-html/org/apache/hadoop/hbase/client/MasterCallable.html
index 9d27237..d2c2295 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/MasterCallable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/MasterCallable.html
@@ -28,129 +28,128 @@
020
021import java.io.Closeable;
022import java.io.IOException;
-023
-024import
org.apache.hadoop.hbase.HRegionInfo;
-025import
org.apache.hadoop.hbase.TableName;
-026import
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-027import
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-028import
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-029import
org.apache.hadoop.hbase.util.Bytes;
-030import
org.apache.yetus.audience.InterfaceAudience;
-031
-032/**
-033 * A RetryingCallable for Master RPC
operations.
-034 * Implement the #rpcCall method. It will
be retried on error. See its javadoc and the javadoc of
-035 * #call(int). See {@link HBaseAdmin} for
examples of how this is used. To get at the
-036 * rpcController that has been created
and configured to make this rpc call, use getRpcController().
-037 * We are trying to contain all protobuf
references including references to rpcController so we
-038 * don't pollute codebase with protobuf
references; keep the protobuf references contained and only
-039 * present in a few classes rather than
all about the code base.
-040 * <p>Like {@link RegionServerCallable}, only in here we can safely use a PayloadCarryingRpcController
-041 * all the time. This is not possible in the similar {@link RegionServerCallable} because
-042 * it has to deal with Coprocessor Endpoints.
-043 * @param <V> return type
-044 */
-045@InterfaceAudience.Private
-046abstract class MasterCallable<V> implements RetryingCallable<V>, Closeable {
-047 protected final ClusterConnection
connection;
-048 protected MasterKeepAliveConnection
master;
-049 private final HBaseRpcController
rpcController;
-050
-051 MasterCallable(final Connection
connection, final RpcControllerFactory rpcConnectionFactory) {
-052this.connection = (ClusterConnection)
connection;
-053this.rpcController =
rpcConnectionFactory.newController();
-054 }
-055
-056 @Override
-057 public void prepare(boolean reload)
throws IOException {
-058this.master =
this.connection.getKeepAliveMasterService();
-059 }
-060
-061 @Override
-062 public void close() throws IOException
{
-063// The above prepare could fail but
this would still be called though masterAdmin is null
-064if (this.master != null) {
-065 this.master.close();
-066 this.master = null;
-067}
-068 }
-069
-070 @Override
-071 public void throwable(Throwable t,
boolean retrying) {
-072 }
-073
-074 @Override
-075 public String
getExceptionMessageAdditionalDetail() {
-076return "";
-077 }
-078
-079 @Override
-080 public long sleep(long pause, int
tries) {
-081return
ConnectionUtils.getPauseTime(pause, tries);
-082 }
-083
-084 /**
-085 * Override that changes the {@link
java.util.concurrent.Callable#call()} Exception from {@link Exception} to
-086 * {@link IOException}. It also does
setup of an rpcController and calls through to the rpcCall()
-087 * method which callers are expected to
implement. If rpcController is an instance of
-088 * PayloadCarryingRpcController, we
will set a timeout on it.
-089 */
-090 @Override
-091 // Same trick as in
RegionServerCallable so users don't have to copy/paste so much boilerplate
-092 // and so we contain references to
protobuf. We can't set priority on the rpcController as
-093 // we do in RegionServerCallable
because we don't always have a Table when we call.
-094 public V call(int callTimeout) throws
IOException {
-095try {
-096 if (this.rpcController != null) {
-097this.rpcController.reset();
-098
this.rpcController.setCallTimeout(callTimeout);
-099 }
-100 return rpcCall();
-101} catch (Exception e) {
-102 throw
ProtobufUtil.handleRemoteException(e);
-103}
-104 }
-105
-106 /**
-107 * Run the RPC call. Implement this
method. To get at the rpcController that has been created
-108 * and configured to make this rpc
call, use getRpcController(). We are trying to contain
-109 * rpcController references so we don't
pollute codebase with protobuf references; keep the
-110 * protobuf references contained and
only present in a few classes rather than all about the
-111 * code base.
-112 * @throws Exception
-113 */
-114 protected abstract V rpcCall() throws
Exception;
-115
-116 HBaseRpcController getRpcController()
{
-117return this.rpcController;
-118 }
-119
-120 void setPriority(final
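Per the javadoc above, callers subclass and implement only rpcCall(); prepare() acquires the master connection and call(int) arms the controller. A hedged sketch of such a subclass, ignoring the class's package-private visibility; conn and rpcFactory are assumed to exist:

MasterCallable<Void> callable = new MasterCallable<Void>(conn, rpcFactory) {
  @Override
  protected Void rpcCall() throws Exception {
    // issue the protobuf RPC against this.master here, passing
    // getRpcController() so the configured call timeout applies
    return null;
  }
};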
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
index 802b925..a3e80ab 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
@@ -73,229 +73,229 @@
065import
java.util.concurrent.TimeoutException;
066import
java.util.concurrent.atomic.AtomicBoolean;
067import
java.util.concurrent.atomic.AtomicInteger;
-068import
java.util.concurrent.atomic.AtomicLong;
-069import
java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import
java.util.concurrent.locks.ReadWriteLock;
-072import
java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import
org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import
org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import
org.apache.hadoop.hbase.CellBuilderType;
-081import
org.apache.hadoop.hbase.CellComparator;
-082import
org.apache.hadoop.hbase.CellComparatorImpl;
-083import
org.apache.hadoop.hbase.CellScanner;
-084import
org.apache.hadoop.hbase.CellUtil;
-085import
org.apache.hadoop.hbase.CompareOperator;
-086import
org.apache.hadoop.hbase.CompoundConfiguration;
-087import
org.apache.hadoop.hbase.DoNotRetryIOException;
-088import
org.apache.hadoop.hbase.DroppedSnapshotException;
-089import
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import
org.apache.hadoop.hbase.HConstants;
-091import
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import
org.apache.hadoop.hbase.HRegionInfo;
-094import
org.apache.hadoop.hbase.KeyValue;
-095import
org.apache.hadoop.hbase.KeyValueUtil;
-096import
org.apache.hadoop.hbase.NamespaceDescriptor;
-097import
org.apache.hadoop.hbase.NotServingRegionException;
-098import
org.apache.hadoop.hbase.PrivateCellUtil;
-099import
org.apache.hadoop.hbase.RegionTooBusyException;
-100import
org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import
org.apache.hadoop.hbase.UnknownScannerException;
-104import
org.apache.hadoop.hbase.client.Append;
-105import
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import
org.apache.hadoop.hbase.client.CompactionState;
-107import
org.apache.hadoop.hbase.client.Delete;
-108import
org.apache.hadoop.hbase.client.Durability;
-109import
org.apache.hadoop.hbase.client.Get;
-110import
org.apache.hadoop.hbase.client.Increment;
-111import
org.apache.hadoop.hbase.client.IsolationLevel;
-112import
org.apache.hadoop.hbase.client.Mutation;
-113import
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import
org.apache.hadoop.hbase.client.Put;
-115import
org.apache.hadoop.hbase.client.RegionInfo;
-116import
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import
org.apache.hadoop.hbase.client.Result;
-118import
org.apache.hadoop.hbase.client.RowMutations;
-119import
org.apache.hadoop.hbase.client.Scan;
-120import
org.apache.hadoop.hbase.client.TableDescriptor;
-121import
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import
org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import
org.apache.hadoop.hbase.filter.FilterWrapper;
-131import
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import
org.apache.hadoop.hbase.io.HFileLink;
-133import
org.apache.hadoop.hbase.io.HeapSize;
-134import
org.apache.hadoop.hbase.io.TimeRange;
-135import
org.apache.hadoop.hbase.io.hfile.HFile;
-136import
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import
org.apache.hadoop.hbase.ipc.RpcCall;
-139import
org.apache.hadoop.hbase.ipc.RpcServer;
-140import
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
index bd13b53..802b925 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
@@ -900,7600 +900,7598 @@
892if
(this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
893 status.setStatus("Writing region
info on filesystem");
894 fs.checkRegionInfoOnFilesystem();
-895} else {
-896 if (LOG.isDebugEnabled()) {
-897LOG.debug("Skipping creation of
.regioninfo file for " + this.getRegionInfo());
-898 }
-899}
-900
-901// Initialize all the HStores
-902status.setStatus("Initializing all
the Stores");
-903long maxSeqId =
initializeStores(reporter, status);
-904this.mvcc.advanceTo(maxSeqId);
-905if
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-906 Collection<HStore> stores = this.stores.values();
-907 try {
-908// update the stores that we are
replaying
-909
stores.forEach(HStore::startReplayingFromWAL);
-910// Recover any edits if
available.
-911maxSeqId = Math.max(maxSeqId,
-912
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter,
status));
-913// Make sure mvcc is up to max.
-914this.mvcc.advanceTo(maxSeqId);
-915 } finally {
-916// update the stores that we are
done replaying
-917
stores.forEach(HStore::stopReplayingFromWAL);
-918 }
-919}
-920this.lastReplayedOpenRegionSeqId =
maxSeqId;
+895}
+896
+897// Initialize all the HStores
+898status.setStatus("Initializing all
the Stores");
+899long maxSeqId =
initializeStores(reporter, status);
+900this.mvcc.advanceTo(maxSeqId);
+901if
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
+902 Collection<HStore> stores = this.stores.values();
+903 try {
+904// update the stores that we are
replaying
+905
stores.forEach(HStore::startReplayingFromWAL);
+906// Recover any edits if
available.
+907maxSeqId = Math.max(maxSeqId,
+908
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter,
status));
+909// Make sure mvcc is up to max.
+910this.mvcc.advanceTo(maxSeqId);
+911 } finally {
+912// update the stores that we are
done replaying
+913
stores.forEach(HStore::stopReplayingFromWAL);
+914 }
+915}
+916this.lastReplayedOpenRegionSeqId =
maxSeqId;
+917
+918
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+919this.writestate.flushRequested =
false;
+920this.writestate.compacting.set(0);
921
-922
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-923this.writestate.flushRequested =
false;
-924this.writestate.compacting.set(0);
-925
-926if (this.writestate.writesEnabled)
{
-927 // Remove temporary data left over
from old regions
-928 status.setStatus("Cleaning up
temporary data from old regions");
-929 fs.cleanupTempDir();
-930}
-931
-932if (this.writestate.writesEnabled)
{
-933 status.setStatus("Cleaning up
detritus from prior splits");
-934 // Get rid of any splits or merges
that were lost in-progress. Clean out
-935 // these directories here on open.
We may be opening a region that was
-936 // being split but we crashed in
the middle of it all.
-937 fs.cleanupAnySplitDetritus();
-938 fs.cleanupMergesDir();
-939}
-940
-941// Initialize split policy
-942this.splitPolicy =
RegionSplitPolicy.create(this, conf);
-943
-944// Initialize flush policy
-945this.flushPolicy =
FlushPolicyFactory.create(this, conf);
-946
-947long lastFlushTime =
EnvironmentEdgeManager.currentTime();
-948for (HStore store: stores.values())
{
-949
this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-950}
-951
-952// Use maximum of log sequenceid or
that which was found in stores
-953// (particularly if no recovered
edits, seqid will be -1).
-954long nextSeqid = maxSeqId;
-955if (this.writestate.writesEnabled)
{
-956 nextSeqid =
WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(),
-957 this.fs.getRegionDir(),
nextSeqid, 1);
-958} else {
-959 nextSeqid++;
-960}
-961
-962LOG.info("Onlined " +
this.getRegionInfo().getShortNameToLog() +
-963 "; next sequenceid=" +
nextSeqid);
+922if (this.writestate.writesEnabled)
{
+923 // Remove temporary data left
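Both sides of the hunk above keep the recovered-edits bracket intact; condensed, it reads as below (maxSeqId comes from initializeStores(), and the names are exactly those in the source):

Collection<HStore> stores = this.stores.values();
try {
  stores.forEach(HStore::startReplayingFromWAL);   // put stores in replay mode
  maxSeqId = Math.max(maxSeqId,
      replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
  this.mvcc.advanceTo(maxSeqId);                   // read point covers all replayed edits
} finally {
  stores.forEach(HStore::stopReplayingFromWAL);    // always leave replay mode
}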
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
index 3628d68..bd2f966 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
@@ -152,27 +152,27 @@ the order they are declared.
PeerProcedureInterface.PeerOperationType
-RefreshPeerProcedure.getPeerOperationType()
+DisablePeerProcedure.getPeerOperationType()
PeerProcedureInterface.PeerOperationType
-DisablePeerProcedure.getPeerOperationType()
+RemovePeerProcedure.getPeerOperationType()
PeerProcedureInterface.PeerOperationType
-UpdatePeerConfigProcedure.getPeerOperationType()
+EnablePeerProcedure.getPeerOperationType()
PeerProcedureInterface.PeerOperationType
-AddPeerProcedure.getPeerOperationType()
+RefreshPeerProcedure.getPeerOperationType()
PeerProcedureInterface.PeerOperationType
-EnablePeerProcedure.getPeerOperationType()
+AddPeerProcedure.getPeerOperationType()
PeerProcedureInterface.PeerOperationType
-RemovePeerProcedure.getPeerOperationType()
+UpdatePeerConfigProcedure.getPeerOperationType()
private static PeerProcedureInterface.PeerOperationType
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
index 269bc46..f7a6279 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
@@ -125,11 +125,11 @@
private ProcedurePrepareLatch
-RecoverMetaProcedure.syncLatch
+AbstractStateMachineTableProcedure.syncLatch
private ProcedurePrepareLatch
-AbstractStateMachineTableProcedure.syncLatch
+RecoverMetaProcedure.syncLatch
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
index 5e8085c..8b6ceb7 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
@@ -104,14 +104,14 @@
ServerProcedureInterface.ServerOperationType
-ServerCrashProcedure.getServerOperationType()
-
-
-ServerProcedureInterface.ServerOperationType
ServerProcedureInterface.getServerOperationType()
Given an operation type, we can decide what to do with pending operations.
+
+ServerProcedureInterface.ServerOperationType
+ServerCrashProcedure.getServerOperationType()
+
static ServerProcedureInterface.ServerOperationType
ServerProcedureInterface.ServerOperationType.valueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
title="class or interface in java.lang">Stringname)
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
index 046295e..e736f37 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
@@ -112,19 +112,19 @@
TableProcedureInterface.TableOperationType
-MoveRegionProcedure.getTableOperationType()
+UnassignProcedure.getTableOperationType()
TableProcedureInterface.TableOperationType
-GCMergedRegionsProcedure.getTableOperationType()
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
index b8ce496..570fb68 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
@@ -168,27 +168,39 @@
void
+CombinedBlockCache.cacheBlock(BlockCacheKeycacheKey,
+ Cacheablebuf)
+
+
+void
BlockCache.cacheBlock(BlockCacheKeycacheKey,
Cacheablebuf)
Add block to cache (defaults to not in-memory).
-
+
void
LruBlockCache.cacheBlock(BlockCacheKeycacheKey,
Cacheablebuf)
Cache the block with the specified name and buffer.
-
+
void
-CombinedBlockCache.cacheBlock(BlockCacheKeycacheKey,
+MemcachedBlockCache.cacheBlock(BlockCacheKeycacheKey,
Cacheablebuf)
+
+void
+CombinedBlockCache.cacheBlock(BlockCacheKeycacheKey,
+ Cacheablebuf,
+ booleaninMemory)
+
void
-MemcachedBlockCache.cacheBlock(BlockCacheKeycacheKey,
- Cacheablebuf)
+InclusiveCombinedBlockCache.cacheBlock(BlockCacheKeycacheKey,
+ Cacheablebuf,
+ booleaninMemory)
void
@@ -208,18 +220,6 @@
void
-CombinedBlockCache.cacheBlock(BlockCacheKeycacheKey,
- Cacheablebuf,
- booleaninMemory)
-
-
-void
-InclusiveCombinedBlockCache.cacheBlock(BlockCacheKeycacheKey,
- Cacheablebuf,
- booleaninMemory)
-
-
-void
MemcachedBlockCache.cacheBlock(BlockCacheKeycacheKey,
Cacheablebuf,
booleaninMemory)
@@ -232,21 +232,21 @@
boolean
-BlockCache.evictBlock(BlockCacheKeycacheKey)
-Evict block from cache.
-
+CombinedBlockCache.evictBlock(BlockCacheKeycacheKey)
boolean
-LruBlockCache.evictBlock(BlockCacheKeycacheKey)
+InclusiveCombinedBlockCache.evictBlock(BlockCacheKeycacheKey)
boolean
-CombinedBlockCache.evictBlock(BlockCacheKeycacheKey)
+BlockCache.evictBlock(BlockCacheKeycacheKey)
+Evict block from cache.
+
boolean
-InclusiveCombinedBlockCache.evictBlock(BlockCacheKeycacheKey)
+LruBlockCache.evictBlock(BlockCacheKeycacheKey)
boolean
@@ -254,35 +254,35 @@
Cacheable
-BlockCache.getBlock(BlockCacheKeycacheKey,
+CombinedBlockCache.getBlock(BlockCacheKeycacheKey,
booleancaching,
booleanrepeat,
-booleanupdateCacheMetrics)
-Fetch block from cache.
-
+booleanupdateCacheMetrics)
Cacheable
-LruBlockCache.getBlock(BlockCacheKeycacheKey,
+InclusiveCombinedBlockCache.getBlock(BlockCacheKeycacheKey,
booleancaching,
booleanrepeat,
-booleanupdateCacheMetrics)
-Get the buffer of the block with the specified name.
-
+booleanupdateCacheMetrics)
Cacheable
-CombinedBlockCache.getBlock(BlockCacheKeycacheKey,
+BlockCache.getBlock(BlockCacheKeycacheKey,
booleancaching,
booleanrepeat,
-booleanupdateCacheMetrics)
+booleanupdateCacheMetrics)
+Fetch block from cache.
+
Cacheable
-InclusiveCombinedBlockCache.getBlock(BlockCacheKeycacheKey,
+LruBlockCache.getBlock(BlockCacheKeycacheKey,
booleancaching,
booleanrepeat,
-booleanupdateCacheMetrics)
+booleanupdateCacheMetrics)
+Get the buffer of the block with the specified name.
+
Cacheable
@@ -308,6 +308,11 @@
CombinedBlockCache.getRefCount(BlockCacheKeycacheKey)
+void
+CombinedBlockCache.returnBlock(BlockCacheKeycacheKey,
+ Cacheableblock)
+
+
default void
BlockCache.returnBlock(BlockCacheKeycacheKey,
Cacheableblock)
@@ -315,11 +320,6 @@
is over.
-
-void
-CombinedBlockCache.returnBlock(BlockCacheKeycacheKey,
- Cacheableblock)
-
@@ -497,13 +497,13 @@
void
-CachedEntryQueue.add(http://docs.oracle.com/javase/8/docs/api/java/util/Map.Entry.html?is-external=true;
title="class or interface in java.util">Map.Entry<BlockCacheKey,BucketCache.BucketEntry> entry)
-Attempt to add the specified entry to this queue.
-
+BucketCache.BucketEntryGroup.add(http://docs.oracle.com/javase/8/docs/api/java/util/Map.Entry.html?is-external=true;
title="class or interface in java.util">Map.Entry<BlockCacheKey,BucketCache.BucketEntry> block)
void
-BucketCache.BucketEntryGroup.add(http://docs.oracle.com/javase/8/docs/api/java/util/Map.Entry.html?is-external=true;
title="class or interface in java.util">Map.Entry<BlockCacheKey,BucketCache.BucketEntry> block)
+CachedEntryQueue.add(http://docs.oracle.com/javase/8/docs/api/java/util/Map.Entry.html?is-external=true;
title="class or interface in java.util">Map.Entry<BlockCacheKey,BucketCache.BucketEntry> entry)
+Attempt to add the specified entry to this queue.
+
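Across the reshuffled rows, the BlockCache surface here is four calls. A hedged usage sketch against some already-configured cache instance; the key name and offset are made up:

import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.Cacheable;

BlockCacheKey key = new BlockCacheKey("somehfile", 0L);
cache.cacheBlock(key, block);                           // defaults to not in-memory
Cacheable hit = cache.getBlock(key, true, false, true); // caching, !repeat, update metrics
boolean evicted = cache.evictBlock(key);
cache.returnBlock(key, hit);                            // tell the cache the read is over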
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
b/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
index d2c9cca..146b426 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
@@ -282,7 +282,10 @@
http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListRegionPlan
-FavoredStochasticBalancer.balanceCluster(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
title="class or interface in java.util">MapServerName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListRegionInfoclusterState)
+SimpleLoadBalancer.balanceCluster(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
title="class or interface in java.util">MapServerName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListRegionInfoclusterMap)
+Generate a global load balancing plan according to the
specified map of
+ server information to the most loaded regions of each server.
+
http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListRegionPlan
@@ -292,19 +295,16 @@
http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListRegionPlan
-SimpleLoadBalancer.balanceCluster(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
title="class or interface in java.util">MapServerName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListRegionInfoclusterMap)
-Generate a global load balancing plan according to the
specified map of
- server information to the most loaded regions of each server.
-
+FavoredStochasticBalancer.balanceCluster(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
title="class or interface in java.util">MapServerName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListRegionInfoclusterState)
http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListRegionPlan
-StochasticLoadBalancer.balanceCluster(TableNametableName,
+SimpleLoadBalancer.balanceCluster(TableNametableName,
http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
title="class or interface in java.util">MapServerName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListRegionInfoclusterState)
http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListRegionPlan
-SimpleLoadBalancer.balanceCluster(TableNametableName,
+StochasticLoadBalancer.balanceCluster(TableNametableName,
http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
title="class or interface in java.util">MapServerName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
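Both balanceCluster overloads above return the same thing: a list of RegionPlan moves computed from the current assignment. A sketch of the contract; the balancer instance and the getClusterState() helper are assumptions:

Map<ServerName, List<RegionInfo>> clusterState = getClusterState(); // hypothetical helper
List<RegionPlan> plans = balancer.balanceCluster(clusterState);
for (RegionPlan plan : plans) {
  // each plan names a region plus its source and destination ServerName
}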
title="class or interface in java.util">ListRegionInfoclusterState)
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionState.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionState.html
b/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionState.html
index 00f46c3..75603cc 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionState.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionState.html
@@ -143,7 +143,7 @@
http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListRegionState
-ClusterMetrics.getRegionStatesInTransition()
+ClusterMetricsBuilder.ClusterMetricsImpl.getRegionStatesInTransition()
http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListRegionState
@@ -153,7 +153,7 @@
http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
title="class or interface in java.util">ListRegionState
-ClusterMetricsBuilder.ClusterMetricsImpl.getRegionStatesInTransition()
+ClusterMetrics.getRegionStatesInTransition()
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
index 7161108..fe5ef34 100644
---
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
+++
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
@@ -166,27 +166,27 @@
DataBlockEncoder.EncodedSeeker
-CopyKeyDataBlockEncoder.createSeeker(CellComparatorcomparator,
+RowIndexCodecV1.createSeeker(CellComparatorcomparator,
HFileBlockDecodingContextdecodingCtx)
DataBlockEncoder.EncodedSeeker
-PrefixKeyDeltaEncoder.createSeeker(CellComparatorcomparator,
+CopyKeyDataBlockEncoder.createSeeker(CellComparatorcomparator,
HFileBlockDecodingContextdecodingCtx)
DataBlockEncoder.EncodedSeeker
-FastDiffDeltaEncoder.createSeeker(CellComparatorcomparator,
+DiffKeyDeltaEncoder.createSeeker(CellComparatorcomparator,
HFileBlockDecodingContextdecodingCtx)
DataBlockEncoder.EncodedSeeker
-DiffKeyDeltaEncoder.createSeeker(CellComparatorcomparator,
+FastDiffDeltaEncoder.createSeeker(CellComparatorcomparator,
HFileBlockDecodingContextdecodingCtx)
DataBlockEncoder.EncodedSeeker
-RowIndexCodecV1.createSeeker(CellComparatorcomparator,
+PrefixKeyDeltaEncoder.createSeeker(CellComparatorcomparator,
HFileBlockDecodingContextdecodingCtx)
@@ -198,13 +198,13 @@
http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
title="class or interface in java.nio">ByteBuffer
-BufferedDataBlockEncoder.decodeKeyValues(http://docs.oracle.com/javase/8/docs/api/java/io/DataInputStream.html?is-external=true;
title="class or interface in java.io">DataInputStreamsource,
- HFileBlockDecodingContextblkDecodingCtx)
+RowIndexCodecV1.decodeKeyValues(http://docs.oracle.com/javase/8/docs/api/java/io/DataInputStream.html?is-external=true;
title="class or interface in java.io">DataInputStreamsource,
+ HFileBlockDecodingContextdecodingCtx)
http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
title="class or interface in java.nio">ByteBuffer
-RowIndexCodecV1.decodeKeyValues(http://docs.oracle.com/javase/8/docs/api/java/io/DataInputStream.html?is-external=true;
title="class or interface in java.io">DataInputStreamsource,
- HFileBlockDecodingContextdecodingCtx)
+BufferedDataBlockEncoder.decodeKeyValues(http://docs.oracle.com/javase/8/docs/api/java/io/DataInputStream.html?is-external=true;
title="class or interface in java.io">DataInputStreamsource,
+ HFileBlockDecodingContextblkDecodingCtx)
@@ -279,17 +279,17 @@
HFileBlockDecodingContext
-NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContextmeta)
+HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContextfileContext)
HFileBlockDecodingContext
-HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
-Create an encoder-specific decoding context for reading.
-
+NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContextmeta)
HFileBlockDecodingContext
-HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContextfileContext)
+HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
+Create an encoder-specific decoding context for reading.
+
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
index 79b047f..66443b9 100644
---
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
+++
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
@@ -116,36 +116,36 @@
HFileBlockDefaultDecodingContextdecodingCtx)
-protected abstract http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
title="class or interface in java.nio">ByteBuffer
-BufferedDataBlockEncoder.internalDecodeKeyValues(http://docs.oracle.com/javase/8/docs/api/java/io/DataInputStream.html?is-external=true;
title="class or interface in java.io">DataInputStreamsource,
+protected http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
title="class or interface in java.nio">ByteBuffer
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html
b/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html
index 3117787..ba0cca5 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html
@@ -121,11 +121,11 @@
TableStateManager
-MasterServices.getTableStateManager()
+HMaster.getTableStateManager()
TableStateManager
-HMaster.getTableStateManager()
+MasterServices.getTableStateManager()
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html
b/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html
index b5b7703..a444123 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html
@@ -117,11 +117,11 @@
LockManager
-MasterServices.getLockManager()
+HMaster.getLockManager()
LockManager
-HMaster.getLockManager()
+MasterServices.getLockManager()
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html
b/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html
index f476c74..d589aaa 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html
@@ -104,15 +104,15 @@
NormalizationPlan.PlanType
-NormalizationPlan.getType()
+MergeNormalizationPlan.getType()
NormalizationPlan.PlanType
-SplitNormalizationPlan.getType()
+NormalizationPlan.getType()
NormalizationPlan.PlanType
-MergeNormalizationPlan.getType()
+SplitNormalizationPlan.getType()
NormalizationPlan.PlanType
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html
b/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html
index d8fb2f6..ad4e9b4 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html
@@ -125,11 +125,11 @@
RegionNormalizer
-MasterServices.getRegionNormalizer()
+HMaster.getRegionNormalizer()
RegionNormalizer
-HMaster.getRegionNormalizer()
+MasterServices.getRegionNormalizer()
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
index d1757b3..529d712 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
@@ -332,11 +332,11 @@
java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
title="class or interface in java.io">Serializable)
-org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl.FactoryStorage
-org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode
-org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus
org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective
+org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl.FactoryStorage
org.apache.hadoop.hbase.master.RegionState.State
+org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus
+org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
index 4d04e3e..9c13a58 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
@@ -133,11 +133,11 @@
ProcedureExecutorMasterProcedureEnv
-HMaster.getMasterProcedureExecutor()
+MasterServices.getMasterProcedureExecutor()
ProcedureExecutorMasterProcedureEnv
-MasterServices.getMasterProcedureExecutor()
+HMaster.getMasterProcedureExecutor()
private RemoteProcedureDispatcher.RemoteProcedureMasterProcedureEnv,?
@@ -194,15 +194,15 @@
protected Procedure.LockState
-RegionTransitionProcedure.acquireLock(MasterProcedureEnvenv)
+GCRegionProcedure.acquireLock(MasterProcedureEnvenv)
protected Procedure.LockState
-GCRegionProcedure.acquireLock(MasterProcedureEnvenv)
+MergeTableRegionsProcedure.acquireLock(MasterProcedureEnvenv)
protected Procedure.LockState
-MergeTableRegionsProcedure.acquireLock(MasterProcedureEnvenv)
+RegionTransitionProcedure.acquireLock(MasterProcedureEnvenv)
protected boolean
@@ -295,7 +295,7 @@
protected void
-UnassignProcedure.finishTransition(MasterProcedureEnvenv,
+AssignProcedure.finishTransition(MasterProcedureEnvenv,
RegionStates.RegionStateNoderegionNode)
@@ -305,7 +305,7 @@
protected void
-AssignProcedure.finishTransition(MasterProcedureEnvenv,
+UnassignProcedure.finishTransition(MasterProcedureEnvenv,
RegionStates.RegionStateNoderegionNode)
@@ -314,7 +314,7 @@
protected ProcedureMetrics
-UnassignProcedure.getProcedureMetrics(MasterProcedureEnvenv)
+AssignProcedure.getProcedureMetrics(MasterProcedureEnvenv)
protected ProcedureMetrics
@@ -326,7 +326,7 @@
protected ProcedureMetrics
-AssignProcedure.getProcedureMetrics(MasterProcedureEnvenv)
+UnassignProcedure.getProcedureMetrics(MasterProcedureEnvenv)
(package private) static
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse
@@ -357,7 +357,7 @@
ServerName
-UnassignProcedure.getServer(MasterProcedureEnvenv)
+AssignProcedure.getServer(MasterProcedureEnvenv)
abstract ServerName
@@ -367,7 +367,7 @@
ServerName
-AssignProcedure.getServer(MasterProcedureEnvenv)
+UnassignProcedure.getServer(MasterProcedureEnvenv)
private ServerName
@@ -384,19 +384,19 @@
protected boolean
-RegionTransitionProcedure.hasLock(MasterProcedureEnvenv)
+MergeTableRegionsProcedure.hasLock(MasterProcedureEnvenv)
protected boolean
-MergeTableRegionsProcedure.hasLock(MasterProcedureEnvenv)
+RegionTransitionProcedure.hasLock(MasterProcedureEnvenv)
protected boolean
-RegionTransitionProcedure.holdLock(MasterProcedureEnvenv)
+MergeTableRegionsProcedure.holdLock(MasterProcedureEnvenv)
protected boolean
-MergeTableRegionsProcedure.holdLock(MasterProcedureEnvenv)
+RegionTransitionProcedure.holdLock(MasterProcedureEnvenv)
private boolean
@@ -510,15 +510,15 @@
protected void
-RegionTransitionProcedure.releaseLock(MasterProcedureEnvenv)
+MergeTableRegionsProcedure.releaseLock(MasterProcedureEnvenv)
protected void
-MergeTableRegionsProcedure.releaseLock(MasterProcedureEnvenv)
+RegionTransitionProcedure.releaseLock(MasterProcedureEnvenv)
RemoteProcedureDispatcher.RemoteOperation
-UnassignProcedure.remoteCallBuild(MasterProcedureEnvenv,
+AssignProcedure.remoteCallBuild(MasterProcedureEnvenv,
ServerNameserverName)
@@ -528,12 +528,12 @@
RemoteProcedureDispatcher.RemoteOperation
-AssignProcedure.remoteCallBuild(MasterProcedureEnvenv,
+UnassignProcedure.remoteCallBuild(MasterProcedureEnvenv,
ServerNameserverName)
protected boolean
-UnassignProcedure.remoteCallFailed(MasterProcedureEnvenv,
+AssignProcedure.remoteCallFailed(MasterProcedureEnvenv,
RegionStates.RegionStateNoderegionNode,
http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
title="class or interface in
java.io">IOExceptionexception)
@@ -545,7 +545,7 @@
protected boolean
-AssignProcedure.remoteCallFailed(MasterProcedureEnvenv,
+UnassignProcedure.remoteCallFailed(MasterProcedureEnvenv,
RegionStates.RegionStateNoderegionNode,
http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
title="class or interface in
java.io">IOExceptionexception)
@@ -566,10 +566,10 @@
protected void
-UnassignProcedure.reportTransition(MasterProcedureEnvenv,
+AssignProcedure.reportTransition(MasterProcedureEnvenv,
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index 7515d7b..3c4825d 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -762,7 +762,7 @@
754boolean wasUp =
this.clusterStatusTracker.isClusterUp();
755if (!wasUp)
this.clusterStatusTracker.setClusterUp();
756
-757LOG.info("Server active/primary
master=" + this.serverName +
+757LOG.info("Active/primary master=" +
this.serverName +
758", sessionid=0x" +
759
Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
760", setting cluster-up flag (Was="
+ wasUp + ")");
@@ -1161,7 +1161,7 @@
1153 startProcedureExecutor();
1154
1155 // Start log cleaner thread
-1156 int cleanerInterval =
conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
+1156 int cleanerInterval =
conf.getInt("hbase.master.cleaner.interval", 600 * 1000);
1157 this.logCleaner =
1158 new LogCleaner(cleanerInterval,
1159 this, conf,
getMasterWalManager().getFileSystem(),
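The second hunk above raises the default hbase.master.cleaner.interval from 60 to 600 seconds. A sketch of pinning the value explicitly rather than relying on the default, using the standard Configuration API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

Configuration conf = HBaseConfiguration.create();
conf.setInt("hbase.master.cleaner.interval", 60 * 1000); // keep the old 1-minute cadence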
@@ -1227,2368 +1227,2369 @@
1219procedureExecutor = new
ProcedureExecutor(conf, procEnv, procedureStore, procedureScheduler);
1220
configurationManager.registerObserver(procEnv);
1221
-1222final int numThreads =
conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
-1223
Math.max(Runtime.getRuntime().availableProcessors(),
-1224
MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
-1225final boolean abortOnCorruption =
conf.getBoolean(
-1226
MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
-1227
MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
-1228procedureStore.start(numThreads);
-1229procedureExecutor.start(numThreads,
abortOnCorruption);
-1230
procEnv.getRemoteDispatcher().start();
-1231 }
-1232
-1233 private void stopProcedureExecutor()
{
-1234if (procedureExecutor != null) {
-1235
configurationManager.deregisterObserver(procedureExecutor.getEnvironment());
-1236
procedureExecutor.getEnvironment().getRemoteDispatcher().stop();
-1237 procedureExecutor.stop();
-1238 procedureExecutor.join();
-1239 procedureExecutor = null;
-1240}
-1241
-1242if (procedureStore != null) {
-1243
procedureStore.stop(isAborted());
-1244 procedureStore = null;
-1245}
-1246 }
-1247
-1248 private void stopChores() {
-1249if (this.expiredMobFileCleanerChore
!= null) {
-1250
this.expiredMobFileCleanerChore.cancel(true);
-1251}
-1252if (this.mobCompactChore != null)
{
-1253
this.mobCompactChore.cancel(true);
-1254}
-1255if (this.balancerChore != null) {
-1256 this.balancerChore.cancel(true);
-1257}
-1258if (this.normalizerChore != null)
{
-1259
this.normalizerChore.cancel(true);
-1260}
-1261if (this.clusterStatusChore != null)
{
-1262
this.clusterStatusChore.cancel(true);
-1263}
-1264if (this.catalogJanitorChore !=
null) {
-1265
this.catalogJanitorChore.cancel(true);
-1266}
-1267if (this.clusterStatusPublisherChore
!= null){
-1268
clusterStatusPublisherChore.cancel(true);
-1269}
-1270if (this.mobCompactThread != null)
{
-1271 this.mobCompactThread.close();
-1272}
-1273
-1274if (this.quotaObserverChore != null)
{
-1275 quotaObserverChore.cancel();
-1276}
-1277if (this.snapshotQuotaChore != null)
{
-1278 snapshotQuotaChore.cancel();
-1279}
-1280 }
-1281
-1282 /**
-1283 * @return Get remote side's
InetAddress
-1284 */
-1285 InetAddress getRemoteInetAddress(final
int port,
-1286 final long serverStartCode) throws
UnknownHostException {
-1287// Do it out here in its own little
method so can fake an address when
-1288// mocking up in tests.
-1289InetAddress ia =
RpcServer.getRemoteIp();
-1290
-1291// The call could be from the local
regionserver,
-1292// in which case, there is no remote
address.
-1293if (ia == null && serverStartCode == startcode) {
-1294 InetSocketAddress isa =
rpcServices.getSocketAddress();
-1295 if (isa != null && isa.getPort() == port) {
-1296ia = isa.getAddress();
-1297 }
-1298}
-1299return ia;
-1300 }
-1301
-1302 /**
-1303 * @return Maximum time we should run
balancer for
-1304 */
-1305 private int getMaxBalancingTime() {
-1306int maxBalancingTime =
getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, -1);
-1307if (maxBalancingTime == -1) {
-1308
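The removed lines 1222-1224 size the procedure executor: a configured MasterProcedureConstants.MASTER_PROCEDURE_THREADS wins outright, otherwise the pool gets at least the number of available cores. A sketch of the same rule, assuming the constant resolves to the key "hbase.master.procedure.threads" and using an illustrative minimum of 4:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ProcedureThreadSizingSketch {
  // Mirrors the removed sizing logic: explicit config first, else max(cores, minimum).
  static int procedureThreads(Configuration conf, int minimum) {
    return conf.getInt("hbase.master.procedure.threads",
        Math.max(Runtime.getRuntime().availableProcessors(), minimum));
  }

  public static void main(String[] args) {
    System.out.println(procedureThreads(HBaseConfiguration.create(), 4));
  }
}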
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index 4a87b9d..7515d7b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -25,3542 +25,3570 @@
017 */
018package org.apache.hadoop.hbase.master;
019
-020import com.google.protobuf.Descriptors;
-021import com.google.protobuf.Service;
-022import java.io.IOException;
-023import java.io.InterruptedIOException;
-024import java.lang.reflect.Constructor;
-025import
java.lang.reflect.InvocationTargetException;
-026import java.net.InetAddress;
-027import java.net.InetSocketAddress;
-028import java.net.UnknownHostException;
-029import java.util.ArrayList;
-030import java.util.Arrays;
-031import java.util.Collection;
-032import java.util.Collections;
-033import java.util.Comparator;
-034import java.util.EnumSet;
-035import java.util.HashMap;
-036import java.util.Iterator;
-037import java.util.List;
-038import java.util.Map;
-039import java.util.Map.Entry;
-040import java.util.Objects;
-041import java.util.Set;
-042import
java.util.concurrent.ExecutionException;
-043import java.util.concurrent.Future;
-044import java.util.concurrent.TimeUnit;
-045import
java.util.concurrent.TimeoutException;
-046import
java.util.concurrent.atomic.AtomicInteger;
-047import
java.util.concurrent.atomic.AtomicReference;
-048import java.util.function.Function;
-049import java.util.regex.Pattern;
-050import java.util.stream.Collectors;
-051import javax.servlet.ServletException;
-052import javax.servlet.http.HttpServlet;
-053import
javax.servlet.http.HttpServletRequest;
-054import
javax.servlet.http.HttpServletResponse;
-055import
org.apache.commons.lang3.StringUtils;
-056import
org.apache.hadoop.conf.Configuration;
-057import org.apache.hadoop.fs.Path;
-058import
org.apache.hadoop.hbase.ClusterId;
-059import
org.apache.hadoop.hbase.ClusterMetrics;
-060import
org.apache.hadoop.hbase.ClusterMetrics.Option;
-061import
org.apache.hadoop.hbase.ClusterMetricsBuilder;
-062import
org.apache.hadoop.hbase.CoordinatedStateException;
-063import
org.apache.hadoop.hbase.DoNotRetryIOException;
-064import
org.apache.hadoop.hbase.HBaseIOException;
-065import
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-066import
org.apache.hadoop.hbase.HConstants;
-067import
org.apache.hadoop.hbase.InvalidFamilyOperationException;
-068import
org.apache.hadoop.hbase.MasterNotRunningException;
-069import
org.apache.hadoop.hbase.MetaTableAccessor;
-070import
org.apache.hadoop.hbase.NamespaceDescriptor;
-071import
org.apache.hadoop.hbase.PleaseHoldException;
-072import
org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
-073import
org.apache.hadoop.hbase.ServerLoad;
-074import
org.apache.hadoop.hbase.ServerMetricsBuilder;
-075import
org.apache.hadoop.hbase.ServerName;
-076import
org.apache.hadoop.hbase.TableDescriptors;
-077import
org.apache.hadoop.hbase.TableName;
-078import
org.apache.hadoop.hbase.TableNotDisabledException;
-079import
org.apache.hadoop.hbase.TableNotFoundException;
-080import
org.apache.hadoop.hbase.UnknownRegionException;
-081import
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-082import
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-083import
org.apache.hadoop.hbase.client.MasterSwitchType;
-084import
org.apache.hadoop.hbase.client.RegionInfo;
-085import
org.apache.hadoop.hbase.client.Result;
-086import
org.apache.hadoop.hbase.client.TableDescriptor;
-087import
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-088import
org.apache.hadoop.hbase.client.TableState;
-089import
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-090import
org.apache.hadoop.hbase.exceptions.DeserializationException;
-091import
org.apache.hadoop.hbase.exceptions.MergeRegionException;
-092import
org.apache.hadoop.hbase.executor.ExecutorType;
-093import
org.apache.hadoop.hbase.favored.FavoredNodesManager;
-094import
org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-095import
org.apache.hadoop.hbase.http.InfoServer;
-096import
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-097import
org.apache.hadoop.hbase.ipc.RpcServer;
-098import
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-099import
org.apache.hadoop.hbase.log.HBaseMarkers;
-100import
org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
-101import
org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-102import
org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
-103import
org.apache.hadoop.hbase.master.assignment.RegionStates;
-104import
org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
-105import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestCIBadHostname.html
--
diff --git
a/testdevapidocs/org/apache/hadoop/hbase/client/TestCIBadHostname.html
b/testdevapidocs/org/apache/hadoop/hbase/client/TestCIBadHostname.html
index 866db4a..e622a21 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestCIBadHostname.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestCIBadHostname.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
-public class TestCIBadHostname
+public class TestCIBadHostname
extends Object
Tests that we fail fast when hostname resolution is not working and do not cache unresolved InetSocketAddresses.
@@ -132,10 +132,14 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
Field and Description
+static HBaseClassTestRule
+CLASS_RULE
+
+
private static
org.apache.hadoop.hbase.client.ConnectionImplementation
conn
-
+
private static HBaseTestingUtility
testUtil
@@ -209,13 +213,22 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
Field Detail
+
+
+
+
+
+CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
+
+
testUtil
-private static HBaseTestingUtility testUtil
+private static HBaseTestingUtility testUtil
@@ -224,7 +237,7 @@
conn
-private static org.apache.hadoop.hbase.client.ConnectionImplementation conn
+private static org.apache.hadoop.hbase.client.ConnectionImplementation conn
@@ -241,7 +254,7 @@
TestCIBadHostname
-public TestCIBadHostname()
+public TestCIBadHostname()
@@ -258,7 +271,7 @@
setupBeforeClass
-public static void setupBeforeClass()
+public static void setupBeforeClass()
 throws Exception
Throws:
@@ -272,7 +285,7 @@
teardownAfterClass
-public static void teardownAfterClass()
+public static void teardownAfterClass()
 throws Exception
Throws:
@@ -286,7 +299,7 @@
testGetAdminBadHostname
-public void testGetAdminBadHostname()
+public void testGetAdminBadHostname()
 throws Exception
Throws:
@@ -300,7 +313,7 @@
testGetClientBadHostname
-public void testGetClientBadHostname()
+public void testGetClientBadHostname()
 throws Exception
Throws:
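The only substantive change in this test's diff is the new CLASS_RULE field. A sketch of the complete declaration, assuming the customary HBaseClassTestRule.forClass factory; the diff shows the field but not its initializer:

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.junit.ClassRule;

public class TestCIBadHostname {
  // JUnit applies this rule once around the whole test class.
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestCIBadHostname.class);
}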
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestCIDeleteOperationTimeout.html
--
diff --git
a/testdevapidocs/org/apache/hadoop/hbase/client/TestCIDeleteOperationTimeout.html
b/testdevapidocs/org/apache/hadoop/hbase/client/TestCIDeleteOperationTimeout.html
index fee2ec8..ee543bf 100644
---
a/testdevapidocs/org/apache/hadoop/hbase/client/TestCIDeleteOperationTimeout.html
+++
b/testdevapidocs/org/apache/hadoop/hbase/client/TestCIDeleteOperationTimeout.html
@@ -75,13 +75,13 @@ var activeTableTab = "activeTableTab";
Summary:
Nested|
-Field|
+Field|
Constr|
Method
Detail:
-Field|
+Field|
Constr|
Method
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
-public class TestCIDeleteOperationTimeout
+public class TestCIDeleteOperationTimeout
extends AbstractTestCIOperationTimeout
@@ -148,6 +148,17 @@ extends
+Fields
+
+Modifier and Type
+Field and Description
+
+
+static HBaseClassTestRule
+CLASS_RULE
+
+
@@ -220,6 +231,23 @@ extends
+
+
+
+
+
+Field Detail
+
+
+
+
+
+CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
+
+
+
+
@@ -232,7 +260,7 @@ extends
TestCIDeleteOperationTimeout
-public TestCIDeleteOperationTimeout()
+public TestCIDeleteOperationTimeout()
@@ -249,7 +277,7 @@ extends
execute
-protected void execute(org.apache.hadoop.hbase.client.Table table)
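The execute(Table) override is the hook AbstractTestCIOperationTimeout invokes to exercise the operation-timeout path. A plausible body for the Delete variant, offered as a sketch; the row key is invented for illustration:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteExecuteSketch {
  // Issue a single Delete so the client's operation timeout is what fails.
  static void execute(Table table) throws IOException {
    table.delete(new Delete(Bytes.toBytes("row")));
  }
}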
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.Stage.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.Stage.html
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.Stage.html
index add44d1..efa6d95 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.Stage.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.Stage.html
@@ -43,13 +43,13 @@
035import
org.apache.hadoop.hbase.backup.BackupType;
036import
org.apache.hadoop.hbase.backup.HBackupFileSystem;
037import
org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
-038import
org.apache.yetus.audience.InterfaceAudience;
-039import org.slf4j.Logger;
-040import org.slf4j.LoggerFactory;
-041import
org.apache.hadoop.hbase.client.Admin;
-042import
org.apache.hadoop.hbase.client.Connection;
-043import
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-044import
org.apache.hadoop.hbase.util.FSUtils;
+038import
org.apache.hadoop.hbase.client.Admin;
+039import
org.apache.hadoop.hbase.client.Connection;
+040import
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+041import
org.apache.hadoop.hbase.util.FSUtils;
+042import
org.apache.yetus.audience.InterfaceAudience;
+043import org.slf4j.Logger;
+044import org.slf4j.LoggerFactory;
045
046import
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
047
@@ -88,360 +88,355 @@
080 }
081
082 public void init(final Connection conn,
final String backupId, BackupRequest request)
-083 throws IOException
-084 {
-085if (request.getBackupType() ==
BackupType.FULL) {
-086 backupManager = new
BackupManager(conn, conn.getConfiguration());
-087} else {
-088 backupManager = new
IncrementalBackupManager(conn, conn.getConfiguration());
-089}
-090this.backupId = backupId;
-091this.tableList =
request.getTableList();
-092this.conn = conn;
-093this.conf =
conn.getConfiguration();
-094this.fs =
FSUtils.getCurrentFileSystem(conf);
-095backupInfo =
-096
backupManager.createBackupInfo(backupId, request.getBackupType(), tableList,
-097 request.getTargetRootDir(),
request.getTotalTasks(), request.getBandwidth());
-098if (tableList == null ||
tableList.isEmpty()) {
-099 this.tableList = new ArrayList<>(backupInfo.getTables());
-100}
-101// Start new session
-102backupManager.startBackupSession();
-103 }
-104
-105 /**
-106 * Begin the overall backup.
-107 * @param backupInfo backup info
-108 * @throws IOException exception
-109 */
-110 protected void
beginBackup(BackupManager backupManager, BackupInfo backupInfo)
-111 throws IOException {
-112
-113BackupSystemTable.snapshot(conn);
-114
backupManager.setBackupInfo(backupInfo);
-115// set the start timestamp of the
overall backup
-116long startTs =
EnvironmentEdgeManager.currentTime();
-117backupInfo.setStartTs(startTs);
-118// set overall backup status:
ongoing
-119
backupInfo.setState(BackupState.RUNNING);
-120
backupInfo.setPhase(BackupPhase.REQUEST);
-121LOG.info("Backup " +
backupInfo.getBackupId() + " started at " + startTs + ".");
-122
-123
backupManager.updateBackupInfo(backupInfo);
-124if (LOG.isDebugEnabled()) {
-125 LOG.debug("Backup session " +
backupInfo.getBackupId() + " has been started.");
-126}
-127 }
-128
-129 protected String getMessage(Exception
e) {
-130String msg = e.getMessage();
-131if (msg == null || msg.equals(""))
{
-132 msg = e.getClass().getName();
-133}
-134return msg;
-135 }
-136
-137 /**
-138 * Delete HBase snapshot for backup.
-139 * @param backupInfo backup info
-140 * @throws Exception exception
-141 */
-142 protected static void
deleteSnapshots(final Connection conn, BackupInfo backupInfo, Configuration
conf)
-143 throws IOException {
-144LOG.debug("Trying to delete snapshot
for full backup.");
-145for (String snapshotName :
backupInfo.getSnapshotNames()) {
-146 if (snapshotName == null) {
-147continue;
-148 }
-149 LOG.debug("Trying to delete
snapshot: " + snapshotName);
-150
-151 try (Admin admin = conn.getAdmin())
{
-152
admin.deleteSnapshot(snapshotName);
-153 }
-154 LOG.debug("Deleting the snapshot "
+ snapshotName + " for backup " + backupInfo.getBackupId()
-155 + " succeeded.");
-156}
-157 }
-158
-159 /**
-160 * Clean up directories with prefix
"exportSnapshot-", which are generated when exporting
-161 * snapshots.
-162 * @throws IOException exception
-163 */
-164 protected static void
cleanupExportSnapshotLog(Configuration conf) throws IOException {
-165FileSystem fs =
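The init() body above dispatches on the backup type: a full backup gets a plain BackupManager, an incremental one an IncrementalBackupManager. Condensed into a helper, with package names assumed from this file's own location under org.apache.hadoop.hbase.backup.impl; a sketch, not the project's API:

import java.io.IOException;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.impl.BackupManager;
import org.apache.hadoop.hbase.backup.impl.IncrementalBackupManager;
import org.apache.hadoop.hbase.client.Connection;

public class BackupManagerDispatchSketch {
  // Same branch as init(): the request's type alone picks the manager.
  static BackupManager managerFor(Connection conn, BackupType type) throws IOException {
    return type == BackupType.FULL
        ? new BackupManager(conn, conn.getConfiguration())
        : new IncrementalBackupManager(conn, conn.getConfiguration());
  }
}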
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/testdevapidocs/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.html
--
diff --git
a/testdevapidocs/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.html
b/testdevapidocs/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.html
index 00ed486..dd532d4 100644
---
a/testdevapidocs/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.html
+++
b/testdevapidocs/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.html
@@ -100,12 +100,6 @@ var activeTableTab = "activeTableTab";
http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">java.lang.Object
-junit.framework.Assert
-
-
-junit.framework.TestCase
-
-
org.apache.hadoop.hbase.rest.model.TestModelBase<org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel>
@@ -115,20 +109,12 @@ var activeTableTab = "activeTableTab";
-
-
-
-
-
-All Implemented Interfaces:
-junit.framework.Test
-
-public class TestStorageClusterVersionModel
+public class TestStorageClusterVersionModel
extends TestModelBase<org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel>
@@ -212,18 +198,11 @@
fromJSON, fromPB, fromXML, testBuildModel, testFromJSON, testFromXML, testToJSON, testToXML, toJSON, toPB, toXML
-
-
-
-Methods inherited from class junit.framework.TestCase
-assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertFalse, assertFalse, assertNotNull, assertNotNull, assertNotSame, assertNotSame, assertNull, assertNull, assertSame, assertSame, assertTrue, assertTrue, countTestCases, createResult, fail, fail, failNotEquals, failNotSame, failSame, format, getName, run, run, runBare, runTest, setName, setUp, tearDown, toString
-
-
Methods inherited from class java.lang.Object
-clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
+clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index 50cc17f..c270b81 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
-static class HRegion.WriteState
+static class HRegion.WriteState
extends Object
@@ -239,7 +239,7 @@
flushing
-volatile boolean flushing
+volatile boolean flushing
@@ -248,7 +248,7 @@
flushRequested
-volatile boolean flushRequested
+volatile boolean flushRequested
@@ -257,7 +257,7 @@
compacting
-AtomicInteger compacting
+AtomicInteger compacting
@@ -266,7 +266,7 @@
writesEnabled
-volatile boolean writesEnabled
+volatile boolean writesEnabled
@@ -275,7 +275,7 @@
readOnly
-volatile boolean readOnly
+volatile boolean readOnly
@@ -284,7 +284,7 @@
readsEnabled
-volatile boolean readsEnabled
+volatile boolean readsEnabled
@@ -293,7 +293,7 @@
HEAP_SIZE
-static final long HEAP_SIZE
+static final long HEAP_SIZE
@@ -310,7 +310,7 @@
WriteState
-WriteState()
+WriteState()
@@ -327,7 +327,7 @@
setReadOnly
-void setReadOnly(boolean onOff)
+void setReadOnly(boolean onOff)
Set flags that make this region read-only.
Parameters:
@@ -341,7 +341,7 @@
isReadOnly
-boolean isReadOnly()
+boolean isReadOnly()
@@ -350,7 +350,7 @@
isFlushRequested
-boolean isFlushRequested()
+boolean isFlushRequested()
@@ -359,7 +359,7 @@
setReadsEnabled
-void setReadsEnabled(boolean readsEnabled)
+void setReadsEnabled(boolean readsEnabled)
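WriteState is a bag of volatile flags read concurrently by the flush, compaction, and write paths. A sketch of the pattern; the setReadOnly body is inferred from its "Set flags that make this region read-only" description, not copied from source:

class WriteStateSketch {
  volatile boolean flushing = false;
  volatile boolean flushRequested = false;
  volatile boolean writesEnabled = true;
  volatile boolean readOnly = false;

  // Presumed coupling: read-only implies writes are disabled.
  synchronized void setReadOnly(boolean onOff) {
    this.writesEnabled = !onOff;
    this.readOnly = onOff;
  }

  boolean isReadOnly() {
    return this.readOnly;
  }
}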
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSCrashExecutor.html
--
diff --git
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSCrashExecutor.html
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSCrashExecutor.html
index f1db5ca..d8515d7 100644
---
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSCrashExecutor.html
+++
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSCrashExecutor.html
@@ -32,813 +32,820 @@
024import static org.junit.Assert.fail;
025
026import java.io.IOException;
-027import java.net.SocketTimeoutException;
-028import java.util.NavigableMap;
-029import java.util.Random;
-030import java.util.Set;
-031import java.util.SortedSet;
-032import
java.util.concurrent.ConcurrentSkipListMap;
-033import
java.util.concurrent.ConcurrentSkipListSet;
-034import
java.util.concurrent.ExecutionException;
-035import java.util.concurrent.Executors;
-036import java.util.concurrent.Future;
-037import
java.util.concurrent.ScheduledExecutorService;
-038import java.util.concurrent.TimeUnit;
-039
-040import
org.apache.hadoop.conf.Configuration;
-041import
org.apache.hadoop.hbase.CategoryBasedTimeout;
-042import
org.apache.hadoop.hbase.DoNotRetryIOException;
-043import
org.apache.hadoop.hbase.HBaseTestingUtility;
-044import
org.apache.hadoop.hbase.NotServingRegionException;
-045import
org.apache.hadoop.hbase.ServerName;
-046import
org.apache.hadoop.hbase.TableName;
-047import
org.apache.hadoop.hbase.client.RegionInfo;
-048import
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-049import
org.apache.hadoop.hbase.client.RetriesExhaustedException;
-050import
org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-051import
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-052import
org.apache.hadoop.hbase.master.MasterServices;
-053import
org.apache.hadoop.hbase.master.RegionState.State;
-054import
org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
-055import
org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
-056import
org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher;
-057import
org.apache.hadoop.hbase.procedure2.Procedure;
-058import
org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
-059import
org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-060import
org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-061import
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-062import
org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
-063import
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-064import
org.apache.hadoop.hbase.testclassification.MasterTests;
-065import
org.apache.hadoop.hbase.testclassification.MediumTests;
-066import
org.apache.hadoop.hbase.util.Bytes;
-067import
org.apache.hadoop.hbase.util.FSUtils;
-068import
org.apache.hadoop.ipc.RemoteException;
-069import org.junit.After;
-070import org.junit.Before;
-071import org.junit.Ignore;
-072import org.junit.Rule;
-073import org.junit.Test;
-074import
org.junit.experimental.categories.Category;
-075import
org.junit.rules.ExpectedException;
-076import org.junit.rules.TestName;
-077import org.junit.rules.TestRule;
-078import org.slf4j.Logger;
-079import org.slf4j.LoggerFactory;
-080import
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-081import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-082import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-083import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
-084import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
-085import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
-086import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
-087import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
-088import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState;
-089import
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
-090import
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-091import
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
-092
-093@Category({MasterTests.class,
MediumTests.class})
-094public class TestAssignmentManager {
-095 private static final Logger LOG =
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCell.html
--
diff --git
a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCell.html
b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCell.html
index 232ef56..bc3a6d0 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCell.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCell.html
@@ -29,610 +29,626 @@
021import static
org.junit.Assert.assertEquals;
022import static
org.junit.Assert.assertFalse;
023import static
org.junit.Assert.assertTrue;
-024
-025import java.io.ByteArrayOutputStream;
-026import java.io.IOException;
-027import java.math.BigDecimal;
-028import java.nio.ByteBuffer;
-029import java.util.ArrayList;
-030import java.util.List;
-031import java.util.NavigableMap;
-032import java.util.TreeMap;
-033import
org.apache.hadoop.hbase.testclassification.MiscTests;
-034import
org.apache.hadoop.hbase.testclassification.SmallTests;
-035import
org.apache.hadoop.hbase.util.Bytes;
-036import org.junit.Assert;
-037import org.junit.Test;
-038import
org.junit.experimental.categories.Category;
-039
-040@Category({MiscTests.class,
SmallTests.class})
-041public class TestCellUtil {
-042 /**
-043 * CellScannable used in test. Returns
a {@link TestCellScanner}
-044 */
-045 private static class TestCellScannable
implements CellScannable {
-046private final int cellsCount;
-047TestCellScannable(final int
cellsCount) {
-048 this.cellsCount = cellsCount;
-049}
-050@Override
-051public CellScanner cellScanner() {
-052 return new
TestCellScanner(this.cellsCount);
-053}
-054 }
-055
-056 /**
-057 * CellScanner used in test.
-058 */
-059 private static class TestCellScanner
implements CellScanner {
-060private int count = 0;
-061private Cell current = null;
-062private final int cellsCount;
-063
-064TestCellScanner(final int cellsCount)
{
-065 this.cellsCount = cellsCount;
-066}
-067
-068@Override
-069public Cell current() {
-070 return this.current;
-071}
-072
-073@Override
-074public boolean advance() throws
IOException {
-075 if (this.count < cellsCount) {
-076this.current = new
TestCell(this.count);
-077this.count++;
-078return true;
-079 }
-080 return false;
-081}
-082 }
-083
-084 /**
-085 * Cell used in test. Has row only.
-086 */
-087 private static class TestCell
implements Cell {
-088private final byte [] row;
-089
-090TestCell(final int i) {
-091 this.row = Bytes.toBytes(i);
-092}
-093
-094@Override
-095public byte[] getRowArray() {
-096 return this.row;
-097}
-098
-099@Override
-100public int getRowOffset() {
-101 return 0;
-102}
-103
-104@Override
-105public short getRowLength() {
-106 return (short)this.row.length;
-107}
-108
-109@Override
-110public byte[] getFamilyArray() {
-111 // TODO Auto-generated method
stub
-112 return null;
-113}
-114
-115@Override
-116public int getFamilyOffset() {
-117 // TODO Auto-generated method
stub
-118 return 0;
-119}
-120
-121@Override
-122public byte getFamilyLength() {
-123 // TODO Auto-generated method
stub
-124 return 0;
-125}
-126
-127@Override
-128public byte[] getQualifierArray() {
-129 // TODO Auto-generated method
stub
-130 return null;
-131}
-132
-133@Override
-134public int getQualifierOffset() {
-135 // TODO Auto-generated method
stub
-136 return 0;
-137}
-138
-139@Override
-140public int getQualifierLength() {
-141 // TODO Auto-generated method
stub
-142 return 0;
-143}
-144
-145@Override
-146public long getTimestamp() {
-147 // TODO Auto-generated method
stub
-148 return 0;
-149}
-150
-151@Override
-152public byte getTypeByte() {
-153 // TODO Auto-generated method
stub
-154 return 0;
-155}
-156
-157@Override
-158public byte[] getValueArray() {
-159 // TODO Auto-generated method
stub
-160 return null;
-161}
-162
-163@Override
-164public int getValueOffset() {
-165 // TODO Auto-generated method
stub
-166 return 0;
-167}
-168
-169@Override
-170public int getValueLength() {
-171 // TODO Auto-generated method
stub
-172 return 0;
-173}
-174
-175@Override
-176public byte[] getTagsArray() {
-177 // TODO Auto-generated method
stub
-178 return null;
-179}
-180
-181@Override
-182public int getTagsOffset() {
-183 // TODO Auto-generated method
stub
-184 return 0;
-185}
-186
-187@Override
-188public long getSequenceId() {
-189 // TODO Auto-generated method
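TestCellScanner above implements the two-method CellScanner contract: advance() moves the cursor and reports whether a cell is available, current() returns it. The customary consumption loop, as a self-contained sketch:

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;

public class CellScannerDrainSketch {
  // Counts the cells a scanner yields; current() stays valid until the next advance().
  static int count(CellScanner scanner) throws IOException {
    int n = 0;
    while (scanner.advance()) {
      Cell cell = scanner.current();
      n++;
    }
    return n;
  }
}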
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.html
--
diff --git
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.html
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.html
index 9694561..2bcceb6 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
-public class TestHStore
+public class TestHStore
extends Object
Test class for the HStore
@@ -613,7 +613,7 @@
LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
@@ -622,7 +622,7 @@
name
-public org.junit.rules.TestName name
+public org.junit.rules.TestName name
@@ -631,7 +631,7 @@
region
-org.apache.hadoop.hbase.regionserver.HRegion region
+org.apache.hadoop.hbase.regionserver.HRegion region
@@ -640,7 +640,7 @@
store
-org.apache.hadoop.hbase.regionserver.HStore store
+org.apache.hadoop.hbase.regionserver.HStore store
@@ -649,7 +649,7 @@
table
-byte[] table
+byte[] table
@@ -658,7 +658,7 @@
family
-byte[] family
+byte[] family
@@ -667,7 +667,7 @@
row
-byte[] row
+byte[] row
@@ -676,7 +676,7 @@
row2
-byte[] row2
+byte[] row2
@@ -685,7 +685,7 @@
qf1
-byte[] qf1
+byte[] qf1
@@ -694,7 +694,7 @@
qf2
-byte[] qf2
+byte[] qf2
@@ -703,7 +703,7 @@
qf3
-byte[] qf3
+byte[] qf3
@@ -712,7 +712,7 @@
qf4
-byte[] qf4
+byte[] qf4
@@ -721,7 +721,7 @@
qf5
-byte[] qf5
+byte[] qf5
@@ -730,7 +730,7 @@
qf6
-byte[] qf6
+byte[] qf6
@@ -739,7 +739,7 @@
qualifiers
-NavigableSet<byte[]> qualifiers
+NavigableSet<byte[]> qualifiers
@@ -748,7 +748,7 @@
expected
-List<org.apache.hadoop.hbase.Cell> expected
+List<org.apache.hadoop.hbase.Cell> expected
@@ -757,7 +757,7 @@
result
-List<org.apache.hadoop.hbase.Cell> result
+List<org.apache.hadoop.hbase.Cell> result
@@ -766,7 +766,7 @@
id
-long id
+long id
@@ -775,7 +775,7 @@
get
-org.apache.hadoop.hbase.client.Get get
+org.apache.hadoop.hbase.client.Get get
@@ -784,7 +784,7 @@
TEST_UTIL
-private static final HBaseTestingUtility TEST_UTIL
+private static final HBaseTestingUtility TEST_UTIL
@@ -793,7 +793,7 @@
DIR
-private static final String DIR
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
b/devapidocs/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
index 1301239..59c238b 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
@@ -303,7 +303,9 @@ extends
RemoteProcedureDispatcher.RemoteOperation
remoteCallBuild(MasterProcedureEnv env,
- ServerName serverName)
+ ServerName serverName)
+For building the remote operation.
+
protected boolean
@@ -350,7 +352,7 @@ extends RegionTransitionProcedure
-abort, acquireLock, addToRemoteDispatcher, execute, getRegionInfo, getRegionState, getTableName, getTransitionState, hasLock, holdLock, isMeta, isServerOnline, isServerOnline, releaseLock, remoteCallCompleted, remoteCallFailed, reportTransition, rollback, setRegionInfo, setTransitionState, shouldWaitClientAck, toStringState
+abort, acquireLock, addToRemoteDispatcher, execute, getRegionInfo, getRegionState, getTableName, getTransitionState, hasLock, holdLock, isMeta, isServerOnline, isServerOnline, releaseLock, remoteCallFailed, remoteOperationCompleted, remoteOperationFailed, reportTransition, rollback, setRegionInfo, setTransitionState, shouldWaitClientAck, toStringState
@@ -600,6 +602,8 @@
RemoteProcedureDispatcher.RemoteOperation remoteCallBuild(MasterProcedureEnv env,
ServerName serverName)
+Description copied from interface: RemoteProcedureDispatcher.RemoteProcedure
+For building the remote operation.
Specified by:
remoteCallBuild in interface RemoteProcedureDispatcher.RemoteProcedure<MasterProcedureEnv,ServerName>
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
b/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
index b99a6b3..f82f773 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
@@ -197,8 +197,8 @@
java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
-org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type
+org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.html
b/devapidocs/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.html
index f9e5288..f47901d 100644
---
a/devapidocs/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.html
+++
b/devapidocs/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.html
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
PrevClass
-NextClass
+NextClass
Frames
@@ -344,7 +344,7 @@ extends
PrevClass
-NextClass
+NextClass
Frames
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.ReplicationQueueDeletor.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.ReplicationQueueDeletor.html
b/devapidocs/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.ReplicationQueueDeletor.html
deleted file mode 100644
index 9b47c21..000
---
a/devapidocs/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.ReplicationQueueDeletor.html
+++ /dev/null
@@ -1,350 +0,0 @@
-ReplicationZKNodeCleaner.ReplicationQueueDeletor (Apache HBase
3.0.0-SNAPSHOT API)
+org.apache.hadoop.hbase.io.encoding
+Class BufferedDataBlockEncoder.OffheapDecodedExtendedCell
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.ByteBufferExtendedCell
+
+
+org.apache.hadoop.hbase.io.encoding.BufferedDataBlockEncoder.OffheapDecodedExtendedCell
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+Cloneable, Cell, ExtendedCell, HeapSize, RawCell
+
+
+Enclosing class:
+BufferedDataBlockEncoder
+
+
+
+protected static class BufferedDataBlockEncoder.OffheapDecodedExtendedCell
+extends ByteBufferExtendedCell
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from
interfaceorg.apache.hadoop.hbase.Cell
+Cell.Type
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+private byte
+familyLength
+
+
+private int
+familyOffset
+
+
+private static long
+FIXED_OVERHEAD
+
+
+private ByteBuffer
+keyBuffer
+
+
+private int
+qualifierLength
+
+
+private int
+qualifierOffset
+
+
+private short
+rowLength
+
+
+private long
+seqId
+
+
+private ByteBuffer
+tagsBuffer
+
+
+private int
+tagsLength
+
+
+private int
+tagsOffset
+
+
+private long
+timestamp
+
+
+private byte
+typeByte
+
+
+private ByteBuffer
+valueBuffer
+
+
+private int
+valueLength
+
+
+private int
+valueOffset
+
+
+
+
+
+
+Fields inherited from interfaceorg.apache.hadoop.hbase.ExtendedCell
+CELL_NOT_BASED_ON_CHUNK
+
+
+
+
+
+Fields inherited from interfaceorg.apache.hadoop.hbase.RawCell
+MAX_TAGS_LENGTH
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Modifier
+Constructor and Description
+
+
+protected
+OffheapDecodedExtendedCell(ByteBuffer keyBuffer,
+ short rowLength,
+ int familyOffset,
+ byte familyLength,
+ int qualOffset,
+ int qualLength,
+ long timeStamp,
+ byte typeByte,
+ ByteBuffer valueBuffer,
+
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
index 3400507..2baa140 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
@@ -28,3034 +28,2926 @@
020import static
org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY;
021import static
org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
022
-023import
com.google.common.annotations.VisibleForTesting;
-024
-025import java.io.DataOutput;
-026import java.io.DataOutputStream;
-027import java.io.IOException;
-028import java.io.OutputStream;
-029import java.math.BigDecimal;
-030import java.nio.ByteBuffer;
-031import java.util.ArrayList;
-032import java.util.Iterator;
-033import java.util.List;
-034import java.util.Optional;
-035
-036import
org.apache.hadoop.hbase.KeyValue.Type;
-037import
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-038import
org.apache.hadoop.hbase.io.HeapSize;
-039import
org.apache.hadoop.hbase.io.TagCompressionContext;
-040import
org.apache.hadoop.hbase.io.util.Dictionary;
-041import
org.apache.hadoop.hbase.io.util.StreamUtils;
-042import
org.apache.hadoop.hbase.util.ByteBufferUtils;
-043import
org.apache.hadoop.hbase.util.ByteRange;
-044import
org.apache.hadoop.hbase.util.Bytes;
-045import
org.apache.hadoop.hbase.util.ClassSize;
-046import
org.apache.yetus.audience.InterfaceAudience;
-047
-048
-049/**
-050 * Utility methods helpful slinging
{@link Cell} instances. It has more powerful and
-051 * rich set of APIs than those in {@link
CellUtil} for internal usage.
-052 */
-053@InterfaceAudience.Private
-054public final class PrivateCellUtil {
-055
-056 /**
-057 * Private constructor to keep this
class from being instantiated.
-058 */
-059 private PrivateCellUtil() {
-060 }
+023import java.io.DataOutput;
+024import java.io.DataOutputStream;
+025import java.io.IOException;
+026import java.io.OutputStream;
+027import java.math.BigDecimal;
+028import java.nio.ByteBuffer;
+029import java.util.ArrayList;
+030import java.util.Iterator;
+031import java.util.List;
+032import java.util.Optional;
+033import
org.apache.hadoop.hbase.KeyValue.Type;
+034import
org.apache.hadoop.hbase.filter.ByteArrayComparable;
+035import
org.apache.hadoop.hbase.io.HeapSize;
+036import
org.apache.hadoop.hbase.io.TagCompressionContext;
+037import
org.apache.hadoop.hbase.io.util.Dictionary;
+038import
org.apache.hadoop.hbase.io.util.StreamUtils;
+039import
org.apache.hadoop.hbase.util.ByteBufferUtils;
+040import
org.apache.hadoop.hbase.util.ByteRange;
+041import
org.apache.hadoop.hbase.util.Bytes;
+042import
org.apache.hadoop.hbase.util.ClassSize;
+043import
org.apache.yetus.audience.InterfaceAudience;
+044
+045import
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+046
+047/**
+048 * Utility methods helpful slinging
{@link Cell} instances. It has more powerful and
+049 * rich set of APIs than those in {@link
CellUtil} for internal usage.
+050 */
+051@InterfaceAudience.Private
+052public final class PrivateCellUtil {
+053
+054 /**
+055 * Private constructor to keep this
class from being instantiated.
+056 */
+057 private PrivateCellUtil() {
+058 }
+059
+060 /*** ByteRange
***/
061
-062 /*** ByteRange
***/
-063
-064 public static ByteRange
fillRowRange(Cell cell, ByteRange range) {
-065return range.set(cell.getRowArray(),
cell.getRowOffset(), cell.getRowLength());
-066 }
-067
-068 public static ByteRange
fillFamilyRange(Cell cell, ByteRange range) {
-069return
range.set(cell.getFamilyArray(), cell.getFamilyOffset(),
cell.getFamilyLength());
-070 }
-071
-072 public static ByteRange
fillQualifierRange(Cell cell, ByteRange range) {
-073return
range.set(cell.getQualifierArray(), cell.getQualifierOffset(),
-074 cell.getQualifierLength());
-075 }
-076
-077 public static ByteRange
fillValueRange(Cell cell, ByteRange range) {
-078return
range.set(cell.getValueArray(), cell.getValueOffset(),
cell.getValueLength());
-079 }
-080
-081 public static ByteRange
fillTagRange(Cell cell, ByteRange range) {
-082return range.set(cell.getTagsArray(),
cell.getTagsOffset(), cell.getTagsLength());
-083 }
+062 public static ByteRange
fillRowRange(Cell cell, ByteRange range) {
+063return range.set(cell.getRowArray(),
cell.getRowOffset(), cell.getRowLength());
+064 }
+065
+066 public static ByteRange
fillFamilyRange(Cell cell, ByteRange range) {
+067return
range.set(cell.getFamilyArray(),
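The fill*Range helpers above re-point a caller-supplied ByteRange at the cell's backing arrays, so no bytes are copied. A usage sketch, assuming SimpleMutableByteRange as the stock ByteRange implementation:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.util.ByteRange;
import org.apache.hadoop.hbase.util.SimpleMutableByteRange;

public class FillRangeSketch {
  // Returns a zero-copy view over the cell's row bytes.
  static ByteRange rowOf(Cell cell) {
    return PrivateCellUtil.fillRowRange(cell, new SimpleMutableByteRange());
  }
}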
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index 9d49b9a..c36fdce 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -165,3380 +165,3375 @@
157import
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
158import
org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
159import
org.apache.hadoop.hbase.replication.ReplicationException;
-160import
org.apache.hadoop.hbase.replication.ReplicationFactory;
-161import
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-162import
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-163import
org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
-164import
org.apache.hadoop.hbase.replication.master.ReplicationPeerConfigUpgrader;
-165import
org.apache.hadoop.hbase.replication.regionserver.Replication;
-166import
org.apache.hadoop.hbase.security.AccessDeniedException;
-167import
org.apache.hadoop.hbase.security.UserProvider;
-168import
org.apache.hadoop.hbase.trace.TraceUtil;
-169import
org.apache.hadoop.hbase.util.Addressing;
-170import
org.apache.hadoop.hbase.util.Bytes;
-171import
org.apache.hadoop.hbase.util.CompressionTest;
-172import
org.apache.hadoop.hbase.util.EncryptionTest;
-173import
org.apache.hadoop.hbase.util.FSUtils;
-174import
org.apache.hadoop.hbase.util.HFileArchiveUtil;
-175import
org.apache.hadoop.hbase.util.HasThread;
-176import
org.apache.hadoop.hbase.util.IdLock;
-177import
org.apache.hadoop.hbase.util.ModifyRegionUtils;
-178import
org.apache.hadoop.hbase.util.Pair;
-179import
org.apache.hadoop.hbase.util.Threads;
-180import
org.apache.hadoop.hbase.util.VersionInfo;
-181import
org.apache.hadoop.hbase.util.ZKDataMigrator;
-182import
org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
-183import
org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-184import
org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;
-185import
org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
-186import
org.apache.hadoop.hbase.zookeeper.ZKClusterId;
-187import
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-188import
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-189import
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-190import
org.apache.yetus.audience.InterfaceAudience;
-191import
org.apache.zookeeper.KeeperException;
-192import org.eclipse.jetty.server.Server;
-193import
org.eclipse.jetty.server.ServerConnector;
-194import
org.eclipse.jetty.servlet.ServletHolder;
-195import
org.eclipse.jetty.webapp.WebAppContext;
-196import org.slf4j.Logger;
-197import org.slf4j.LoggerFactory;
-198
-199import
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-200import
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-201import
org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-202import
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-203import
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-204import
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-205import
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-206import
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;
-207import
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-208import
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-209
-210/**
-211 * HMaster is the "master server" for
HBase. An HBase cluster has one active
-212 * master. If many masters are started,
all compete. Whichever wins goes on to
-213 * run the cluster. All others park
themselves in their constructor until
-214 * master or cluster shutdown or until
the active master loses its lease in
-215 * zookeeper. Thereafter, all running
master jostle to take over master role.
-216 *
-217 * <p>The Master can be asked shutdown the cluster. See {@link #shutdown()}. In
-218 * this case it will tell all
regionservers to go down and then wait on them
-219 * all reporting in that they are down.
This master will then shut itself down.
+160import
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+161import
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+162import
org.apache.hadoop.hbase.replication.master.ReplicationPeerConfigUpgrader;
+163import
org.apache.hadoop.hbase.replication.regionserver.Replication;
+164import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
index 6fecbc9..2accda0 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
@@ -34,4140 +34,4141 @@
026import
java.nio.charset.StandardCharsets;
027import java.util.ArrayList;
028import java.util.Arrays;
-029import java.util.Collection;
-030import java.util.EnumSet;
-031import java.util.HashMap;
-032import java.util.Iterator;
-033import java.util.LinkedList;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Set;
-037import java.util.concurrent.Callable;
-038import
java.util.concurrent.ExecutionException;
-039import java.util.concurrent.Future;
-040import java.util.concurrent.TimeUnit;
-041import
java.util.concurrent.TimeoutException;
-042import
java.util.concurrent.atomic.AtomicInteger;
-043import
java.util.concurrent.atomic.AtomicReference;
-044import java.util.regex.Pattern;
-045import java.util.stream.Collectors;
-046import java.util.stream.Stream;
-047import
org.apache.hadoop.conf.Configuration;
-048import
org.apache.hadoop.hbase.Abortable;
-049import
org.apache.hadoop.hbase.CacheEvictionStats;
-050import
org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
-051import
org.apache.hadoop.hbase.ClusterMetrics.Option;
-052import
org.apache.hadoop.hbase.ClusterStatus;
-053import
org.apache.hadoop.hbase.DoNotRetryIOException;
-054import
org.apache.hadoop.hbase.HBaseConfiguration;
-055import
org.apache.hadoop.hbase.HConstants;
-056import
org.apache.hadoop.hbase.HRegionInfo;
-057import
org.apache.hadoop.hbase.HRegionLocation;
-058import
org.apache.hadoop.hbase.HTableDescriptor;
-059import
org.apache.hadoop.hbase.MasterNotRunningException;
-060import
org.apache.hadoop.hbase.MetaTableAccessor;
-061import
org.apache.hadoop.hbase.NamespaceDescriptor;
-062import
org.apache.hadoop.hbase.NamespaceNotFoundException;
-063import
org.apache.hadoop.hbase.NotServingRegionException;
-064import
org.apache.hadoop.hbase.RegionLoad;
-065import
org.apache.hadoop.hbase.RegionLocations;
-066import
org.apache.hadoop.hbase.ServerName;
-067import
org.apache.hadoop.hbase.TableExistsException;
-068import
org.apache.hadoop.hbase.TableName;
-069import
org.apache.hadoop.hbase.TableNotDisabledException;
-070import
org.apache.hadoop.hbase.TableNotFoundException;
-071import
org.apache.hadoop.hbase.UnknownRegionException;
-072import
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-073import
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-074import
org.apache.hadoop.hbase.client.replication.TableCFs;
-075import
org.apache.hadoop.hbase.client.security.SecurityCapability;
-076import
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-077import
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-078import
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-079import
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-080import
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-081import
org.apache.hadoop.hbase.quotas.QuotaFilter;
-082import
org.apache.hadoop.hbase.quotas.QuotaRetriever;
-083import
org.apache.hadoop.hbase.quotas.QuotaSettings;
-084import
org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-085import
org.apache.hadoop.hbase.replication.ReplicationException;
-086import
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-087import
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-088import
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-089import
org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-090import
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-091import
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-092import
org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-093import
org.apache.hadoop.hbase.util.Addressing;
-094import
org.apache.hadoop.hbase.util.Bytes;
-095import
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-096import
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-097import
org.apache.hadoop.hbase.util.Pair;
-098import
org.apache.hadoop.ipc.RemoteException;
-099import
org.apache.hadoop.util.StringUtils;
-100import
org.apache.yetus.audience.InterfaceAudience;
-101import
org.apache.yetus.audience.InterfaceStability;
-102import org.slf4j.Logger;
-103import org.slf4j.LoggerFactory;
-104
-105import
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-106import
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-107import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/devapidocs/org/apache/hadoop/hbase/ClusterMetricsBuilder.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ClusterMetricsBuilder.html
b/devapidocs/org/apache/hadoop/hbase/ClusterMetricsBuilder.html
new file mode 100644
index 000..42ae20b
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/ClusterMetricsBuilder.html
@@ -0,0 +1,709 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+ClusterMetricsBuilder (Apache HBase 3.0.0-SNAPSHOT API)
+
+
+
+
+
+org.apache.hadoop.hbase
+Class ClusterMetricsBuilder
+
+java.lang.Object
+  org.apache.hadoop.hbase.ClusterMetricsBuilder
+
+@InterfaceAudience.Private
+public final class ClusterMetricsBuilder
+extends java.lang.Object
+
+Nested Class Summary
+
+Modifier and Type      Class and Description
+private static class   ClusterMetricsBuilder.ClusterMetricsImpl
+
+Field Summary
+
+Modifier and Type                        Field and Description
+private List<ServerName>                 backupMasterNames
+private Boolean                          balancerOn
+private String                           clusterId
+private List<ServerName>                 deadServerNames
+private String                           hbaseVersion
+private Map<ServerName,ServerMetrics>    liveServerMetrics
+private List<String>                     masterCoprocessorNames
+private int                              masterInfoPort
+private ServerName                       masterName
+private List<RegionState>                regionsInTransition
+
+Constructor Summary
+
+Modifier    Constructor and Description
+private     ClusterMetricsBuilder()
+
+Method Summary
+
+Modifier and Type               Method and Description
+ClusterMetrics                  build()
+static ClusterMetricsBuilder    newBuilder()
+ClusterMetricsBuilder           setBackerMasterNames(List<ServerName> value)
+ClusterMetricsBuilder           setBalancerOn(Boolean value)
+ClusterMetricsBuilder           setClusterId(String value)
+ClusterMetricsBuilder
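The method summary above is truncated, but it already shows the shape of the builder. A hedged usage sketch, assuming the remaining setters follow the same pattern (ClusterMetricsBuilder is @InterfaceAudience.Private, so this is illustrative only, and setBackerMasterNames is the method name exactly as published in this snapshot):

import java.util.Collections;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetricsBuilder;

public class ClusterMetricsBuilderSketch {
  public static void main(String[] args) {
    // Build a minimal ClusterMetrics; the real caller (the master) populates
    // many more fields before calling build().
    ClusterMetrics metrics = ClusterMetricsBuilder.newBuilder()
        .setClusterId("demo-cluster")
        .setBalancerOn(Boolean.TRUE)
        .setBackerMasterNames(Collections.emptyList())
        .build();
    System.out.println(metrics.getClusterId());
  }
}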
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.html
b/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.html
index 2db8dcc..22a1749 100644
--- a/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.html
+++ b/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
@InterfaceAudience.Private
-public class HFileArchiver
+public class HFileArchiver
extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
title="class or interface in java.lang">Object
Utility class to handle the removal of HFiles (or the respective StoreFiles)
for an HRegion from the FileSystem. The HFiles will be archived
or deleted, depending on
@@ -198,7 +198,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
FUNC_FILE_TO_PATH
-private static org.apache.commons.logging.Log
+private static org.slf4j.Logger
LOG
@@ -366,7 +366,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.slf4j.Logger LOG
@@ -375,7 +375,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
SEPARATOR
-private static final String SEPARATOR
+private static final String SEPARATOR
See Also:
Constant
Field Values
@@ -388,7 +388,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
DEFAULT_RETRIES_NUMBER
-private static final int DEFAULT_RETRIES_NUMBER
+private static final int DEFAULT_RETRIES_NUMBER
Number of retries in case of fs operation failure
See Also:
@@ -402,7 +402,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
FUNC_FILE_TO_PATH
-private static final org.apache.hadoop.hbase.shaded.com.google.common.base.Function<HFileArchiver.File,org.apache.hadoop.fs.Path> FUNC_FILE_TO_PATH
+private static final org.apache.hadoop.hbase.shaded.com.google.common.base.Function<HFileArchiver.File,org.apache.hadoop.fs.Path> FUNC_FILE_TO_PATH
@@ -419,7 +419,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
HFileArchiver
-private HFileArchiver()
+private HFileArchiver()
@@ -436,7 +436,7 @@
exists
-public static boolean exists(org.apache.hadoop.conf.Configuration conf,
+public static boolean exists(org.apache.hadoop.conf.Configuration conf,
                              org.apache.hadoop.fs.FileSystem fs,
                              RegionInfo info)
                       throws IOException
@@ -454,7 +454,7 @@
archiveRegion
-public static void archiveRegion(org.apache.hadoop.conf.Configuration conf,
+public static void archiveRegion(org.apache.hadoop.conf.Configuration conf,
                                  org.apache.hadoop.fs.FileSystem fs,
                                  RegionInfo info)
                           throws IOException
@@ -476,7 +476,7 @@
archiveRegion
-public static boolean archiveRegion(org.apache.hadoop.fs.FileSystem fs,
+public static boolean archiveRegion(org.apache.hadoop.fs.FileSystem fs,
                                     org.apache.hadoop.fs.Path rootdir,
                                     org.apache.hadoop.fs.Path tableDir,
                                     org.apache.hadoop.fs.Path regionDir)
@@ -503,7 +503,7 @@
archiveFamily
-public static void archiveFamily(org.apache.hadoop.fs.FileSystem fs,
+public static void archiveFamily(org.apache.hadoop.fs.FileSystem fs,
                                  org.apache.hadoop.conf.Configuration conf,
                                  RegionInfo parent,
                                  org.apache.hadoop.fs.Path tableDir,
@@ -529,7 +529,7 @@
archiveFamilyByFamilyDir
-public static void archiveFamilyByFamilyDir(org.apache.hadoop.fs.FileSystem fs,
+public static void archiveFamilyByFamilyDir(org.apache.hadoop.fs.FileSystem fs,
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/types/TerminatedWrapper.html
--
diff --git
a/apidocs/src-html/org/apache/hadoop/hbase/types/TerminatedWrapper.html
b/apidocs/src-html/org/apache/hadoop/hbase/types/TerminatedWrapper.html
index 8ca79a6..5ed25c3 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/types/TerminatedWrapper.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/types/TerminatedWrapper.html
@@ -25,11 +25,11 @@
017 */
018package org.apache.hadoop.hbase.types;
019
-020import
org.apache.yetus.audience.InterfaceAudience;
-021import
org.apache.hadoop.hbase.util.Bytes;
-022import
org.apache.hadoop.hbase.util.Order;
-023import
org.apache.hadoop.hbase.util.PositionedByteRange;
-024import
org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;
+020import
org.apache.hadoop.hbase.util.Bytes;
+021import
org.apache.hadoop.hbase.util.Order;
+022import
org.apache.hadoop.hbase.util.PositionedByteRange;
+023import
org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;
+024import
org.apache.yetus.audience.InterfaceAudience;
025
026/**
027 * Wraps an existing {@code DataType}
implementation as a terminated
@@ -48,124 +48,145 @@
040 * @throws IllegalArgumentException
when {@code term} is null or empty.
041 */
042 public
TerminatedWrapper(DataTypeT wrapped, byte[] term) {
-043if (null == term || term.length ==
0)
+043if (null == term || term.length == 0)
{
044 throw new
IllegalArgumentException("terminator must be non-null and non-empty.");
-045this.wrapped = wrapped;
-046wrapped.getOrder().apply(term);
-047this.term = term;
-048 }
-049
-050 /**
-051 * Create a terminated version of the
{@code wrapped}.
-052 * {@code term} is converted to a
{@code byte[]} using
-053 * {@link Bytes#toBytes(String)}.
-054 * @throws IllegalArgumentException
when {@code term} is null or empty.
-055 */
-056 public
TerminatedWrapper(DataTypeT wrapped, String term) {
-057this(wrapped, Bytes.toBytes(term));
-058 }
-059
-060 @Override
-061 public boolean isOrderPreserving() {
return wrapped.isOrderPreserving(); }
-062
-063 @Override
-064 public Order getOrder() { return
wrapped.getOrder(); }
+045}
+046this.wrapped = wrapped;
+047wrapped.getOrder().apply(term);
+048this.term = term;
+049 }
+050
+051 /**
+052 * Create a terminated version of the
{@code wrapped}.
+053 * {@code term} is converted to a
{@code byte[]} using
+054 * {@link Bytes#toBytes(String)}.
+055 * @throws IllegalArgumentException
when {@code term} is null or empty.
+056 */
+057 public
TerminatedWrapper(DataTypeT wrapped, String term) {
+058this(wrapped, Bytes.toBytes(term));
+059 }
+060
+061 @Override
+062 public boolean isOrderPreserving() {
+063return wrapped.isOrderPreserving();
+064 }
065
066 @Override
-067 public boolean isNullable() { return
wrapped.isNullable(); }
-068
-069 @Override
-070 public boolean isSkippable() { return
true; }
-071
-072 @Override
-073 public int encodedLength(T val) {
-074return wrapped.encodedLength(val) +
term.length;
-075 }
-076
-077 @Override
-078 public ClassT encodedClass() {
return wrapped.encodedClass(); }
-079
-080 /**
-081 * Return the position at which {@code
term} begins within {@code src},
-082 * or {@code -1} if {@code term} is not
found.
-083 */
-084 protected int
terminatorPosition(PositionedByteRange src) {
-085byte[] a = src.getBytes();
-086final int offset = src.getOffset();
-087int i;
-088SKIP: for (i = src.getPosition(); i < src.getLength(); i++) {
-089 if (a[offset + i] != term[0])
continue;
-090 int j;
-091 for (j = 1; j < term.length && offset + j < src.getLength(); j++) {
-092if (a[offset + i + j] != term[j])
continue SKIP;
-093 }
-094 if (j == term.length) return i; //
success
-095}
-096return -1;
-097 }
-098
-099 /**
-100 * Skip {@code src}'s position forward
over one encoded value.
-101 * @param src the buffer containing the
encoded value.
-102 * @return number of bytes skipped.
-103 * @throws IllegalArgumentException
when the terminator sequence is not found.
-104 */
-105 @Override
-106 public int skip(PositionedByteRange
src) {
-107if (wrapped.isSkippable()) {
-108 int ret = wrapped.skip(src);
-109 src.setPosition(src.getPosition() +
term.length);
-110 return ret + term.length;
-111} else {
-112 // find the terminator position
-113 final int start =
src.getPosition();
-114 int skipped =
terminatorPosition(src);
-115 if (-1 == skipped) throw new
IllegalArgumentException("Terminator sequence not found.");
-116 skipped += term.length;
-117 src.setPosition(skipped);
-118 return skipped - start;
-119}
-120 }
-121
+067 public Order getOrder() {
+068return
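The terminator scan above is easier to follow without the HTML damage. A standalone restatement of terminatorPosition's logic in plain Java (illustrative, not the HBase class itself):

public class TerminatorScanSketch {
  // Return the position at which term begins within a[pos..len), or -1.
  static int terminatorPosition(byte[] a, int pos, int len, byte[] term) {
    SKIP:
    for (int i = pos; i < len; i++) {
      if (a[i] != term[0]) continue;            // first byte must match
      int j;
      for (j = 1; j < term.length && i + j < len; j++) {
        if (a[i + j] != term[j]) continue SKIP; // partial match; resume scan
      }
      if (j == term.length) return i;           // full terminator matched
    }
    return -1;
  }

  public static void main(String[] args) {
    byte[] buf = {1, 2, 0, 0, 3};
    System.out.println(terminatorPosition(buf, 0, buf.length, new byte[]{0, 0})); // 2
  }
}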
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
index a9c1142..12ade22 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
@@ -388,196 +388,200 @@
380
381 @Override
382 public DatanodeInfo[] getPipeline() {
-383State state = this.state;
-384return state == State.STREAMING ||
state == State.CLOSING ? locations : new DatanodeInfo[0];
-385 }
-386
-387 private void flushBuffer(CompletableFuture<Long> future, ByteBuf dataBuf,
-388 long nextPacketOffsetInBlock, boolean syncBlock) {
-389int dataLen =
dataBuf.readableBytes();
-390int chunkLen =
summer.getBytesPerChecksum();
-391int trailingPartialChunkLen = dataLen
% chunkLen;
-392int numChecks = dataLen / chunkLen +
(trailingPartialChunkLen != 0 ? 1 : 0);
-393int checksumLen = numChecks *
summer.getChecksumSize();
-394ByteBuf checksumBuf =
alloc.directBuffer(checksumLen);
-395
summer.calculateChunkedSums(dataBuf.nioBuffer(), checksumBuf.nioBuffer(0,
checksumLen));
-396
checksumBuf.writerIndex(checksumLen);
-397PacketHeader header = new
PacketHeader(4 + checksumLen + dataLen, nextPacketOffsetInBlock,
-398nextPacketSeqno, false, dataLen,
syncBlock);
-399int headerLen =
header.getSerializedSize();
-400ByteBuf headerBuf =
alloc.buffer(headerLen);
-401
header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
-402headerBuf.writerIndex(headerLen);
-403Callback c = new Callback(future,
nextPacketOffsetInBlock + dataLen, datanodeList);
-404waitingAckQueue.addLast(c);
-405// recheck again after we pushed the
callback to queue
-406if (state != State.STREAMING && waitingAckQueue.peekFirst() == c) {
-407 future.completeExceptionally(new
IOException("stream already broken"));
-408 // it's the one we have just pushed
or just a no-op
-409 waitingAckQueue.removeFirst();
-410 return;
-411}
-412datanodeList.forEach(ch -> {
-413
ch.write(headerBuf.retainedDuplicate());
-414
ch.write(checksumBuf.retainedDuplicate());
-415
ch.writeAndFlush(dataBuf.retainedDuplicate());
-416});
-417checksumBuf.release();
-418headerBuf.release();
-419dataBuf.release();
-420nextPacketSeqno++;
-421 }
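The chunked-checksum sizing in flushBuffer above is easiest to see with concrete numbers. A small worked example; the 512-byte chunk and 4-byte checksum are illustrative stand-ins for summer.getBytesPerChecksum() and summer.getChecksumSize():

public class ChecksumSizingDemo {
  public static void main(String[] args) {
    int dataLen = 1300;      // bytes waiting in dataBuf
    int chunkLen = 512;      // assumed bytes-per-checksum
    int checksumSize = 4;    // assumed CRC width
    int trailingPartialChunkLen = dataLen % chunkLen;                            // 276
    int numChecks = dataLen / chunkLen + (trailingPartialChunkLen != 0 ? 1 : 0); // 3
    int checksumLen = numChecks * checksumSize;                                  // 12
    System.out.println(numChecks + " chunks -> " + checksumLen + " checksum bytes");
  }
}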
-422
-423 private void flush0(CompletableFuture<Long> future, boolean syncBlock) {
-424if (state != State.STREAMING) {
-425 future.completeExceptionally(new
IOException("stream already broken"));
-426 return;
-427}
-428int dataLen = buf.readableBytes();
-429if (dataLen ==
trailingPartialChunkLength) {
-430 // no new data
-431 long lengthAfterFlush =
nextPacketOffsetInBlock + dataLen;
-432 Callback lastFlush =
waitingAckQueue.peekLast();
-433 if (lastFlush != null) {
-434Callback c = new Callback(future,
lengthAfterFlush, Collections.emptyList());
-435waitingAckQueue.addLast(c);
-436// recheck here if we have
already removed the previous callback from the queue
-437if (waitingAckQueue.peekFirst()
== c) {
-438 // all previous callbacks have
been removed
-439 // notice that this does mean
we will always win here because the background thread may
-440 // have already started to mark
the future here as completed in the completed or failed
-441 // methods but haven't removed
it from the queue yet. That's also why the removeFirst
-442 // call below may be a no-op.
-443 if (state != State.STREAMING)
{
-444
future.completeExceptionally(new IOException("stream already broken"));
-445 } else {
-446
future.complete(lengthAfterFlush);
-447 }
-448 // it's the one we have just
pushed or just a no-op
-449
waitingAckQueue.removeFirst();
-450}
-451 } else {
-452// we must have acked all the
data so the ackedBlockLength must be same with
-453// lengthAfterFlush
-454
future.complete(lengthAfterFlush);
-455 }
-456 return;
-457}
-458
-459if (encryptor != null) {
-460 ByteBuf encryptBuf =
alloc.directBuffer(dataLen);
-461 buf.readBytes(encryptBuf,
trailingPartialChunkLength);
-462 int toEncryptLength = dataLen -
trailingPartialChunkLength;
-463 try {
-464
encryptor.encrypt(buf.nioBuffer(trailingPartialChunkLength, toEncryptLength),
-465
encryptBuf.nioBuffer(trailingPartialChunkLength, toEncryptLength));
-466
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/executor/EventType.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/executor/EventType.html
b/devapidocs/src-html/org/apache/hadoop/hbase/executor/EventType.html
index 851e7d3..97311b3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/executor/EventType.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/executor/EventType.html
@@ -7,322 +7,318 @@
001/**
-002 *
-003 * Licensed to the Apache Software
Foundation (ASF) under one
-004 * or more contributor license
agreements. See the NOTICE file
-005 * distributed with this work for
additional information
-006 * regarding copyright ownership. The
ASF licenses this file
-007 * to you under the Apache License,
Version 2.0 (the
-008 * "License"); you may not use this file
except in compliance
-009 * with the License. You may obtain a
copy of the License at
-010 *
-011 *
http://www.apache.org/licenses/LICENSE-2.0
-012 *
-013 * Unless required by applicable law or
agreed to in writing, software
-014 * distributed under the License is
distributed on an "AS IS" BASIS,
-015 * WITHOUT WARRANTIES OR CONDITIONS OF
ANY KIND, either express or implied.
-016 * See the License for the specific
language governing permissions and
-017 * limitations under the License.
-018 */
+002 * Licensed to the Apache Software
Foundation (ASF) under one
+003 * or more contributor license
agreements. See the NOTICE file
+004 * distributed with this work for
additional information
+005 * regarding copyright ownership. The
ASF licenses this file
+006 * to you under the Apache License,
Version 2.0 (the
+007 * "License"); you may not use this file
except in compliance
+008 * with the License. You may obtain a
copy of the License at
+009 *
+010 *
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or
agreed to in writing, software
+013 * distributed under the License is
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF
ANY KIND, either express or implied.
+015 * See the License for the specific
language governing permissions and
+016 * limitations under the License.
+017 */
+018package
org.apache.hadoop.hbase.executor;
019
-020package
org.apache.hadoop.hbase.executor;
+020import
org.apache.yetus.audience.InterfaceAudience;
021
-022import
org.apache.yetus.audience.InterfaceAudience;
-023
-024/**
-025 * List of all HBase event handler types. Event types are named by a
-026 * convention: event type names specify the component from which the event
-027 * originated and then where it's destined -- e.g. RS2ZK_ prefix means the
-028 * event came from a regionserver destined for zookeeper -- and then what
-029 * the event is; e.g. REGION_OPENING.
-030 *
-031 * <p>We give the enums indices so we can add types later and keep them
-032 * grouped together rather than have to add them always to the end as we
-033 * would have to if we used raw enum ordinals.
-034 */
-035@InterfaceAudience.Private
-036public enum EventType {
-037 // Messages originating from RS (NOTE: there is NO direct communication from
-038 // RS to Master). These are a result of RS updates into ZK.
-039 // RS_ZK_REGION_CLOSING(1), // It is replaced by M_ZK_REGION_CLOSING(HBASE-4739)
-040
-041 /**
-042 * RS_ZK_REGION_CLOSED<br>
-043 *
-044 * RS has finished closing a region.
-045 */
-046 RS_ZK_REGION_CLOSED (2, ExecutorType.MASTER_CLOSE_REGION),
-047 /**
-048 * RS_ZK_REGION_OPENING<br>
-049 *
-050 * RS is in process of opening a region.
-051 */
-052 RS_ZK_REGION_OPENING (3, null),
-053 /**
-054 * RS_ZK_REGION_OPENED<br>
-055 *
-056 * RS has finished opening a region.
-057 */
-058 RS_ZK_REGION_OPENED (4, ExecutorType.MASTER_OPEN_REGION),
-059 /**
-060 * RS_ZK_REGION_SPLITTING<br>
-061 *
-062 * RS has started a region split after master says it's ok to move on.
-063 */
-064 RS_ZK_REGION_SPLITTING(5, null),
-065 /**
-066 * RS_ZK_REGION_SPLIT<br>
-067 *
-068 * RS split has completed and is notifying the master.
-069 */
-070 RS_ZK_REGION_SPLIT(6, ExecutorType.MASTER_SERVER_OPERATIONS),
-071 /**
-072 * RS_ZK_REGION_FAILED_OPEN<br>
-073 *
-074 * RS failed to open a region.
-075 */
-076 RS_ZK_REGION_FAILED_OPEN (7, ExecutorType.MASTER_CLOSE_REGION),
-077 /**
-078 * RS_ZK_REGION_MERGING<br>
-079 *
-080 * RS has started merging regions after master says it's ok to move on.
-081 */
-082 RS_ZK_REGION_MERGING (8, null),
-083 /**
-084 * RS_ZK_REGION_MERGE<br>
-085 *
-086 * RS region merge has completed and is notifying the master.
-087 */
-088 RS_ZK_REGION_MERGED (9, ExecutorType.MASTER_SERVER_OPERATIONS),
-089 /**
-090 * RS_ZK_REQUEST_REGION_SPLIT<br>
-091 *
-092 * RS has requested to split a region. This
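The "explicit index" convention this javadoc explains, where constants carry their own stable code instead of relying on ordinal(), looks like this in miniature (a sketch, not the real enum):

enum EventTypeSketch {
  RS_ZK_REGION_CLOSED(2),
  RS_ZK_REGION_OPENING(3),
  RS_ZK_REGION_OPENED(4);
  // A later RS_ZK_* type can take code 10 yet still be declared next to its
  // group; ordinal() would force append-only declaration order instead.
  private final int code;
  EventTypeSketch(int code) { this.code = code; }
  int getCode() { return code; }
}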
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/src-html/org/apache/hadoop/hbase/IndividualBytesFieldCell.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/IndividualBytesFieldCell.html
b/devapidocs/src-html/org/apache/hadoop/hbase/IndividualBytesFieldCell.html
index 8c8c306..d6a8ea3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/IndividualBytesFieldCell.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/IndividualBytesFieldCell.html
@@ -26,7 +26,7 @@
018
019package org.apache.hadoop.hbase;
020
-021import
org.apache.hadoop.hbase.util.ArrayUtils;
+021import
org.apache.commons.lang3.ArrayUtils;
022import
org.apache.hadoop.hbase.util.Bytes;
023import
org.apache.hadoop.hbase.util.ClassSize;
024import
org.apache.yetus.audience.InterfaceAudience;
@@ -69,12 +69,12 @@
061
062 public IndividualBytesFieldCell(byte[]
row, byte[] family, byte[] qualifier,
063 long
timestamp, KeyValue.Type type, long seqId, byte[] value, byte[] tags) {
-064this(row, 0,
ArrayUtils.length(row),
-065family, 0,
ArrayUtils.length(family),
-066qualifier, 0,
ArrayUtils.length(qualifier),
+064this(row, 0,
ArrayUtils.getLength(row),
+065family, 0,
ArrayUtils.getLength(family),
+066qualifier, 0,
ArrayUtils.getLength(qualifier),
067timestamp, type, seqId,
-068value, 0,
ArrayUtils.length(value),
-069tags, 0,
ArrayUtils.length(tags));
+068value, 0,
ArrayUtils.getLength(value),
+069tags, 0,
ArrayUtils.getLength(tags));
070 }
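The swap from the HBase-internal ArrayUtils.length to commons-lang3's ArrayUtils.getLength works because getLength is null-safe, returning 0 for a null array, which is exactly what this constructor relies on when a field such as tags is absent. A quick check:

import org.apache.commons.lang3.ArrayUtils;

public class GetLengthDemo {
  public static void main(String[] args) {
    System.out.println(ArrayUtils.getLength(new byte[] {1, 2, 3})); // 3
    System.out.println(ArrayUtils.getLength((byte[]) null));        // 0, no NPE
  }
}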
071
072 public IndividualBytesFieldCell(byte[]
row, int rOffset, int rLength,
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index 65a682f..80d98d6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
008@InterfaceAudience.Private
009public class Version {
010 public static final String version =
"3.0.0-SNAPSHOT";
-011 public static final String revision =
"2e813f106f2647f8704378efbf3531051c5aea78";
+011 public static final String revision =
"7f586995a8bd2888721e2d9210368494bf8fc957";
012 public static final String user =
"jenkins";
-013 public static final String date = "Tue
Dec 12 14:42:35 UTC 2017";
+013 public static final String date = "Wed
Dec 13 14:42:16 UTC 2017";
014 public static final String url =
"git://asf920.gq1.ygridcore.net/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015 public static final String srcChecksum
= "5e16f126f518d8df265d748b4f2d28a5";
+015 public static final String srcChecksum
= "22e0e51cc8c8efa2537ec94a00d6a348";
016}
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/constraint/ConstraintProcessor.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/constraint/ConstraintProcessor.html
b/devapidocs/org/apache/hadoop/hbase/constraint/ConstraintProcessor.html
index 5832fe5..736c955 100644
--- a/devapidocs/org/apache/hadoop/hbase/constraint/ConstraintProcessor.html
+++ b/devapidocs/org/apache/hadoop/hbase/constraint/ConstraintProcessor.html
@@ -271,7 +271,7 @@ implements RegionObserver
-postAppend, postBatchMutate, postBatchMutateIndispensably, postBulkLoadHFile, postCheckAndDelete, postCheckAndPut, postClose, postCloseRegionOperation, postCommitStoreFile, postCompact, postCompactSelection, postDelete, postExists, postFlush, postFlush, postGetOp, postIncrement, postInstantiateDeleteTracker, postMemStoreCompaction, postMutationBeforeWAL, postOpen, postPut, postReplayWALs, postScannerClose, postScannerNext, postScannerOpen, postStartRegionOperation, postStoreFileReaderOpen, postWALRestore, preAppend, preAppendAfterRowLock, preBatchMutate, preBulkLoadHFile, preCheckAndDelete, preCheckAndDeleteAfterRowLock, preCheckAndPut, preCheckAndPutAfterRowLock, preClose, preCommitStoreFile, preCompact, preCompactScannerOpen, preCompactSelection, preDelete, preExists, preFlush, preFlush, preFlushScannerOpen, preGetOp, preIncrement, preIncrementAfterRowLock, preMemStoreCompaction, preMemStoreCompactionCompact, preMemStoreCompactionCompactScannerOpen, preOpen, prePrepareTimeStampForDeleteVersion, preReplayWALs, preScannerClose, preScannerNext, preScannerOpen, preStoreFileReaderOpen, preStoreScannerOpen, preWALRestore
+postAppend, postBatchMutate, postBatchMutateIndispensably, postBulkLoadHFile, postCheckAndDelete, postCheckAndPut, postClose, postCloseRegionOperation, postCommitStoreFile, postCompact, postCompactSelection, postDelete, postExists, postFlush, postFlush, postGetOp, postIncrement, postInstantiateDeleteTracker, postMemStoreCompaction, postMutationBeforeWAL, postOpen, postPut, postReplayWALs, postScannerClose, postScannerNext, postScannerOpen, postStartRegionOperation, postStoreFileReaderOpen, postWALRestore, preAppend, preAppendAfterRowLock, preBatchMutate, preBulkLoadHFile, preCheckAndDelete, preCheckAndDeleteAfterRowLock, preCheckAndPut, preCheckAndPutAfterRowLock, preClose, preCommitStoreFile, preCompact, preCompactScannerOpen, preCompactSelection, preDelete, preExists, preFlush, preFlush, preFlushScannerOpen, preGetOp, preIncrement, preIncrementAfterRowLock, preMemStoreCompaction, preMemStoreCompactionCompact, preMemStoreCompactionCompactScannerOpen, preOpen, prePrepareTimeStampForDeleteVersion, preReplayWALs, preScannerClose, preScannerNext, preScannerOpen, preStoreFileReaderOpen, preStoreScannerOpen, preWALRestore
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.html
b/devapidocs/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.html
index 3cecc49..4a64995 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.html
@@ -501,7 +501,8 @@ implements Specified by:
getConfiguration in
interface CoprocessorEnvironment<C extends Coprocessor>
Returns:
-the configuration
+a Read-only Configuration; throws UnsupportedOperationException if you try
+ to set a configuration.
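A minimal sketch of the read-only behavior this javadoc now documents: reads delegate, writes throw. This is hypothetical code, not HBase's actual read-only wrapper class:

import org.apache.hadoop.conf.Configuration;

class ReadOnlyConfSketch extends Configuration {
  ReadOnlyConfSketch(Configuration delegate) {
    super(delegate); // copy-construct so reads see the coprocessor's config
  }
  @Override
  public void set(String name, String value) {
    // Real HBase covers more mutators; this shows the idea only.
    throw new UnsupportedOperationException("coprocessor configuration is read-only");
  }
}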
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.html
--
diff --git
a/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.html
b/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.html
index be05971..141225d 100644
---
a/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.html
+++
b/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.html
@@ -134,13 +134,13 @@ extends
Connection
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html
--
diff --git
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html
index cfdb581..836f61d 100644
---
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html
+++
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html
@@ -7,7 +7,7 @@
-
+
Apache HBase - Exemplar for hbase-client archetype Project
Dependencies
@@ -924,216 +924,223 @@
jar
Apache License, Version 2.0
+org.apache.hbase           hbase-zookeeper     3.0.0-SNAPSHOT    tests  test-jar  Apache License, Version 2.0
org.apache.htrace           htrace-core         3.2.0-incubating         jar       The Apache Software License, Version 2.0
org.apache.zookeeper        zookeeper           3.4.6             tests  test-jar
org.codehaus.jackson        jackson-core-asl    1.9.13                   jar       The Apache Software License, Version 2.0
org.codehaus.jackson        jackson-mapper-asl  1.9.13                   jar       The Apache Software License, Version 2.0
org.eclipse.jetty           jetty-http          9.3.19.v20170502         jar       Apache Software License - Version 2.0 / Eclipse Public License - Version 1.0
org.eclipse.jetty           jetty-io            9.3.19.v20170502         jar       Apache Software License - Version 2.0 / Eclipse Public License - Version 1.0
org.eclipse.jetty           jetty-security      9.3.19.v20170502         jar       Apache Software License - Version 2.0 / Eclipse Public License - Version 1.0
org.eclipse.jetty           jetty-server        9.3.19.v20170502         jar       Apache Software License - Version 2.0 / Eclipse Public License - Version 1.0
org.eclipse.jetty           jetty-servlet       9.3.19.v20170502         jar       Apache Software License - Version 2.0 / Eclipse Public License - Version 1.0
org.eclipse.jetty           jetty-util          9.3.19.v20170502         jar       Apache Software License - Version 2.0 / Eclipse Public License - Version 1.0
org.eclipse.jetty           jetty-util-ajax     9.3.19.v20170502         jar       Apache Software License - Version 2.0 / Eclipse Public License - Version 1.0
org.eclipse.jetty           jetty-webapp        9.3.19.v20170502         jar       Apache Software License - Version 2.0 / Eclipse Public License - Version 1.0
org.eclipse.jetty           jetty-xml           9.3.19.v20170502         jar       Apache Software License - Version 2.0 / Eclipse Public License - Version 1.0
org.fusesource.leveldbjni   leveldbjni-all      1.8                      jar       The BSD 3-Clause License
org.glassfish               javax.el            3.0.1-b08                jar       CDDL + GPLv2 with classpath exception
org.glassfish.hk2           hk2-api             2.5.0-b32                jar       CDDL + GPLv2 with classpath exception
org.glassfish.hk2           hk2-locator         2.5.0-b32                jar       CDDL + GPLv2 with classpath exception
org.glassfish.hk2           hk2-utils           2.5.0-b32                jar       CDDL + GPLv2 with classpath exception
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-build-configuration/hbase-archetypes/hbase-client-project/license.html
--
diff --git
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/license.html
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/license.html
index af8b9cf..8344654 100644
---
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/license.html
+++
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/license.html
@@ -7,7 +7,7 @@
-
+
Apache HBase - Exemplar for hbase-client archetype Project
Licenses
@@ -326,7 +326,7 @@
https://www.apache.org/;>The Apache Software
Foundation.
All rights reserved.
- Last Published:
2017-12-02
+ Last Published:
2017-12-03
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-build-configuration/hbase-archetypes/hbase-client-project/mail-lists.html
--
diff --git
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/mail-lists.html
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/mail-lists.html
index afe6a90..ec3a91a 100644
---
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/mail-lists.html
+++
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/mail-lists.html
@@ -7,7 +7,7 @@
-
+
Apache HBase - Exemplar for hbase-client archetype Project
Mailing Lists
@@ -176,7 +176,7 @@
https://www.apache.org/;>The Apache Software
Foundation.
All rights reserved.
- Last Published:
2017-12-02
+ Last Published:
2017-12-03
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugin-management.html
--
diff --git
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugin-management.html
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugin-management.html
index 0191301..bea329e 100644
---
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugin-management.html
+++
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugin-management.html
@@ -7,7 +7,7 @@
-
+
Apache HBase - Exemplar for hbase-client archetype Project
Plugin Management
@@ -271,7 +271,7 @@
https://www.apache.org/;>The Apache Software
Foundation.
All rights reserved.
- Last Published:
2017-12-02
+ Last Published:
2017-12-03
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugins.html
--
diff --git
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugins.html
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugins.html
index 967ed67..374e080 100644
---
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugins.html
+++
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugins.html
@@ -7,7 +7,7 @@
-
+
Apache HBase - Exemplar for hbase-client archetype Project
Plugins
@@ -226,7 +226,7 @@
https://www.apache.org/;>The Apache Software
Foundation.
All rights reserved.
- Last Published:
2017-12-02
+ Last Published:
2017-12-03
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-info.html
--
diff --git
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-info.html
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-info.html
index 7f55382..19b1cd2 100644
---
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-info.html
+++
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-info.html
@@ -7,7 +7,7 @@
-
+
Apache HBase - Exemplar for hbase-client archetype Project
Information
@@ -167,7 +167,7 @@
https://www.apache.org/;>The Apache Software
Foundation.
All rights reserved.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
index 25e368d..d0f781f 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
@@ -25,798 +25,798 @@
017 */
018package
org.apache.hadoop.hbase.io.asyncfs;
019
-020import static
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
-021import static
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-022import static
org.apache.hadoop.fs.CreateFlag.CREATE;
-023import static
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
-024import static
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
-025import static
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+020import static
org.apache.hadoop.fs.CreateFlag.CREATE;
+021import static
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+022import static
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
+023import static
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+024import static
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
+025import static
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
026import static
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
027import static
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
028import static
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
029import static
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
030
-031import
org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
-032import
org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
-033import
com.google.protobuf.CodedOutputStream;
-034
-035import
org.apache.hadoop.hbase.shaded.io.netty.bootstrap.Bootstrap;
-036import
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-037import
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-038import
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufOutputStream;
-039import
org.apache.hadoop.hbase.shaded.io.netty.buffer.PooledByteBufAllocator;
-040import
org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-041import
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFuture;
-042import
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
-043import
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler;
-044import
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-045import
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInitializer;
-046import
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelPipeline;
-047import
org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-048import
org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-049import
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-050import
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-051import
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-052import
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-053import
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-054import
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.FutureListener;
-055import
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-056
-057import java.io.IOException;
-058import
java.lang.reflect.InvocationTargetException;
-059import java.lang.reflect.Method;
-060import java.util.ArrayList;
-061import java.util.EnumSet;
-062import java.util.List;
-063import java.util.concurrent.TimeUnit;
-064
-065import org.apache.commons.logging.Log;
-066import
org.apache.commons.logging.LogFactory;
-067import
org.apache.hadoop.conf.Configuration;
-068import
org.apache.hadoop.crypto.CryptoProtocolVersion;
-069import
org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.CreateFlag;
-071import org.apache.hadoop.fs.FileSystem;
-072import
org.apache.hadoop.fs.FileSystemLinkResolver;
-073import org.apache.hadoop.fs.Path;
-074import
org.apache.hadoop.fs.UnresolvedLinkException;
-075import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
index d438f22..7c59e27 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
@@ -1290,8 +1290,8 @@
1282 CompactType
compactType) throws IOException {
1283switch (compactType) {
1284 case MOB:
-1285
compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName),
major,
-1286 columnFamily);
+1285
compact(this.connection.getAdminForMaster(),
RegionInfo.createMobRegionInfo(tableName),
+1286major, columnFamily);
1287break;
1288 case NORMAL:
1289checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
3240 new
CallableAdminProtos.GetRegionInfoResponse.CompactionState() {
3241@Override
3242public
AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243 RegionInfo info =
getMobRegionInfo(tableName);
+3243 RegionInfo info =
RegionInfo.createMobRegionInfo(tableName);
3244 GetRegionInfoRequest
request =
3245
RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
3246 GetRegionInfoResponse
response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
3304}
3305break;
3306 default:
-3307throw new
IllegalArgumentException("Unknowne compactType: " + compactType);
+3307throw new
IllegalArgumentException("Unknown compactType: " + compactType);
3308}
3309if (state != null) {
3310 return
ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
3839});
3840 }
3841
-3842 private RegionInfo
getMobRegionInfo(TableName tableName) {
-3843return
RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844.build();
-3845 }
-3846
-3847 private RpcControllerFactory
getRpcControllerFactory() {
-3848return this.rpcControllerFactory;
-3849 }
-3850
-3851 @Override
-3852 public void addReplicationPeer(String
peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853 throws IOException {
-3854executeCallable(new
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3855 @Override
-3856 protected Void rpcCall() throws
Exception {
-3857
master.addReplicationPeer(getRpcController(),
-3858
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig,
enabled));
-3859return null;
-3860 }
-3861});
-3862 }
-3863
-3864 @Override
-3865 public void
removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3867 @Override
-3868 protected Void rpcCall() throws
Exception {
-3869
master.removeReplicationPeer(getRpcController(),
-3870
RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871return null;
-3872 }
-3873});
-3874 }
-3875
-3876 @Override
-3877 public void
enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3879 @Override
-3880 protected Void rpcCall() throws
Exception {
-3881
master.enableReplicationPeer(getRpcController(),
-3882
RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883return null;
-3884 }
-3885});
-3886 }
-3887
-3888 @Override
-3889 public void
disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3891 @Override
-3892 protected Void rpcCall() throws
Exception {
-3893
master.disableReplicationPeer(getRpcController(),
-3894
RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895return null;
-3896 }
-3897});
-3898 }
-3899
-3900 @Override
-3901 public ReplicationPeerConfig
getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new
MasterCallableReplicationPeerConfig(getConnection(),
-3903getRpcControllerFactory()) {
-3904 @Override
-3905 protected ReplicationPeerConfig
rpcCall() throws Exception {
-3906GetReplicationPeerConfigResponse
response = master.getReplicationPeerConfig(
-3907 getRpcController(),
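The excerpt breaks off above, but from the client side the replication-peer methods it shows are reached through the public Admin interface. A hedged usage sketch (assumes a reachable cluster; the cluster key is a placeholder):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class ReplicationPeerAdminSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
      peerConfig.setClusterKey("zk1:2181:/hbase");        // hypothetical peer cluster
      admin.addReplicationPeer("peer1", peerConfig, true); // add and enable
      admin.disableReplicationPeer("peer1");
      admin.removeReplicationPeer("peer1");
    }
  }
}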
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
--
diff --git
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
index 29ea7b3..6ed75c9 100644
---
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
+++
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
@@ -1313,7093 +1313,7082 @@
1305
1306 @Override
1307 public boolean isSplittable() {
-1308boolean result = isAvailable() && !hasReferences();
-1309LOG.info("ASKED IF SPLITTABLE " +
result + " " + getRegionInfo().getShortNameToLog(),
-1310 new Throwable("LOGGING:
REMOVE"));
-1311// REMOVE BELOW
-1312LOG.info("DEBUG LIST ALL FILES");
-1313for (HStore store :
this.stores.values()) {
-1314 LOG.info("store " +
store.getColumnFamilyName());
-1315 for (HStoreFile sf :
store.getStorefiles()) {
-1316
LOG.info(sf.toStringDetailed());
-1317 }
-1318}
-1319return result;
-1320 }
-1321
-1322 @Override
-1323 public boolean isMergeable() {
-1324if (!isAvailable()) {
-1325 LOG.debug("Region " + this
-1326 + " is not mergeable because
it is closing or closed");
-1327 return false;
-1328}
-1329if (hasReferences()) {
-1330 LOG.debug("Region " + this
-1331 + " is not mergeable because
it has references");
-1332 return false;
-1333}
-1334
-1335return true;
+1308return isAvailable() && !hasReferences();
+1309 }
+1310
+1311 @Override
+1312 public boolean isMergeable() {
+1313if (!isAvailable()) {
+1314 LOG.debug("Region " + this
+1315 + " is not mergeable because
it is closing or closed");
+1316 return false;
+1317}
+1318if (hasReferences()) {
+1319 LOG.debug("Region " + this
+1320 + " is not mergeable because
it has references");
+1321 return false;
+1322}
+1323
+1324return true;
+1325 }
+1326
+1327 public boolean areWritesEnabled() {
+1328synchronized(this.writestate) {
+1329 return
this.writestate.writesEnabled;
+1330}
+1331 }
+1332
+1333 @VisibleForTesting
+1334 public MultiVersionConcurrencyControl
getMVCC() {
+1335return mvcc;
1336 }
1337
-1338 public boolean areWritesEnabled() {
-1339synchronized(this.writestate) {
-1340 return
this.writestate.writesEnabled;
-1341}
-1342 }
-1343
-1344 @VisibleForTesting
-1345 public MultiVersionConcurrencyControl
getMVCC() {
-1346return mvcc;
-1347 }
-1348
-1349 @Override
-1350 public long getMaxFlushedSeqId() {
-1351return maxFlushedSeqId;
+1338 @Override
+1339 public long getMaxFlushedSeqId() {
+1340return maxFlushedSeqId;
+1341 }
+1342
+1343 /**
+1344 * @return readpoint considering given
IsolationLevel. Pass {@code null} for default
+1345 */
+1346 public long
getReadPoint(IsolationLevel isolationLevel) {
+1347if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
+1348 // This scan can read even
uncommitted transactions
+1349 return Long.MAX_VALUE;
+1350}
+1351return mvcc.getReadPoint();
1352 }
1353
-1354 /**
-1355 * @return readpoint considering given
IsolationLevel. Pass {@code null} for default
-1356 */
-1357 public long
getReadPoint(IsolationLevel isolationLevel) {
-1358if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
-1359 // This scan can read even
uncommitted transactions
-1360 return Long.MAX_VALUE;
-1361}
-1362return mvcc.getReadPoint();
-1363 }
-1364
-1365 public boolean
isLoadingCfsOnDemandDefault() {
-1366return
this.isLoadingCfsOnDemandDefault;
-1367 }
-1368
-1369 /**
-1370 * Close down this HRegion. Flush the
cache, shut down each HStore, don't
-1371 * service any more calls.
-1372 *
-1373 * <p>This method could take some time to execute, so don't call it from a
-1374 * time-sensitive thread.
-1375 *
-1376 * @return Vector of all the storage
files that the HRegion's component
-1377 * HStores make use of. It's a list
of all StoreFile objects. Returns empty
-1378 * vector if already closed and null
if judged that it should not close.
-1379 *
-1380 * @throws IOException e
-1381 * @throws DroppedSnapshotException
Thrown when replay of wal is required
-1382 * because a Snapshot was not properly
persisted. The region is put in closing mode, and the
-1383 * caller MUST abort after this.
-1384 */
-1385 public Map<byte[], List<HStoreFile>> close() throws IOException {
-1386return close(false);
-1387 }
-1388
-1389 private final Object closeLock = new
Object();
-1390
-1391 /** Conf key for the periodic flush
interval */
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html
b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html
index d98042d..d549086 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html
@@ -42,2537 +42,2536 @@
034
035import org.apache.commons.logging.Log;
036import
org.apache.commons.logging.LogFactory;
-037import
org.apache.yetus.audience.InterfaceAudience;
+037import
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
038import
org.apache.hadoop.hbase.util.ByteBufferUtils;
039import
org.apache.hadoop.hbase.util.Bytes;
040import
org.apache.hadoop.hbase.util.ClassSize;
041import
org.apache.hadoop.io.RawComparator;
-042
-043import
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-044/**
-045 * An HBase Key/Value. This is the fundamental HBase Type.
-046 * <p>
-047 * HBase applications and users should use the Cell interface and avoid directly using KeyValue and
-048 * member functions not defined in Cell.
-049 * <p>
-050 * If being used client-side, the primary methods to access individual fields are
-051 * {@link #getRowArray()}, {@link #getFamilyArray()}, {@link #getQualifierArray()},
-052 * {@link #getTimestamp()}, and {@link #getValueArray()}. These methods allocate new byte arrays
-053 * and return copies. Avoid their use server-side.
-054 * <p>
-055 * Instances of this class are immutable. They do not implement Comparable but Comparators are
-056 * provided. Comparators change with context, whether user table or a catalog table comparison. It's
-057 * critical you use the appropriate comparator. There are Comparators for normal HFiles, Meta's
-058 * Hfiles, and bloom filter keys.
-059 * <p>
-060 * KeyValue wraps a byte array and takes offsets and lengths into the passed array at where to start
-061 * interpreting the content as KeyValue. The KeyValue format inside a byte array is:
-062 * <keylength> <valuelength> <key> <value>. Key is further
-063 * decomposed as: <rowlength> <row> <columnfamilylength>
-064 * <columnfamily> <columnqualifier>
-065 * <timestamp> <keytype>. The rowlength maximum is
-066 * Short.MAX_SIZE, column family length maximum is Byte.MAX_SIZE, and
-067 * column qualifier + key length must be < Integer.MAX_SIZE. The column does not
-068 * contain the family/qualifier delimiter, {@link #COLUMN_FAMILY_DELIMITER}.<br>
-069 * KeyValue can optionally contain Tags. When it contains tags, it is added in the byte array after
-070 * the value part. The format for this part is: <tagslength><tagsbytes>.
-071 * tagslength maximum is Short.MAX_SIZE. The tagsbytes
-072 * contain one or more tags, where each tag is of the form
-073 * <taglength><tagtype><tagbytes>. tagtype is one byte
-074 * and taglength maximum is Short.MAX_SIZE and it includes 1 byte type
-075 * length and actual tag bytes length.
-076 */
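The framing just described can be checked with simple arithmetic. A worked example of the sizes for a tiny cell (no tags), following <keylength><valuelength><key><value> with key = rowlength(2) + row + familylength(1) + family + qualifier + timestamp(8) + keytype(1):

public class KeyValueLayoutDemo {
  public static void main(String[] args) {
    int rowLen = 2, familyLen = 1, qualifierLen = 2, valueLen = 1; // "r1","f","q1","v"
    int keyLen = 2 + rowLen + 1 + familyLen + qualifierLen + 8 + 1; // 17
    int totalLen = 4 + 4 + keyLen + valueLen;                       // 26
    System.out.println("key = " + keyLen + " bytes, cell = " + totalLen + " bytes");
  }
}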
-077@InterfaceAudience.Private
-078public class KeyValue implements
ExtendedCell {
-079 private static final ArrayList<Tag> EMPTY_ARRAY_LIST = new ArrayList<>();
-080
-081 private static final Log LOG =
LogFactory.getLog(KeyValue.class);
-082
-083 public static final long FIXED_OVERHEAD
= ClassSize.OBJECT + // the KeyValue object itself
-084 ClassSize.REFERENCE + // pointer to
"bytes"
-085 2 * Bytes.SIZEOF_INT + // offset,
length
-086 Bytes.SIZEOF_LONG;// memstoreTS
-087
-088 /**
-089 * Colon character in UTF-8
-090 */
-091 public static final char
COLUMN_FAMILY_DELIMITER = ':';
-092
-093 public static final byte[]
COLUMN_FAMILY_DELIM_ARRAY =
-094new
byte[]{COLUMN_FAMILY_DELIMITER};
-095
-096 /**
-097 * Comparator for plain key/values;
i.e. non-catalog table key/values. Works on Key portion
-098 * of KeyValue only.
-099 * @deprecated Use {@link
CellComparator#getInstance()} instead. Deprecated for hbase 2.0, remove for
hbase 3.0.
-100 */
-101 @Deprecated
-102 public static final KVComparator
COMPARATOR = new KVComparator();
-103 /**
-104 * A {@link KVComparator} for <code>hbase:meta</code> catalog table
-105 * {@link KeyValue}s.
-106 * @deprecated Use {@link
CellComparatorImpl#META_COMPARATOR} instead. Deprecated for hbase 2.0, remove
for hbase 3.0.
-107 */
-108 @Deprecated
-109 public static final KVComparator
META_COMPARATOR = new MetaComparator();
-110
-111 /** Size of the key length field in
bytes*/
-112 public static final int KEY_LENGTH_SIZE
= Bytes.SIZEOF_INT;
-113
-114 /** Size of the key type field in bytes
*/
-115 public