[01/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 44781de0d -> aa3fb87fc


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
----------------------------------------------------------------------
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
index 652ff24..f1e8eec 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":9,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":9,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":9,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":9,"i49":10,"i50":10,"i51":10,"i52":9,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":
 
10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10};
+var methods = 
{"i0":10,"i1":10,"i2":9,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":9,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":9,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":9,"i49":10,"i50":10,"i51":10,"i52":9,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":
 
10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestFromClientSide
+public class TestFromClientSide
 extends java.lang.Object
 Run tests that use the HBase clients; Table.
  Sets up the HBase mini cluster once at start and runs through all client tests.
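
The pattern this class description names, one mini cluster shared by the whole suite, looks roughly like the sketch below. It is a minimal illustration assuming JUnit 4 and HBaseTestingUtility from hbase-testing-util, not code taken from this commit:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniClusterSuite {
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Starts an in-process HDFS, ZooKeeper, HMaster and one RegionServer once.
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }
}

Individual tests then only create and drop tables against the shared cluster, which is what keeps a suite of 140-odd methods like this one tractable.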
@@ -667,124 +667,128 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 void
-testDeleteFamilyVersion()
+testCreateTableWithZeroRegionReplicas()
 
 
 void
-testDeleteFamilyVersionWithOtherDeletes()
+testDeleteFamilyVersion()
 
 
 void
-testDeleteLatestVersionOfSpecifiedColumn()
+testDeleteFamilyVersionWithOtherDeletes()
 
 
 void
-testDeletes()
+testDeleteLatestVersionOfSpecifiedColumn()
 
 
 void
-testDeleteSpecifiedVersionOfSpecifiedColumn()
+testDeletes()
 
 
 void
-testDeletesWithReverseScan()
+testDeleteSpecifiedVersionOfSpecifiedColumn()
 
 
 void
-testDeleteWithFailed()
+testDeletesWithReverseScan()
 
 
 void
+testDeleteWithFailed()
+
+
+void
 testDuplicateAppend()
 Test append result when there are duplicate rpc requests (see the sketch after this method list).
 
 
-
+
 void
 testDuplicateVersions()
 
-
+
 void
 testEmptyFilterList()
 
-
+
 void
 testFilterAcrossMultipleRegions()
 Test filters when the table spans multiple regions.
 
 
-
+
 void
 testFilterAllRecords()
 
-
+
 void
 testFilters()
 
-
+
 void
 testFiltersWithReverseScan()
 
-
+
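
The testDuplicateAppend entry above guards a non-idempotent operation: if an append rpc times out and is retried, the same bytes must not be applied twice. A minimal client-side sketch, assuming the hbase-2.x client API and illustrative table, family and qualifier names:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("testtable"))) {
      Append append = new Append(Bytes.toBytes("row1"));
      append.addColumn(Bytes.toBytes("fam"), Bytes.toBytes("qual"), Bytes.toBytes("ab"));
      // Whatever retries happen underneath, "ab" must appear exactly once.
      Result result = table.append(append);
      System.out.println(result);
    }
  }
}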
 

[01/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site a66696eff -> 901d593a1


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
----------------------------------------------------------------------
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
 
b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
index 8cb3cab..dcc9348 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
@@ -552,7 +552,7 @@ implements MasterObserver
-postAbortProcedure,
 postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance, postBalanceRSGroup,
 postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedCreateTableAction,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 postCompletedModifyTableAction,
 postCompletedModifyTableAction,
 postCompletedSplitRegionAction,
 postCompletedTruncateTableAction,
 postCreateNamespace,
 postCreateTable,
 postDecommissionRegionServers,
 postDeleteNamespace,
 postDeleteSnapshot,
 postDeleteTable,
 postDisableReplicationPeer,
 postDisableTable,
 postEnableReplicationPeer,
 postEnableTable,
 postGetClusterMetrics,
 postGetLocks,
 postGetNamespaceDescriptor,
 postGetProcedures,
 postGetReplicationPeerConfig,
 postGetRSGroupInfo,
 postGetRSGroupInfoOfServer, postGetRSGroupInfoOfTable,
 postGetTableDescriptors,
 postGetTableNames,
 postListDecommissionedRegionServers,
 postListNamespaceDescriptors,
 postListReplicationPeers,
 postListRSGroups,
 postListSnapshot,
 postLockHeartbeat,
 postMergeRegions,
 postMergeRegionsCommitAction,
 postModifyNamespace,
 postModifyNamespace,
 postModifyTable,
 postModifyTable,
 postMove,
 postMoveServers,
 postMoveServersAndTables,
 postMoveTables,
 postRecommissionRegionServer,
 postRegionOffline,
 postRemoveReplicationPeer,
 postRemoveRSGroup,
 postRemoveServers,
 postRequestLock,
 postRestoreSnapshot, postRollBackMergeRegionsAction,
 postRollBackSplitRegionAction,
 postSetNamespaceQuota,
 postSetTableQuota, postSetUserQuota,
 postSetUserQuota,
 postSetUserQuota,
 postSnapshot,
 postTableFlush,
 postTransitReplicationPeerSyncReplicationState,
 postTruncateTable,
 postUnassign,
 postUpdateReplicationPeerConfig,
 preAbortProcedure,
 preAddReplicationPeer,
 preAddRSGroup
 , preAssign,
 preBalance,
 preBalanceRSGroup,
 preBalanceSwitch,
 preClearDeadServers,
 preCloneSnapshot,
 preCreateNamespace,
 preCreateTable,
 preCreateTableAction,
 preCreateTableRegionsInfos,
 preDecommissionRegionServers,
 preDeleteNamespace,
 preDeleteSnapshot,
 preDeleteTable, 
preDeleteTableAction,
 preDisableReplicationPeer,
 preDisableTableAction,
 preEnableReplicationPeer,
 preEnableTable,
 preEnableTableAction,
 preGetClusterMetrics,
 preGetLocks,
 preGetNamespaceDescriptor,
 preGetProcedures,
 preGetReplicationPeerConfig,
 preGetRSGroupInfo,
 preGetRSGroupInfoOfServer,
 preGetRSGroupInfoOfTable,
 preGetTableDescriptors,
 preGetTableNames,
 preListDecommissionedRegionServers,
 preListNamespaceDescriptors,
 preListReplicationPeers, preListRSGroups,
 preListSnapshot,
 preLockHeartbeat,
 preMasterInitialization,
 preMergeRegions,
 preMergeRegionsAction,
 preMergeRegionsCommitAction,
 preModifyNamespace,
 preModifyNamespace, preModifyTable,
 preModifyTableAction,
 preModifyTableAction,
 preMove,
 preMoveServers,
 preMoveServersAndTables,
 preMoveTables,
 preRecommissionRegionServer, preRegionOffline,
 preRemoveReplicationPeer,
 preRemoveRSGroup,
 preRemoveServers,
 preRequestLock,
 preRestoreSnapshot,
 preSetNamespaceQuota,
 preSetSplitOrMergeEnabled,
 preSetTableQuota,
 preSetUserQuota,
 preSetUserQuota,
 preSetUserQuota,
 preShutdown,
 preSnapshot,
 preSplitRegion,
 preSplitRegionAction,
 preSplitRegionAfterMETAAction, preSplitRegionBeforeMETAAction,
 preStopMaster,
 preTableFlush,
 preTransitReplicationPeerSyncReplicationState,
 preTruncateTable,
 preTruncateTableAction,
 preUnassign,
 preUpdateReplicationPeerConfig
+postAbortProcedure,
 postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance, postBalanceRSGroup,
 postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedCreateTableAction,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 postCompletedModifyTableAction,
 postCompletedModifyTableAction,
 postCompletedSplitRegionAction,
 postCompletedTruncateTableAction,
 postCreateNamespace,
 postCreateTable,
 postDecommissionRegionServers,
 postDeleteNamespace,
 postDeleteSnapshot,
 postDeleteTable,
 postDisableReplicationPeer,
 postDisableTable,
 postEnableReplicationPeer,
 postEnableTable,
 postGetClusterMetrics,
 postGetLocks,
 postGetNamespaceDescriptor,
 

[01/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site b5ab7a738 -> 2bf59208a


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
----------------------------------------------------------------------
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
index 736388b..197b99d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
@@ -26,3624 +26,3599 @@
 018package org.apache.hadoop.hbase.client;
 019
 020import static 
org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-021
-022import com.google.protobuf.Message;
-023import com.google.protobuf.RpcChannel;
-024import java.io.IOException;
-025import java.util.ArrayList;
-026import java.util.Arrays;
-027import java.util.Collections;
-028import java.util.EnumSet;
-029import java.util.HashMap;
-030import java.util.List;
-031import java.util.Map;
-032import java.util.Optional;
-033import java.util.Set;
-034import 
java.util.concurrent.CompletableFuture;
-035import 
java.util.concurrent.ConcurrentHashMap;
-036import java.util.concurrent.TimeUnit;
-037import 
java.util.concurrent.atomic.AtomicReference;
-038import java.util.function.BiConsumer;
-039import java.util.function.Function;
-040import java.util.function.Supplier;
-041import java.util.regex.Pattern;
-042import java.util.stream.Collectors;
-043import java.util.stream.Stream;
-044import org.apache.commons.io.IOUtils;
-045import 
org.apache.hadoop.conf.Configuration;
-046import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-047import 
org.apache.hadoop.hbase.CacheEvictionStats;
-048import 
org.apache.hadoop.hbase.CacheEvictionStatsAggregator;
-049import 
org.apache.hadoop.hbase.ClusterMetrics;
-050import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-051import 
org.apache.hadoop.hbase.ClusterMetricsBuilder;
-052import 
org.apache.hadoop.hbase.HConstants;
-053import 
org.apache.hadoop.hbase.HRegionLocation;
-054import 
org.apache.hadoop.hbase.MetaTableAccessor;
-055import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-056import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-057import 
org.apache.hadoop.hbase.RegionLocations;
-058import 
org.apache.hadoop.hbase.RegionMetrics;
-059import 
org.apache.hadoop.hbase.RegionMetricsBuilder;
-060import 
org.apache.hadoop.hbase.ServerName;
-061import 
org.apache.hadoop.hbase.TableExistsException;
-062import 
org.apache.hadoop.hbase.TableName;
-063import 
org.apache.hadoop.hbase.TableNotDisabledException;
-064import 
org.apache.hadoop.hbase.TableNotEnabledException;
-065import 
org.apache.hadoop.hbase.TableNotFoundException;
-066import 
org.apache.hadoop.hbase.UnknownRegionException;
-067import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-068import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-069import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder;
-070import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-071import 
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-072import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-073import 
org.apache.hadoop.hbase.client.security.SecurityCapability;
-074import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import 
org.apache.hadoop.hbase.replication.ReplicationException;
-080import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import 
org.apache.hadoop.hbase.replication.SyncReplicationState;
-083import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-084import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-085import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-086import 
org.apache.hadoop.hbase.util.Bytes;
-087import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-088import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-089import 
org.apache.yetus.audience.InterfaceAudience;
-090import org.slf4j.Logger;
-091import org.slf4j.LoggerFactory;
-092
-093import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-094import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-095import 

[01/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 33046fea5 -> 849d84a8e


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/testdevapidocs/src-html/org/apache/hadoop/hbase/thrift/TestThriftSpnegoHttpServer.html
----------------------------------------------------------------------
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/thrift/TestThriftSpnegoHttpServer.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/thrift/TestThriftSpnegoHttpServer.html
index ba3df71..f983a2d 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/thrift/TestThriftSpnegoHttpServer.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/thrift/TestThriftSpnegoHttpServer.html
@@ -26,247 +26,242 @@
 018 */
 019package org.apache.hadoop.hbase.thrift;
 020
-021import static 
org.apache.hadoop.hbase.thrift.ThriftServerRunner.THRIFT_KERBEROS_PRINCIPAL_KEY;
-022import static 
org.apache.hadoop.hbase.thrift.ThriftServerRunner.THRIFT_KEYTAB_FILE_KEY;
-023import static 
org.apache.hadoop.hbase.thrift.ThriftServerRunner.THRIFT_SPNEGO_KEYTAB_FILE_KEY;
-024import static 
org.apache.hadoop.hbase.thrift.ThriftServerRunner.THRIFT_SPNEGO_PRINCIPAL_KEY;
-025import static 
org.apache.hadoop.hbase.thrift.ThriftServerRunner.THRIFT_SUPPORT_PROXYUSER_KEY;
-026import static 
org.apache.hadoop.hbase.thrift.ThriftServerRunner.USE_HTTP_CONF_KEY;
-027import static 
org.junit.Assert.assertFalse;
-028import static 
org.junit.Assert.assertNotNull;
-029import static 
org.junit.Assert.assertTrue;
+021import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SUPPORT_PROXYUSER_KEY;
+022import static 
org.junit.Assert.assertFalse;
+023import static 
org.junit.Assert.assertNotNull;
+024import static 
org.junit.Assert.assertTrue;
+025
+026import java.io.File;
+027import java.security.Principal;
+028import 
java.security.PrivilegedExceptionAction;
+029import java.util.Set;
 030
-031import java.io.File;
-032import java.security.Principal;
-033import 
java.security.PrivilegedExceptionAction;
-034import java.util.Set;
-035
-036import javax.security.auth.Subject;
-037import 
javax.security.auth.kerberos.KerberosTicket;
-038
-039import org.apache.commons.io.FileUtils;
-040import 
org.apache.hadoop.conf.Configuration;
-041import 
org.apache.hadoop.hbase.HBaseClassTestRule;
-042import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-043import 
org.apache.hadoop.hbase.HConstants;
-044import 
org.apache.hadoop.hbase.security.HBaseKerberosUtils;
-045import 
org.apache.hadoop.hbase.testclassification.ClientTests;
-046import 
org.apache.hadoop.hbase.testclassification.LargeTests;
-047import 
org.apache.hadoop.hbase.thrift.generated.Hbase;
-048import 
org.apache.hadoop.security.authentication.util.KerberosName;
-049import org.apache.http.HttpHeaders;
-050import 
org.apache.http.auth.AuthSchemeProvider;
-051import org.apache.http.auth.AuthScope;
-052import 
org.apache.http.auth.KerberosCredentials;
-053import 
org.apache.http.client.config.AuthSchemes;
-054import org.apache.http.config.Lookup;
-055import 
org.apache.http.config.RegistryBuilder;
-056import 
org.apache.http.impl.auth.SPNegoSchemeFactory;
-057import 
org.apache.http.impl.client.BasicCredentialsProvider;
-058import 
org.apache.http.impl.client.CloseableHttpClient;
-059import 
org.apache.http.impl.client.HttpClients;
-060import 
org.apache.kerby.kerberos.kerb.KrbException;
-061import 
org.apache.kerby.kerberos.kerb.client.JaasKrbUtil;
-062import 
org.apache.kerby.kerberos.kerb.server.SimpleKdcServer;
-063import 
org.apache.thrift.protocol.TBinaryProtocol;
-064import 
org.apache.thrift.protocol.TProtocol;
-065import 
org.apache.thrift.transport.THttpClient;
-066import org.ietf.jgss.GSSCredential;
-067import org.ietf.jgss.GSSManager;
-068import org.ietf.jgss.GSSName;
-069import org.ietf.jgss.Oid;
-070import org.junit.AfterClass;
-071import org.junit.BeforeClass;
-072import org.junit.ClassRule;
-073import 
org.junit.experimental.categories.Category;
-074import org.slf4j.Logger;
-075import org.slf4j.LoggerFactory;
-076
-077/**
-078 * Start the HBase Thrift HTTP server on 
a random port through the command-line
-079 * interface and talk to it from client 
side with SPNEGO security enabled.
-080 */
-081@Category({ClientTests.class, 
LargeTests.class})
-082public class TestThriftSpnegoHttpServer 
extends TestThriftHttpServer {
-083  @ClassRule
-084  public static final HBaseClassTestRule 
CLASS_RULE =
-085
HBaseClassTestRule.forClass(TestThriftSpnegoHttpServer.class);
-086
-087  private static final Logger LOG =
-088
LoggerFactory.getLogger(TestThriftSpnegoHttpServer.class);
+031import javax.security.auth.Subject;
+032import 
javax.security.auth.kerberos.KerberosTicket;
+033
+034import org.apache.commons.io.FileUtils;
+035import 
org.apache.hadoop.conf.Configuration;
+036import 
org.apache.hadoop.hbase.HBaseClassTestRule;
+037import 
org.apache.hadoop.hbase.HBaseTestingUtility;
+038import 
org.apache.hadoop.hbase.HConstants;
+039import 
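
For orientation, the imports this diff reorganizes belong to client-side SPNEGO wiring. A rough sketch of that wiring, assuming Apache HttpClient 4.x and a hypothetical principal; the Oid string is the standard Kerberos v5 mechanism identifier:

import org.apache.http.auth.AuthSchemeProvider;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.KerberosCredentials;
import org.apache.http.client.config.AuthSchemes;
import org.apache.http.config.Lookup;
import org.apache.http.config.RegistryBuilder;
import org.apache.http.impl.auth.SPNegoSchemeFactory;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.ietf.jgss.GSSCredential;
import org.ietf.jgss.GSSManager;
import org.ietf.jgss.GSSName;
import org.ietf.jgss.Oid;

public class SpnegoClientSketch {
  public static CloseableHttpClient newSpnegoClient() throws Exception {
    GSSManager manager = GSSManager.getInstance();
    GSSName clientName = manager.createName("client@EXAMPLE.COM", GSSName.NT_USER_NAME);
    // 1.2.840.113554.1.2.2 is the Kerberos v5 mechanism OID.
    GSSCredential credential = manager.createCredential(clientName,
        GSSCredential.DEFAULT_LIFETIME, new Oid("1.2.840.113554.1.2.2"),
        GSSCredential.INITIATE_ONLY);
    Lookup<AuthSchemeProvider> schemes = RegistryBuilder.<AuthSchemeProvider>create()
        .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true)).build();
    BasicCredentialsProvider credentials = new BasicCredentialsProvider();
    credentials.setCredentials(AuthScope.ANY, new KerberosCredentials(credential));
    return HttpClients.custom()
        .setDefaultAuthSchemeRegistry(schemes)
        .setDefaultCredentialsProvider(credentials)
        .build();
  }
}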

[01/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 074c73629 -> ef0dd56df


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
----------------------------------------------------------------------
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
 
b/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
index 7c0c94d..80bb54c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
@@ -445,6 +445,6 @@ extends https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/
 
 
 
-Copyright © 2007-2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007-2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 



[01/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 9a5f7861d -> 09ea0d5f2


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
----------------------------------------------------------------------
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
index 0f5a095..50bf692 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -78,8712 +78,8714 @@
 070import 
java.util.concurrent.locks.ReadWriteLock;
 071import 
java.util.concurrent.locks.ReentrantReadWriteLock;
 072import java.util.function.Function;
-073import 
org.apache.hadoop.conf.Configuration;
-074import org.apache.hadoop.fs.FileStatus;
-075import org.apache.hadoop.fs.FileSystem;
-076import 
org.apache.hadoop.fs.LocatedFileStatus;
-077import org.apache.hadoop.fs.Path;
-078import org.apache.hadoop.hbase.Cell;
-079import 
org.apache.hadoop.hbase.CellBuilderType;
-080import 
org.apache.hadoop.hbase.CellComparator;
-081import 
org.apache.hadoop.hbase.CellComparatorImpl;
-082import 
org.apache.hadoop.hbase.CellScanner;
-083import 
org.apache.hadoop.hbase.CellUtil;
-084import 
org.apache.hadoop.hbase.CompareOperator;
-085import 
org.apache.hadoop.hbase.CompoundConfiguration;
-086import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-087import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-088import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-089import 
org.apache.hadoop.hbase.HConstants;
-090import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-091import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-092import 
org.apache.hadoop.hbase.KeyValue;
-093import 
org.apache.hadoop.hbase.KeyValueUtil;
-094import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-095import 
org.apache.hadoop.hbase.NotServingRegionException;
-096import 
org.apache.hadoop.hbase.PrivateCellUtil;
-097import 
org.apache.hadoop.hbase.RegionTooBusyException;
-098import org.apache.hadoop.hbase.Tag;
-099import org.apache.hadoop.hbase.TagUtil;
-100import 
org.apache.hadoop.hbase.UnknownScannerException;
-101import 
org.apache.hadoop.hbase.client.Append;
-102import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-103import 
org.apache.hadoop.hbase.client.CompactionState;
-104import 
org.apache.hadoop.hbase.client.Delete;
-105import 
org.apache.hadoop.hbase.client.Durability;
-106import 
org.apache.hadoop.hbase.client.Get;
-107import 
org.apache.hadoop.hbase.client.Increment;
-108import 
org.apache.hadoop.hbase.client.IsolationLevel;
-109import 
org.apache.hadoop.hbase.client.Mutation;
-110import 
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-111import 
org.apache.hadoop.hbase.client.Put;
-112import 
org.apache.hadoop.hbase.client.RegionInfo;
-113import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-114import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-115import 
org.apache.hadoop.hbase.client.Result;
-116import 
org.apache.hadoop.hbase.client.RowMutations;
-117import 
org.apache.hadoop.hbase.client.Scan;
-118import 
org.apache.hadoop.hbase.client.TableDescriptor;
-119import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-120import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-121import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-122import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-123import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-124import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-125import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-126import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-127import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-128import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-129import 
org.apache.hadoop.hbase.filter.FilterWrapper;
-130import 
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-131import 
org.apache.hadoop.hbase.io.HFileLink;
-132import 
org.apache.hadoop.hbase.io.HeapSize;
-133import 
org.apache.hadoop.hbase.io.TimeRange;
-134import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
-135import 
org.apache.hadoop.hbase.io.hfile.HFile;
-136import 
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import 
org.apache.hadoop.hbase.ipc.RpcCall;
-139import 
org.apache.hadoop.hbase.ipc.RpcServer;
-140import 
org.apache.hadoop.hbase.mob.MobFileCache;
-141import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-142import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-143import 
org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-144import 
org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry;
-145import 

[01/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site d0f540a20 -> 4f8b84247


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
----------------------------------------------------------------------
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
index f1c0bc3..88147e4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
@@ -610,585 +610,588 @@
 602  return false;
 603}
 604LOG.info("Processing expiration of " 
+ serverName + " on " + this.master.getServerName());
-605
master.getAssignmentManager().submitServerCrash(serverName, true);
-606
-607// Tell our listeners that a server 
was removed
-608if (!this.listeners.isEmpty()) {
-609  for (ServerListener listener : 
this.listeners) {
-610
listener.serverRemoved(serverName);
-611  }
-612}
-613// trigger a persist of 
flushedSeqId
-614if (flushedSeqIdFlusher != null) {
-615  flushedSeqIdFlusher.triggerNow();
-616}
-617return true;
-618  }
-619
-620  @VisibleForTesting
-621  public void 
moveFromOnlineToDeadServers(final ServerName sn) {
-622synchronized (onlineServers) {
-623  if 
(!this.onlineServers.containsKey(sn)) {
-624LOG.trace("Expiration of {} but 
server not online", sn);
-625  }
-626  // Remove the server from the known 
servers lists and update load info BUT
-627  // add to deadservers first; do 
this so it'll show in dead servers list if
-628  // not in online servers list.
-629  this.deadservers.add(sn);
-630  this.onlineServers.remove(sn);
-631  onlineServers.notifyAll();
-632}
-633this.rsAdmins.remove(sn);
-634  }
-635
-636  /*
-637   * Remove the server from the drain 
list.
-638   */
-639  public synchronized boolean 
removeServerFromDrainList(final ServerName sn) {
-640// Warn if the server (sn) is not 
online.  ServerName is of the form:
-641// <hostname> , <port> , <startcode>
-642
-643if (!this.isServerOnline(sn)) {
-644  LOG.warn("Server " + sn + " is not 
currently online. " +
-645   "Removing from draining 
list anyway, as requested.");
-646}
-647// Remove the server from the 
draining servers lists.
-648return 
this.drainingServers.remove(sn);
-649  }
-650
-651  /**
-652   * Add the server to the drain list.
-653   * @param sn
-654   * @return True if the server is added 
or the server is already on the drain list.
-655   */
-656  public synchronized boolean 
addServerToDrainList(final ServerName sn) {
-657// Warn if the server (sn) is not 
online.  ServerName is of the form:
-658// <hostname> , <port> , <startcode>
-659
-660if (!this.isServerOnline(sn)) {
-661  LOG.warn("Server " + sn + " is not 
currently online. " +
-662   "Ignoring request to add 
it to draining list.");
-663  return false;
-664}
-665// Add the server to the draining 
servers lists, if it's not already in
-666// it.
-667if 
(this.drainingServers.contains(sn)) {
-668  LOG.warn("Server " + sn + " is 
already in the draining server list." +
-669   "Ignoring request to add 
it again.");
-670  return true;
-671}
-672LOG.info("Server " + sn + " added to 
draining server list.");
-673return 
this.drainingServers.add(sn);
-674  }
-675
-676  // RPC methods to region servers
-677
-678  private HBaseRpcController 
newRpcController() {
-679return rpcControllerFactory == null ? 
null : rpcControllerFactory.newController();
-680  }
-681
-682  /**
-683   * Sends a WARMUP RPC to the specified 
server to warmup the specified region.
-684   * <p>
-685   * A region server could reject the 
close request because it either does not
-686   * have the specified region or the 
region is being split.
-687   * @param server server to warmup a 
region
-688   * @param region region to  warmup
-689   */
-690  public void sendRegionWarmup(ServerName 
server,
-691  RegionInfo region) {
-692if (server == null) return;
-693try {
-694  AdminService.BlockingInterface 
admin = getRsAdmin(server);
-695  HBaseRpcController controller = 
newRpcController();
-696  
ProtobufUtil.warmupRegion(controller, admin, region);
-697} catch (IOException e) {
-698  LOG.error("Received exception in 
RPC for warmup server:" +
-699server + "region: " + region +
-700"exception: " + e);
-701}
-702  }
-703
-704  /**
-705   * Contacts a region server and waits 
up to timeout ms
-706   * to close the region.  This bypasses 
the active hmaster.
-707   */
-708  public static void 
closeRegionSilentlyAndWait(ClusterConnection connection,
-709ServerName server, RegionInfo region, 
long timeout) throws IOException, InterruptedException {
-710
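
The drain-list bookkeeping quoted above has a public counterpart on the client Admin interface. A rough sketch, assuming the hbase-2.x Admin API and an illustrative server name:

import java.util.Collections;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DrainSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      ServerName sn = ServerName.valueOf("host.example.com,16020,1546300800000");
      // Puts the server on the drain list; offload=true also moves its regions away.
      admin.decommissionRegionServers(Collections.singletonList(sn), true);
      // Reverses the operation, taking the server off the drain list again.
      admin.recommissionRegionServer(sn, Collections.emptyList());
    }
  }
}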

[01/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site e46798831 -> 275553168


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
----------------------------------------------------------------------
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
index a957d31..62f81b6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
-168 * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck tool for hbase2.
-171 *
-172 * <p>
-173 * Region consistency checks verify that hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS (.regioninfo files) all are in
-175 * accordance.
-176 * <p>
-177 * Table integrity checks verify that all possible row keys resolve to exactly
-178 * one region of a table.  This means there are no individual degenerate
-179 * or backwards regions; no holes between regions; and that there are no
-180 * overlapping regions.
-181 * <p>
-182 * The general repair strategy works in two phases:
-183 * <ol>
-184 * <li> Repair Table Integrity on HDFS. (merge or fabricate regions)
-185 * <li> Repair Region Consistency with hbase:meta and assignments
-186 * </ol>
-187 * <p>
-188 * For table integrity repairs, the tables' region directories are scanned
-189 * for .regioninfo files.  Each table's integrity is then verified.  If there
-190 * are any orphan regions (regions with no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions.  If there are any overlapping regions,
-193 * a new region is created and all data is merged into the new region.
-194 * <p>
-195 * Table integrity repairs deal solely with HDFS and could potentially be done
-196 * offline -- the hbase region servers or master do not need to be running.
-197 * This phase can eventually be used to completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * <p>
-200 * Region consistency requires three conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at the regionserver that was assigned to
-203 * with proper state in the master.
-204 * <p>
-205 * Region consistency repairs require hbase to be online so that hbck can
-206 * contact the HBase master and region servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of the region consistency information
-208 * is transient and less risky to repair.
-209 * <p>
-210 * If 
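
hbck itself is normally launched through ToolRunner. A minimal sketch of a read-only run, assuming the HBaseFsckTool wrapper and the -details flag behave as in the hbase-1.x lineage this class comment describes (on hbase-2.x it reports but does not repair):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.HBaseFsck;
import org.apache.hadoop.util.ToolRunner;

public class HbckReport {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // -details prints a full report of every region and table checked.
    int exitCode = ToolRunner.run(conf, new HBaseFsck.HBaseFsckTool(conf),
        new String[] { "-details" });
    System.exit(exitCode);
  }
}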

[01/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site ba00a2513 -> 5299e6673


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
----------------------------------------------------------------------
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index 1c0118e..63362ed 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -1120,2858 +1120,2862 @@
 1112
getChoreService().scheduleChore(catalogJanitorChore);
 1113this.serverManager.startChore();
 1114
-1115// NAMESPACE READ
-1116// Here we expect hbase:namespace to 
be online. See inside initClusterSchemaService.
-1117// TODO: Fix this. Namespace is a 
pain being a sort-of system table. Fold it in to hbase:meta.
-1118// isNamespace does like isMeta and 
waits until namespace is onlined before allowing progress.
-1119if (!waitForNamespaceOnline()) {
-1120  return;
-1121}
-1122status.setStatus("Starting cluster 
schema service");
-1123initClusterSchemaService();
-1124
-1125if (this.cpHost != null) {
-1126  try {
-1127
this.cpHost.preMasterInitialization();
-1128  } catch (IOException e) {
-1129LOG.error("Coprocessor 
preMasterInitialization() hook failed", e);
-1130  }
-1131}
-1132
-1133status.markComplete("Initialization 
successful");
-1134LOG.info(String.format("Master has 
completed initialization %.3fsec",
-1135   (System.currentTimeMillis() - 
masterActiveTime) / 1000.0f));
-1136
this.masterFinishedInitializationTime = System.currentTimeMillis();
-1137
configurationManager.registerObserver(this.balancer);
-1138
configurationManager.registerObserver(this.hfileCleaner);
-1139
configurationManager.registerObserver(this.logCleaner);
-1140// Set master as 'initialized'.
-1141setInitialized(true);
-1142
-1143if (maintenanceMode) {
-1144  LOG.info("Detected repair mode, 
skipping final initialization steps.");
-1145  return;
-1146}
-1147
-1148
assignmentManager.checkIfShouldMoveSystemRegionAsync();
-1149status.setStatus("Assign meta 
replicas");
-1150MasterMetaBootstrap metaBootstrap = 
createMetaBootstrap();
-1151
metaBootstrap.assignMetaReplicas();
-1152status.setStatus("Starting quota 
manager");
-1153initQuotaManager();
-1154if (QuotaUtil.isQuotaEnabled(conf)) 
{
-1155  // Create the quota snapshot 
notifier
-1156  spaceQuotaSnapshotNotifier = 
createQuotaSnapshotNotifier();
-1157  
spaceQuotaSnapshotNotifier.initialize(getClusterConnection());
-1158  this.quotaObserverChore = new 
QuotaObserverChore(this, getMasterMetrics());
-1159  // Start the chore to read the 
region FS space reports and act on them
-1160  
getChoreService().scheduleChore(quotaObserverChore);
-1161
-1162  this.snapshotQuotaChore = new 
SnapshotQuotaObserverChore(this, getMasterMetrics());
-1163  // Start the chore to read 
snapshots and add their usage to table/NS quotas
-1164  
getChoreService().scheduleChore(snapshotQuotaChore);
-1165}
-1166
-1167// clear the dead servers with same 
host name and port of online server because we are not
-1168// removing dead server with same 
hostname and port of rs which is trying to check in before
-1169// master initialization. See 
HBASE-5916.
-1170
this.serverManager.clearDeadServersWithSameHostNameAndPortOfOnlineServer();
-1171
-1172// Check and set the znode ACLs if 
needed in case we are overtaking a non-secure configuration
-1173status.setStatus("Checking ZNode 
ACLs");
-1174zooKeeper.checkAndSetZNodeAcls();
+1115// Only for rolling upgrade, where 
we need to migrate the data in namespace table to meta table.
+1116if (!waitForNamespaceOnline()) {
+1117  return;
+1118}
+1119status.setStatus("Starting cluster 
schema service");
+1120initClusterSchemaService();
+1121
+1122if (this.cpHost != null) {
+1123  try {
+1124
this.cpHost.preMasterInitialization();
+1125  } catch (IOException e) {
+1126LOG.error("Coprocessor 
preMasterInitialization() hook failed", e);
+1127  }
+1128}
+1129
+1130status.markComplete("Initialization 
successful");
+1131LOG.info(String.format("Master has 
completed initialization %.3fsec",
+1132   (System.currentTimeMillis() - 
masterActiveTime) / 1000.0f));
+1133
this.masterFinishedInitializationTime = System.currentTimeMillis();
+1134
configurationManager.registerObserver(this.balancer);
+1135
configurationManager.registerObserver(this.hfileCleaner);
+1136
configurationManager.registerObserver(this.logCleaner);
+1137

[01/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 733988183 -> 68eae6230


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.PingCoprocessor.html
----------------------------------------------------------------------
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.PingCoprocessor.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.PingCoprocessor.html
index 5062e9b..23b4be7 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.PingCoprocessor.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.PingCoprocessor.html
@@ -282,7 +282,7 @@
 274  public static void tearDownAfterClass() 
throws Exception {
 275cleanUp();
 276TEST_UTIL.shutdownMiniCluster();
-277int total = 
TableAuthManager.getTotalRefCount();
+277int total = 
AuthManager.getTotalRefCount();
 278assertTrue("Unexpected reference 
count: " + total, total == 0);
 279  }
 280
@@ -1642,12 +1642,12 @@
 1634  }
 1635
 1636  UserPermission ownerperm =
-1637  new 
UserPermission(Bytes.toBytes(USER_OWNER.getName()), tableName, null, 
Action.values());
+1637  new 
UserPermission(USER_OWNER.getName(), tableName, Action.values());
 1638  assertTrue("Owner should have all 
permissions on table",
 1639
hasFoundUserPermission(ownerperm, perms));
 1640
 1641  User user = 
User.createUserForTesting(TEST_UTIL.getConfiguration(), "user", new 
String[0]);
-1642  byte[] userName = 
Bytes.toBytes(user.getShortName());
+1642  String userName = 
user.getShortName();
 1643
 1644  UserPermission up =
 1645  new UserPermission(userName, 
tableName, family1, qualifier, Permission.Action.READ);
@@ -1733,7 +1733,7 @@
 1725  }
 1726
 1727  UserPermission newOwnerperm =
-1728  new 
UserPermission(Bytes.toBytes(newOwner.getName()), tableName, null, 
Action.values());
+1728  new 
UserPermission(newOwner.getName(), tableName, Action.values());
 1729  assertTrue("New owner should have 
all permissions on table",
 1730
hasFoundUserPermission(newOwnerperm, perms));
 1731} finally {
@@ -1757,1888 +1757,1898 @@
 1749
 1750Collection<String> superUsers = Superusers.getSuperUsers();
 1751List<UserPermission> adminPerms = new ArrayList<>(superUsers.size() + 1);
-1752adminPerms.add(new 
UserPermission(Bytes.toBytes(USER_ADMIN.getShortName()),
-1753  AccessControlLists.ACL_TABLE_NAME, 
null, null, Bytes.toBytes("ACRW")));
-1754
-1755for(String user: superUsers) {
-1756  adminPerms.add(new 
UserPermission(Bytes.toBytes(user), AccessControlLists.ACL_TABLE_NAME,
-1757  null, null, 
Action.values()));
-1758}
-1759assertTrue("Only super users, global 
users and user admin has permission on table hbase:acl " +
-1760"per setup", perms.size() == 5 + 
superUsers.size() 
-1761
hasFoundUserPermission(adminPerms, perms));
-1762  }
-1763
-1764  /** global operations */
-1765  private void 
verifyGlobal(AccessTestAction action) throws Exception {
-1766verifyAllowed(action, SUPERUSER);
-1767
-1768verifyDenied(action, USER_CREATE, 
USER_RW, USER_NONE, USER_RO);
-1769  }
-1770
-1771  @Test
-1772  public void testCheckPermissions() 
throws Exception {
-1773// 
--
-1774// test global permissions
-1775AccessTestAction globalAdmin = new 
AccessTestAction() {
-1776  @Override
-1777  public Void run() throws Exception 
{
-1778checkGlobalPerms(TEST_UTIL, 
Permission.Action.ADMIN);
-1779return null;
-1780  }
-1781};
-1782// verify that only superuser can 
admin
-1783verifyGlobal(globalAdmin);
-1784
-1785// 
--
-1786// test multiple permissions
-1787AccessTestAction globalReadWrite = 
new AccessTestAction() {
-1788  @Override
-1789  public Void run() throws Exception 
{
-1790checkGlobalPerms(TEST_UTIL, 
Permission.Action.READ, Permission.Action.WRITE);
-1791return null;
-1792  }
-1793};
+1752adminPerms.add(new 
UserPermission(USER_ADMIN.getShortName(), Bytes.toBytes("ACRW")));
+1753for(String user: superUsers) {
+1754  // Global permission
+1755  adminPerms.add(new 
UserPermission(user, Action.values()));
+1756}
+1757assertTrue("Only super users, global 
users and user admin has permission on table hbase:acl " +
+1758"per setup", perms.size() == 5 + 
superUsers.size() 
+1759
hasFoundUserPermission(adminPerms, perms));
+1760  }
+1761
+1762  /** global operations */
+1763  private void 
verifyGlobal(AccessTestAction action) throws Exception {
+1764verifyAllowed(action, SUPERUSER);
+1765
+1766verifyDenied(action, USER_CREATE, 
USER_RW, 
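
The hunks above replace byte[] user names with Strings throughout UserPermission. Side by side, using the new constructors exactly as this diff shows them (table and user names illustrative):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.security.access.Permission.Action;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class UserPermissionSketch {
  public static void main(String[] args) {
    TableName tableName = TableName.valueOf("testtable");
    // Formerly: new UserPermission(Bytes.toBytes("owner"), tableName, null, Action.values())
    UserPermission tablePerm = new UserPermission("owner", tableName, Action.values());
    // Formerly a byte[]-based global permission; now just the user name string.
    UserPermission globalPerm = new UserPermission("admin", Action.values());
    System.out.println(tablePerm + " / " + globalPerm);
  }
}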

[01/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.

2018-10-29 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 3052d5260 -> b5e107c34


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestTableProcedureWithEvent.html
----------------------------------------------------------------------
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestTableProcedureWithEvent.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestTableProcedureWithEvent.html
index c9e0e55..f1b9105 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestTableProcedureWithEvent.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestTableProcedureWithEvent.html
@@ -37,157 +37,157 @@
 029import java.util.Arrays;
 030import java.util.List;
 031import 
org.apache.hadoop.hbase.HBaseClassTestRule;
-032import 
org.apache.hadoop.hbase.HRegionInfo;
-033import 
org.apache.hadoop.hbase.ServerName;
-034import 
org.apache.hadoop.hbase.TableName;
-035import 
org.apache.hadoop.hbase.client.RegionInfo;
-036import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-037import 
org.apache.hadoop.hbase.master.locking.LockProcedure;
-038import 
org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType;
-039import 
org.apache.hadoop.hbase.procedure2.LockType;
-040import 
org.apache.hadoop.hbase.procedure2.LockedResource;
-041import 
org.apache.hadoop.hbase.procedure2.LockedResourceType;
-042import 
org.apache.hadoop.hbase.procedure2.Procedure;
-043import 
org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-044import 
org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
-045import 
org.apache.hadoop.hbase.testclassification.MasterTests;
-046import 
org.apache.hadoop.hbase.testclassification.SmallTests;
-047import 
org.apache.hadoop.hbase.util.Bytes;
-048import org.junit.After;
-049import org.junit.Before;
-050import org.junit.ClassRule;
-051import org.junit.Rule;
-052import org.junit.Test;
-053import 
org.junit.experimental.categories.Category;
-054import org.junit.rules.TestName;
-055import org.slf4j.Logger;
-056import org.slf4j.LoggerFactory;
-057
-058@Category({MasterTests.class, 
SmallTests.class})
-059public class TestMasterProcedureScheduler 
{
-060
-061  @ClassRule
-062  public static final HBaseClassTestRule 
CLASS_RULE =
-063  
HBaseClassTestRule.forClass(TestMasterProcedureScheduler.class);
-064
-065  private static final Logger LOG = 
LoggerFactory.getLogger(TestMasterProcedureScheduler.class);
-066
-067  private MasterProcedureScheduler 
queue;
-068
-069  @Rule
-070  public TestName name = new 
TestName();
-071
-072  @Before
-073  public void setUp() throws IOException 
{
-074queue = new 
MasterProcedureScheduler();
-075queue.start();
-076  }
-077
-078  @After
-079  public void tearDown() throws 
IOException {
-080assertEquals("proc-queue expected to 
be empty", 0, queue.size());
-081queue.stop();
-082queue.clear();
-083  }
-084
-085  /**
-086   * Verify simple 
create/insert/fetch/delete of the table queue.
-087   */
-088  @Test
-089  public void testSimpleTableOpsQueues() 
throws Exception {
-090final int NUM_TABLES = 10;
-091final int NUM_ITEMS = 10;
-092
-093int count = 0;
-094for (int i = 1; i <= NUM_TABLES; ++i) {
-095  TableName tableName = 
TableName.valueOf(String.format("test-%04d", i));
-096  // insert items
-097  for (int j = 1; j <= NUM_ITEMS; ++j) {
-098queue.addBack(new 
TestTableProcedure(i * 1000 + j, tableName,
-099  
TableProcedureInterface.TableOperationType.EDIT));
-100assertEquals(++count, 
queue.size());
-101  }
-102}
-103assertEquals(NUM_TABLES * NUM_ITEMS, 
queue.size());
-104
-105for (int j = 1; j <= NUM_ITEMS; ++j) {
-106  for (int i = 1; i <= NUM_TABLES; ++i) {
-107Procedure proc = queue.poll();
-108assertTrue(proc != null);
-109TableName tableName = 
((TestTableProcedure)proc).getTableName();
-110
queue.waitTableExclusiveLock(proc, tableName);
-111
queue.wakeTableExclusiveLock(proc, tableName);
-112queue.completionCleanup(proc);
-113assertEquals(--count, 
queue.size());
-114assertEquals(i * 1000 + j, 
proc.getProcId());
-115  }
-116}
-117assertEquals(0, queue.size());
-118
-119for (int i = 1; i <= NUM_TABLES; ++i) {
-120  final TableName tableName = 
TableName.valueOf(String.format("test-%04d", i));
-121  final TestTableProcedure dummyProc 
= new TestTableProcedure(100, tableName,
-122
TableProcedureInterface.TableOperationType.DELETE);
-123  // complete the table deletion
-124  
assertTrue(queue.markTableAsDeleted(tableName, dummyProc));
-125}
-126  }
-127
-128  /**
-129   * Check that the 

[01/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 02f92ca7d -> 8f09a71db


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.html
----------------------------------------------------------------------
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.html
index 3ec2d53..9400177 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.html
@@ -33,390 +33,433 @@
 025import java.util.HashMap;
 026import java.util.List;
 027import java.util.Map;
-028
-029import 
org.apache.hadoop.conf.Configuration;
-030import org.apache.hadoop.fs.FileStatus;
-031import org.apache.hadoop.fs.FileSystem;
-032import org.apache.hadoop.fs.FileUtil;
-033import org.apache.hadoop.fs.Path;
-034import 
org.apache.hadoop.fs.permission.FsPermission;
-035import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-036import 
org.apache.hadoop.hbase.HConstants;
-037import 
org.apache.hadoop.hbase.TableName;
-038import 
org.apache.hadoop.hbase.client.Connection;
-039import 
org.apache.hadoop.hbase.ipc.RpcServer;
-040import 
org.apache.hadoop.hbase.regionserver.HRegion.BulkLoadListener;
-041import 
org.apache.hadoop.hbase.security.User;
-042import 
org.apache.hadoop.hbase.security.UserProvider;
-043import 
org.apache.hadoop.hbase.security.token.FsDelegationToken;
-044import 
org.apache.hadoop.hbase.security.token.TokenUtil;
-045import 
org.apache.hadoop.hbase.util.Bytes;
-046import 
org.apache.hadoop.hbase.util.FSHDFSUtils;
-047import 
org.apache.hadoop.hbase.util.FSUtils;
-048import 
org.apache.hadoop.hbase.util.Methods;
-049import 
org.apache.hadoop.hbase.util.Pair;
-050import org.apache.hadoop.io.Text;
-051import 
org.apache.hadoop.security.UserGroupInformation;
-052import 
org.apache.hadoop.security.token.Token;
-053import 
org.apache.yetus.audience.InterfaceAudience;
-054import org.slf4j.Logger;
-055import org.slf4j.LoggerFactory;
-056import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-057import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
-058import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
-059import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
-060
-061/**
-062 * Bulk loads in secure mode.
-063 *
-064 * This service addresses two issues:
-065 * <ol>
-066 * <li>Moving files in a secure filesystem wherein the HBase Client
-067 * and HBase Server are different filesystem users.</li>
-068 * <li>Does moving in a secure manner. Assuming that the filesystem
-069 * is POSIX compliant.</li>
-070 * </ol>
-071 *
-072 * The algorithm is as follows:
-073 * <ol>
-074 * <li>Create an hbase owned staging directory which is
-075 * world traversable (711): {@code /hbase/staging}</li>
-076 * <li>A user writes out data to his secure output directory: {@code /user/foo/data}</li>
-077 * <li>A call is made to hbase to create a secret staging directory
-078 * which globally rwx (777): {@code /user/staging/averylongandrandomdirectoryname}</li>
-079 * <li>The user moves the data into the random staging directory,
-080 * then calls bulkLoadHFiles()</li>
-081 * </ol>
-082 *
-083 * Like delegation tokens the strength of the security lies in the length
-084 * and randomness of the secret directory.
-085 *
-086 */
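
From the client side, the staging-directory protocol in the comment above is normally driven through the bulk-load tool rather than called directly. A rough sketch, assuming the hbase-2.x LoadIncrementalHFiles API; the table name and HFile directory are illustrative, and /user/foo/data echoes the path in the comment:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;

public class BulkLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("mytable");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // On a secure cluster the server-side SecureBulkLoadManager creates the
      // random staging directory and moves the HFiles before the load runs.
      new LoadIncrementalHFiles(conf).doBulkLoad(new Path("/user/foo/data"),
          admin, conn.getTable(table), conn.getRegionLocator(table));
    }
  }
}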
-087@InterfaceAudience.Private
-088public class SecureBulkLoadManager {
-089
-090  public static final long VERSION = 
0L;
-091
-092  //320/5 = 64 characters
-093  private static final int RANDOM_WIDTH = 
320;
-094  private static final int RANDOM_RADIX = 
32;
+028import 
java.util.concurrent.ConcurrentHashMap;
+029import java.util.function.BiFunction;
+030import java.util.function.Consumer;
+031
+032import 
org.apache.hadoop.conf.Configuration;
+033import org.apache.hadoop.fs.FileStatus;
+034import org.apache.hadoop.fs.FileSystem;
+035import org.apache.hadoop.fs.FileUtil;
+036import org.apache.hadoop.fs.Path;
+037import 
org.apache.hadoop.fs.permission.FsPermission;
+038import 
org.apache.hadoop.hbase.DoNotRetryIOException;
+039import 
org.apache.hadoop.hbase.HConstants;
+040import 
org.apache.hadoop.hbase.TableName;
+041import 
org.apache.hadoop.hbase.client.Connection;
+042import 
org.apache.hadoop.hbase.ipc.RpcServer;
+043import 
org.apache.hadoop.hbase.regionserver.HRegion.BulkLoadListener;
+044import 
org.apache.hadoop.hbase.security.User;
+045import 
org.apache.hadoop.hbase.security.UserProvider;
+046import 
org.apache.hadoop.hbase.security.token.FsDelegationToken;
+047import 
org.apache.hadoop.hbase.security.token.TokenUtil;
+048import 
org.apache.hadoop.hbase.util.Bytes;
+049import 
org.apache.hadoop.hbase.util.FSHDFSUtils;
+050import 

[01/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 383964c63 -> 425db2304


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.PutThread.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.PutThread.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.PutThread.html
index ed3db7a..156dabb 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.PutThread.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.PutThread.html
@@ -5542,785 +5542,825 @@
 5534  }
 5535
 5536  @Test
-5537  public void testWriteRequestsCounter() 
throws IOException {
-5538byte[] fam = 
Bytes.toBytes("info");
-5539byte[][] families = { fam };
-5540this.region = initHRegion(tableName, 
method, CONF, families);
+5537  public void 
testReverseScanWhenPutCellsAfterOpenReverseScan() throws Exception {
+5538byte[] cf1 = Bytes.toBytes("CF1");
+5539byte[][] families = { cf1 };
+5540byte[] col = Bytes.toBytes("C");
 5541
-5542Assert.assertEquals(0L, 
region.getWriteRequestsCount());
-5543
-5544Put put = new Put(row);
-5545put.addColumn(fam, fam, fam);
-5546
-5547Assert.assertEquals(0L, 
region.getWriteRequestsCount());
-5548region.put(put);
-5549Assert.assertEquals(1L, 
region.getWriteRequestsCount());
-5550region.put(put);
-5551Assert.assertEquals(2L, 
region.getWriteRequestsCount());
-5552region.put(put);
-5553Assert.assertEquals(3L, 
region.getWriteRequestsCount());
-5554
-5555region.delete(new Delete(row));
-5556Assert.assertEquals(4L, 
region.getWriteRequestsCount());
-5557  }
-5558
-5559  @Test
-5560  public void 
testOpenRegionWrittenToWAL() throws Exception {
-5561final ServerName serverName = 
ServerName.valueOf(name.getMethodName(), 100, 42);
-5562final RegionServerServices rss = 
spy(TEST_UTIL.createMockRegionServerService(serverName));
-5563
-5564HTableDescriptor htd = new 
HTableDescriptor(TableName.valueOf(name.getMethodName()));
-5565htd.addFamily(new 
HColumnDescriptor(fam1));
-5566htd.addFamily(new 
HColumnDescriptor(fam2));
-5567
-5568HRegionInfo hri = new 
HRegionInfo(htd.getTableName(),
-5569  HConstants.EMPTY_BYTE_ARRAY, 
HConstants.EMPTY_BYTE_ARRAY);
-5570
-5571// open the region w/o rss and wal 
and flush some files
-5572region =
-5573 
HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), 
TEST_UTIL
-5574 .getConfiguration(), 
htd);
-5575assertNotNull(region);
-5576
-5577// create a file in fam1 for the 
region before opening in OpenRegionHandler
-5578region.put(new 
Put(Bytes.toBytes("a")).addColumn(fam1, fam1, fam1));
-5579region.flush(true);
-5580
HBaseTestingUtility.closeRegionAndWAL(region);
+5542HBaseConfiguration conf = new 
HBaseConfiguration();
+5543this.region = initHRegion(tableName, 
method, conf, families);
+5544
+5545Put put = new 
Put(Bytes.toBytes("16"));
+5546put.addColumn(cf1, col, 
Bytes.toBytes("val"));
+5547region.put(put);
+5548Put put2 = new 
Put(Bytes.toBytes("15"));
+5549put2.addColumn(cf1, col, 
Bytes.toBytes("val"));
+5550region.put(put2);
+5551
+5552// Create a reverse scan
+5553Scan scan = new 
Scan(Bytes.toBytes("16"));
+5554scan.setReversed(true);
+5555RegionScannerImpl scanner = region.getScanner(scan);
+5556
+5557// Put a lot of cells that have sequenceIDs greater than the readPt of the reverse scan
+5558for (int i = 10; i < 20; i++) {
+5559  Put p = new Put(Bytes.toBytes("" + i));
+5560  p.addColumn(cf1, col, Bytes.toBytes("" + i));
+5561  region.put(p);
+5562}
+5563List<Cell> currRow = new ArrayList<>();
+5564boolean hasNext;
+5565do {
+5566  hasNext = scanner.next(currRow);
+5567} while (hasNext);
+5568
+5569assertEquals(2, currRow.size());
+5570assertEquals("16", 
Bytes.toString(currRow.get(0).getRowArray(),
+5571  currRow.get(0).getRowOffset(), 
currRow.get(0).getRowLength()));
+5572assertEquals("15", 
Bytes.toString(currRow.get(1).getRowArray(),
+5573  currRow.get(1).getRowOffset(), 
currRow.get(1).getRowLength()));
+5574  }
+5575
+5576  @Test
+5577  public void testWriteRequestsCounter() 
throws IOException {
+5578byte[] fam = 
Bytes.toBytes("info");
+5579byte[][] families = { fam };
+5580this.region = initHRegion(tableName, 
method, CONF, families);
 5581
-5582ArgumentCaptor<WALEdit> editCaptor = ArgumentCaptor.forClass(WALEdit.class);
+5582Assert.assertEquals(0L, 
region.getWriteRequestsCount());
 5583
-5584// capture append() calls
-5585WAL wal = mockWAL();
-5586when(rss.getWAL((HRegionInfo) 
any())).thenReturn(wal);
-5587
-5588region = 

[01/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site ba3ba8b2e -> c9ebe6860


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/src-html/org/apache/hadoop/hbase/filter/FuzzyRowFilter.RowTracker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/FuzzyRowFilter.RowTracker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/FuzzyRowFilter.RowTracker.html
index 3efe1f0..3b88f1d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/FuzzyRowFilter.RowTracker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/FuzzyRowFilter.RowTracker.html
@@ -43,629 +43,628 @@
 035import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair;
 036import 
org.apache.hadoop.hbase.util.Bytes;
 037import 
org.apache.hadoop.hbase.util.Pair;
-038import 
org.apache.hadoop.hbase.util.UnsafeAccess;
-039import 
org.apache.hadoop.hbase.util.UnsafeAvailChecker;
-040
-041import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-042
-043/**
-044 * This is optimized version of a standard FuzzyRowFilter Filters data based on fuzzy row key.
-045 * Performs fast-forwards during scanning. It takes pairs (row key, fuzzy info) to match row keys.
-046 * Where fuzzy info is a byte array with 0 or 1 as its values:
-047 * <ul>
-048 * <li>0 - means that this byte in provided row key is fixed, i.e. row key's byte at same position
-049 * must match</li>
-050 * <li>1 - means that this byte in provided row key is NOT fixed, i.e. row key's byte at this
-051 * position can be different from the one in provided row key</li>
-052 * </ul>
-053 * Example: Let's assume row key format is userId_actionId_year_month. Length of userId is fixed and
-054 * is 4, length of actionId is 2 and year and month are 4 and 2 bytes long respectively. Let's
-055 * assume that we need to fetch all users that performed certain action (encoded as "99") in Jan of
-056 * any year. Then the pair (row key, fuzzy info) would be the following: row key = "????_99_????_01"
-057 * (one can use any value instead of "?") fuzzy info =
-058 * "\x01\x01\x01\x01\x00\x00\x00\x00\x01\x01\x01\x01\x00\x00\x00" I.e. fuzzy info tells the matching
-059 * mask is "????_99_????_01", where at ? can be any value.
-060 */
-061@InterfaceAudience.Public
-062public class FuzzyRowFilter extends FilterBase {
-063  private static final boolean UNSAFE_UNALIGNED = UnsafeAvailChecker.unaligned();
-064  private List<Pair<byte[], byte[]>> fuzzyKeysData;
-065  private boolean done = false;
-066
-067  /**
-068   * The index of a last successfully found matching fuzzy string (in fuzzyKeysData). We will start
-069   * matching next KV with this one. If they do not match then we will return back to the one-by-one
-070   * iteration over fuzzyKeysData.
-071   */
-072  private int lastFoundIndex = -1;
-073
-074  /**
-075   * Row tracker (keeps all next rows after SEEK_NEXT_USING_HINT was returned)
-076   */
-077  private RowTracker tracker;
-078
-079  public FuzzyRowFilter(List<Pair<byte[], byte[]>> fuzzyKeysData) {
-080    List<Pair<byte[], byte[]>> fuzzyKeyDataCopy = new ArrayList<>(fuzzyKeysData.size());
-081
-082    for (Pair<byte[], byte[]> aFuzzyKeysData : fuzzyKeysData) {
-083      if (aFuzzyKeysData.getFirst().length != aFuzzyKeysData.getSecond().length) {
-084        Pair<String, String> readable =
-085          new Pair<>(Bytes.toStringBinary(aFuzzyKeysData.getFirst()), Bytes.toStringBinary(aFuzzyKeysData.getSecond()));
-086        throw new IllegalArgumentException("Fuzzy pair lengths do not match: " + readable);
-087      }
-088
-089      Pair<byte[], byte[]> p = new Pair<>();
-090      // create a copy of pair bytes so that they are not modified by the filter.
-091      p.setFirst(Arrays.copyOf(aFuzzyKeysData.getFirst(), aFuzzyKeysData.getFirst().length));
-092      p.setSecond(Arrays.copyOf(aFuzzyKeysData.getSecond(), aFuzzyKeysData.getSecond().length));
-093
-094      // update mask ( 0 -> -1 (0xff), 1 -> 2)
-095      p.setSecond(preprocessMask(p.getSecond()));
-096      preprocessSearchKey(p);
-097
-098      fuzzyKeyDataCopy.add(p);
-099    }
-100    this.fuzzyKeysData = fuzzyKeyDataCopy;
-101    this.tracker = new RowTracker();
-102  }
+038import 
org.apache.hadoop.hbase.util.UnsafeAvailChecker;
+039
+040import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+041
+042/**
+043 * This is optimized version of a standard FuzzyRowFilter Filters data based on fuzzy row key.
+044 * Performs fast-forwards during scanning. It takes pairs (row key, fuzzy info) to match row keys.
+045 * Where fuzzy info is a byte array with 0 or 1 as its values:
+046 * <ul>
+047 * <li>0 - means that this byte in provided row key is fixed, i.e. row key's byte at same position
+048 * must match</li>
+049 * <li>1 - means that this byte in provided row key is NOT fixed, i.e. row key's byte at this
+050 * position 
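A usage sketch of the example this javadoc describes; the constructor shape matches the
FuzzyRowFilter(List<Pair<byte[], byte[]>>) shown above, while the row-key layout and scan
wiring are illustrative:

import java.util.Arrays;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FuzzyRowFilter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;

public class FuzzyScanSketch {
  public static Scan actionsInJanuary() {
    // Row key layout from the javadoc: userId(4) _ actionId(2) _ year(4) _ month(2).
    byte[] rowKey = Bytes.toBytes("????_99_????_01");
    // 0 = byte must match, 1 = byte may be anything (the constructor preprocesses this mask).
    byte[] fuzzyInfo = {1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0};
    FuzzyRowFilter filter = new FuzzyRowFilter(Arrays.asList(new Pair<>(rowKey, fuzzyInfo)));
    return new Scan().setFilter(filter);
  }
}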

[01/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 1a9895d8b -> 323b17d93


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.TestProcedure.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.TestProcedure.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.TestProcedure.html
index edb675e..eb90a1f 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.TestProcedure.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.TestProcedure.html
@@ -408,184 +408,224 @@
 400}
 401  }
 402
-403  public static class TestProcedure extends NoopProcedure<Void> {
-404private byte[] data = null;
-405
-406public TestProcedure() {}
+403  public static class NoopStateMachineProcedure<TEnv, TState>
+404  extends StateMachineProcedure<TEnv, TState> {
+405private TState initialState;
+406private TEnv env;
 407
-408public TestProcedure(long procId) {
-409  this(procId, 0);
-410}
-411
-412public TestProcedure(long procId, 
long parentId) {
-413  this(procId, parentId, null);
+408public NoopStateMachineProcedure() 
{
+409}
+410
+411public NoopStateMachineProcedure(TEnv 
env, TState initialState) {
+412  this.env = env;
+413  this.initialState = initialState;
 414}
 415
-416public TestProcedure(long procId, 
long parentId, byte[] data) {
-417  this(procId, parentId, parentId, 
data);
-418}
-419
-420public TestProcedure(long procId, 
long parentId, long rootId, byte[] data) {
-421  setData(data);
-422  setProcId(procId);
-423  if (parentId > 0) {
-424setParentProcId(parentId);
-425  }
-426  if (rootId > 0 || parentId > 0) {
-427setRootProcId(rootId);
-428  }
-429}
-430
-431public void addStackId(final int 
index) {
-432  addStackIndex(index);
-433}
-434
-435public void setSuccessState() {
-436  setState(ProcedureState.SUCCESS);
-437}
-438
-439public void setData(final byte[] 
data) {
-440  this.data = data;
-441}
+416@Override
+417protected Flow executeFromState(TEnv 
env, TState tState)
+418throws 
ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
+419  return null;
+420}
+421
+422@Override
+423protected void rollbackState(TEnv 
env, TState tState) throws IOException, InterruptedException {
+424
+425}
+426
+427@Override
+428protected TState getState(int 
stateId) {
+429  return null;
+430}
+431
+432@Override
+433protected int getStateId(TState 
tState) {
+434  return 0;
+435}
+436
+437@Override
+438protected TState getInitialState() 
{
+439  return initialState;
+440}
+441  }
 442
-443@Override
-444protected void 
serializeStateData(ProcedureStateSerializer serializer)
-445throws IOException {
-446  ByteString dataString = 
ByteString.copyFrom((data == null) ? new byte[0] : data);
-447  BytesValue.Builder builder = 
BytesValue.newBuilder().setValue(dataString);
-448  
serializer.serialize(builder.build());
-449}
-450
-451@Override
-452protected void 
deserializeStateData(ProcedureStateSerializer serializer)
-453throws IOException {
-454  BytesValue bytesValue = 
serializer.deserialize(BytesValue.class);
-455  ByteString dataString = 
bytesValue.getValue();
-456
-457  if (dataString.isEmpty()) {
-458data = null;
-459  } else {
-460data = 
dataString.toByteArray();
-461  }
-462}
-463
-464// Mark acquire/release lock 
functions public for test uses.
-465@Override
-466public LockState acquireLock(Void 
env) {
-467  return LockState.LOCK_ACQUIRED;
-468}
-469
-470@Override
-471public void releaseLock(Void env) {
-472  // no-op
+443  public static class TestProcedure extends NoopProcedure<Void> {
+444private byte[] data = null;
+445
+446public TestProcedure() {}
+447
+448public TestProcedure(long procId) {
+449  this(procId, 0);
+450}
+451
+452public TestProcedure(long procId, 
long parentId) {
+453  this(procId, parentId, null);
+454}
+455
+456public TestProcedure(long procId, 
long parentId, byte[] data) {
+457  this(procId, parentId, parentId, 
data);
+458}
+459
+460public TestProcedure(long procId, 
long parentId, long rootId, byte[] data) {
+461  setData(data);
+462  setProcId(procId);
+463  if (parentId > 0) {
+464setParentProcId(parentId);
+465  }
+466  if (rootId > 0 || parentId > 0) {
+467setRootProcId(rootId);
+468  }
+469}
+470
+471public void addStackId(final int 
index) {
+472  addStackIndex(index);
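A hedged sketch of how the TestProcedure constructors above compose; the wrapper class and
printed output are illustrative, not part of the test suite:

import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
import org.apache.hadoop.hbase.util.Bytes;

public class TestProcedureUsageSketch {
  public static void main(String[] args) {
    TestProcedure root = new TestProcedure(1);                               // procId 1, no parent
    TestProcedure child = new TestProcedure(2, 1, Bytes.toBytes("payload")); // parent and root = 1
    child.addStackId(0);      // record a rollback-stack index, as the tests do
    child.setSuccessState();  // flip the state to SUCCESS for assertions
    System.out.println(root.getProcId() + " -> " + child.getProcId());
  }
}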
 

[01/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 9ab80c17b -> d13418593


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/testdevapidocs/org/apache/hadoop/hbase/procedure2/store/wal/TestForceUpdateProcedure.WaitingProcedure.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/store/wal/TestForceUpdateProcedure.WaitingProcedure.html
 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/store/wal/TestForceUpdateProcedure.WaitingProcedure.html
new file mode 100644
index 000..a34c209
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/store/wal/TestForceUpdateProcedure.WaitingProcedure.html
@@ -0,0 +1,412 @@
+
+
+
+
+
+TestForceUpdateProcedure.WaitingProcedure (Apache HBase 3.0.0-SNAPSHOT 
Test API)
+
+
+
+
+
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.procedure2.store.wal
+Class TestForceUpdateProcedure.WaitingProcedure
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.procedure2.Procedure<java.lang.Void>
+
+
+org.apache.hadoop.hbase.procedure2.store.wal.TestForceUpdateProcedure.WaitingProcedure
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+java.lang.Comparable<org.apache.hadoop.hbase.procedure2.Procedure<java.lang.Void>>
+
+
+Enclosing class:
+TestForceUpdateProcedure
+
+
+
+public static final class TestForceUpdateProcedure.WaitingProcedure
+extends org.apache.hadoop.hbase.procedure2.Procedure<java.lang.Void>
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from class org.apache.hadoop.hbase.procedure2.Procedure
+org.apache.hadoop.hbase.procedure2.Procedure.LockState
+
+
+
+
+
+
+
+
+Field Summary
+
+
+
+
+Fields inherited from class org.apache.hadoop.hbase.procedure2.Procedure
+NO_PROC_ID, NO_TIMEOUT
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+WaitingProcedure()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All Methods | Instance Methods | Concrete Methods
+
+Modifier and Type
+Method and Description
+
+
+protected boolean
+abort(java.lang.Void env)
+
+
+protected void
+deserializeStateData(org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer serializer)
+
+
+protected org.apache.hadoop.hbase.procedure2.Procedure<java.lang.Void>[]
+execute(java.lang.Void env)
+
+
+protected void
+rollback(java.lang.Void env)
+
+
+protected void
+serializeStateData(org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer serializer)
+
+
+
+
+
+
+Methods inherited from class org.apache.hadoop.hbase.procedure2.Procedure
+acquireLock, addStackIndex, afterReplay, beforeReplay, compareTo, 
completionCleanup, doExecute, doRollback, elapsedTime, getChildrenLatch, 
getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, 
getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, 
getRootProcedureId, getRootProcId, 
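The method summary above is effectively the minimal surface a concrete Procedure must
implement. A hedged skeleton under that assumption (class name illustrative; signatures follow
the summary):

import java.io.IOException;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;

public class NoOpProcedureSketch extends Procedure<Void> {
  @Override
  protected Procedure<Void>[] execute(Void env)
      throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException {
    return null; // no child procedures; this procedure is done
  }

  @Override
  protected void rollback(Void env) throws IOException, InterruptedException {
    // nothing to undo
  }

  @Override
  protected boolean abort(Void env) {
    return false; // not abortable
  }

  @Override
  protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
    // stateless
  }

  @Override
  protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
    // stateless
  }
}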

[01/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 2dd584033 -> 419d03380


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/src-html/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.OpenFileDescriptorResourceAnalyzer.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.OpenFileDescriptorResourceAnalyzer.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.OpenFileDescriptorResourceAnalyzer.html
index 47aac2c..cb4c472 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.OpenFileDescriptorResourceAnalyzer.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.OpenFileDescriptorResourceAnalyzer.html
@@ -26,221 +26,179 @@
 018
 019package org.apache.hadoop.hbase;
 020
-021import 
java.lang.management.ManagementFactory;
-022import 
java.lang.management.MemoryUsage;
-023import java.util.ArrayList;
-024import java.util.HashSet;
-025import java.util.List;
-026import java.util.Map;
-027import java.util.Set;
-028import 
java.util.concurrent.ConcurrentHashMap;
-029import java.util.concurrent.TimeUnit;
-030import 
org.apache.hadoop.hbase.ResourceChecker.Phase;
-031import 
org.apache.hadoop.hbase.util.JVM;
-032import 
org.junit.runner.notification.RunListener;
-033
-034/**
-035 * Listen to the test progress and check the usage of:
-036 * <ul>
-037 * <li>threads</li>
-038 * <li>open file descriptor</li>
-039 * <li>max open file descriptor</li>
-040 * </ul>
-041 * <p>
-042 * When surefire forkMode=once/always/perthread, this code is executed on the forked process.
-043 */
-044public class ResourceCheckerJUnitListener 
extends RunListener {
-045  private Map<String, ResourceChecker> rcs = new ConcurrentHashMap<>();
-046
-047  static class ThreadResourceAnalyzer 
extends ResourceChecker.ResourceAnalyzer {
-048private static Set<String> initialThreadNames = new HashSet<>();
-049private static List<String> stringsToLog = null;
-050
-051@Override
-052public int getVal(Phase phase) {
-053  Map<Thread, StackTraceElement[]> stackTraces = Thread.getAllStackTraces();
-054  if (phase == Phase.INITIAL) {
-055stringsToLog = null;
-056for (Thread t : 
stackTraces.keySet()) {
-057  
initialThreadNames.add(t.getName());
-058}
-059  } else if (phase == Phase.END) {
-060if (stackTraces.size() > initialThreadNames.size()) {
-061  stringsToLog = new ArrayList<>();
-062  for (Thread t : 
stackTraces.keySet()) {
-063if 
(!initialThreadNames.contains(t.getName())) {
-064  
stringsToLog.add("\nPotentially hanging thread: " + t.getName() + "\n");
-065  StackTraceElement[] 
stackElements = stackTraces.get(t);
-066  for (StackTraceElement ele 
: stackElements) {
-067stringsToLog.add("\t" + 
ele + "\n");
-068  }
-069}
-070  }
-071}
-072  }
-073  return stackTraces.size();
-074}
-075
-076@Override
-077public int getMax() {
-078  return 500;
-079}
-080
-081@Override
-082public List<String> getStringsToLog() {
-083  return stringsToLog;
-084}
-085  }
-086
-087
-088  static class 
OpenFileDescriptorResourceAnalyzer extends ResourceChecker.ResourceAnalyzer {
-089@Override
-090public int getVal(Phase phase) {
-091  if (!JVM.isUnix()) {
-092return 0;
-093  }
-094  JVM jvm = new JVM();
-095  return (int) 
jvm.getOpenFileDescriptorCount();
-096}
-097
-098@Override
-099public int getMax() {
-100  return 1024;
-101}
-102  }
-103
-104  static class 
MaxFileDescriptorResourceAnalyzer extends ResourceChecker.ResourceAnalyzer {
-105@Override
-106public int getVal(Phase phase) {
-107  if (!JVM.isUnix()) {
-108return 0;
-109  }
-110  JVM jvm = new JVM();
-111  return (int) 
jvm.getMaxFileDescriptorCount();
-112}
-113  }
-114
-115  static class 
SystemLoadAverageResourceAnalyzer extends ResourceChecker.ResourceAnalyzer {
-116@Override
-117public int getVal(Phase phase) {
-118  if (!JVM.isUnix()) {
-119return 0;
-120  }
-121  return (int) (new 
JVM().getSystemLoadAverage() * 100);
-122}
-123  }
-124
-125  static class 
ProcessCountResourceAnalyzer extends ResourceChecker.ResourceAnalyzer {
-126@Override
-127public int getVal(Phase phase) {
-128  if (!JVM.isUnix()) {
-129return 0;
-130  }
-131  return new 
JVM().getNumberOfRunningProcess();
-132}
-133  }
-134
-135  static class 
AvailableMemoryMBResourceAnalyzer extends ResourceChecker.ResourceAnalyzer {
-136@Override
-137public int getVal(Phase phase) {
-138  if (!JVM.isUnix()) {
-139return 0;
-140  }
-141  return (int) (new 
JVM().getFreeMemory() / (1024L * 1024L));
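The analyzers above share one pattern: getVal(Phase) samples a resource at Phase.INITIAL and
Phase.END, and an optional getMax() bounds the allowed value. A hedged sketch of a custom
analyzer in the same package (the heap metric is illustrative, not part of HBase):

package org.apache.hadoop.hbase;

import org.apache.hadoop.hbase.ResourceChecker.Phase;

class HeapUsedMBResourceAnalyzer extends ResourceChecker.ResourceAnalyzer {
  @Override
  public int getVal(Phase phase) {
    // Sample JVM heap in use, in megabytes.
    Runtime rt = Runtime.getRuntime();
    return (int) ((rt.totalMemory() - rt.freeMemory()) / (1024L * 1024L));
  }

  @Override
  public int getMax() {
    return 2048; // flag the test if more than ~2 GB is in use
  }
}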

[01/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 2a4120d7e -> 37cf49a66


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
index 566f410..da040ad 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
@@ -341,8361 +341,8425 @@
 333  private final int 
rowLockWaitDuration;
-334  static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000;
 335
-336  // The internal wait duration to 
acquire a lock before read/update
-337  // from the region. It is not per row. 
The purpose of this wait time
-338  // is to avoid waiting a long time 
while the region is busy, so that
-339  // we can release the IPC handler soon 
enough to improve the
-340  // availability of the region server. 
It can be adjusted by
-341  // tuning configuration 
"hbase.busy.wait.duration".
-342  final long busyWaitDuration;
-343  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-344
-345  // If updating multiple rows in one 
call, wait longer,
-346  // i.e. waiting for busyWaitDuration * 
# of rows. However,
-347  // we can limit the max multiplier.
-348  final int maxBusyWaitMultiplier;
-349
-350  // Max busy wait duration. There is no 
point to wait longer than the RPC
-351  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
-352  final long maxBusyWaitDuration;
-353
-354  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
-355  // in bytes
-356  final long maxCellSize;
-357
-358  // Number of mutations for minibatch 
processing.
-359  private final int miniBatchSize;
+336  private Path regionDir;
+337  private FileSystem walFS;
+338
+339  // The internal wait duration to 
acquire a lock before read/update
+340  // from the region. It is not per row. 
The purpose of this wait time
+341  // is to avoid waiting a long time 
while the region is busy, so that
+342  // we can release the IPC handler soon 
enough to improve the
+343  // availability of the region server. 
It can be adjusted by
+344  // tuning configuration 
"hbase.busy.wait.duration".
+345  final long busyWaitDuration;
+346  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+347
+348  // If updating multiple rows in one 
call, wait longer,
+349  // i.e. waiting for busyWaitDuration * 
# of rows. However,
+350  // we can limit the max multiplier.
+351  final int maxBusyWaitMultiplier;
+352
+353  // Max busy wait duration. There is no 
point to wait longer than the RPC
+354  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
+355  final long maxBusyWaitDuration;
+356
+357  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
+358  // in bytes
+359  final long maxCellSize;
 360
-361  // negative number indicates infinite 
timeout
-362  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-363  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
-364
-365  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
-366
-367  /**
-368   * The sequence ID that was encountered when this region was opened.
-369   */
-370  private long openSeqNum = 
HConstants.NO_SEQNUM;
-371
-372  /**
-373   * The default setting for whether to 
enable on-demand CF loading for
-374   * scan requests to this region. 
Requests can override it.
-375   */
-376  private boolean 
isLoadingCfsOnDemandDefault = false;
-377
-378  private final AtomicInteger 
majorInProgress = new AtomicInteger(0);
-379  private final AtomicInteger 
minorInProgress = new AtomicInteger(0);
+361  // Number of mutations for minibatch 
processing.
+362  private final int miniBatchSize;
+363
+364  // negative number indicates infinite 
timeout
+365  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+366  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
+367
+368  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
+369
+370  /**
+371   * The sequence ID that was encountered when this region was opened.
+372   */
+373  private long openSeqNum = 
HConstants.NO_SEQNUM;
+374
+375  /**
+376   * The default setting for whether to 
enable on-demand CF loading for
+377   * scan requests to this region. 
Requests can override it.
+378   */
+379  private boolean 
isLoadingCfsOnDemandDefault = false;
 380
-381  //
-382  // Context: During replay we want to 
ensure that we do not lose any data. So, we
-383  // have to be conservative in how we 
replay wals. For each store, we 

[01/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site e4b87e9ee -> f6f9d4f3e


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.Comparer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.Comparer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.Comparer.html
index 804ef45..e999ddb 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.Comparer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.Comparer.html
@@ -138,2491 +138,2492 @@
 130  // SizeOf which uses 
java.lang.instrument says 24 bytes. (3 longs?)
 131  public static final int 
ESTIMATED_HEAP_TAX = 16;
 132
-133  private static final boolean 
UNSAFE_UNALIGNED = UnsafeAvailChecker.unaligned();
-134
-135  /**
-136   * Returns length of the byte array, 
returning 0 if the array is null.
-137   * Useful for calculating sizes.
-138   * @param b byte array, which can be 
null
-139   * @return 0 if b is null, otherwise 
returns length
-140   */
-141  final public static int len(byte[] b) 
{
-142return b == null ? 0 : b.length;
-143  }
-144
-145  private byte[] bytes;
-146  private int offset;
-147  private int length;
-148
-149  /**
-150   * Create a zero-size sequence.
-151   */
-152  public Bytes() {
-153super();
-154  }
-155
-156  /**
-157   * Create a Bytes using the byte array 
as the initial value.
-158   * @param bytes This array becomes the 
backing storage for the object.
-159   */
-160  public Bytes(byte[] bytes) {
-161this(bytes, 0, bytes.length);
-162  }
-163
-164  /**
-165   * Set the new Bytes to the contents of 
the passed
-166   * <code>ibw</code>.
-167   * @param ibw the value to set this 
Bytes to.
-168   */
-169  public Bytes(final Bytes ibw) {
-170this(ibw.get(), ibw.getOffset(), 
ibw.getLength());
-171  }
-172
-173  /**
-174   * Set the value to a given byte 
range
-175   * @param bytes the new byte range to 
set to
-176   * @param offset the offset in newData 
to start at
-177   * @param length the number of bytes in 
the range
-178   */
-179  public Bytes(final byte[] bytes, final 
int offset,
-180  final int length) {
-181this.bytes = bytes;
-182this.offset = offset;
-183this.length = length;
-184  }
-185
-186  /**
-187   * Copy bytes from ByteString 
instance.
-188   * @param byteString copy from
-189   * @deprecated As of release 2.0.0, 
this will be removed in HBase 3.0.0.
-190   */
-191  @Deprecated
-192  public Bytes(final ByteString 
byteString) {
-193this(byteString.toByteArray());
-194  }
-195
-196  /**
-197   * Get the data from the Bytes.
-198   * @return The data is only valid 
between offset and offset+length.
-199   */
-200  public byte [] get() {
-201if (this.bytes == null) {
-202  throw new IllegalStateException("Uninitialized. Null constructor " +
-203  "called w/o accompanying readFields invocation");
-204}
-205return this.bytes;
-206  }
-207
-208  /**
-209   * @param b Use passed bytes as backing 
array for this instance.
-210   */
-211  public void set(final byte [] b) {
-212set(b, 0, b.length);
-213  }
-214
-215  /**
-216   * @param b Use passed bytes as backing 
array for this instance.
-217   * @param offset
-218   * @param length
-219   */
-220  public void set(final byte [] b, final 
int offset, final int length) {
-221this.bytes = b;
-222this.offset = offset;
-223this.length = length;
-224  }
-225
-226  /**
-227   * @return the number of valid bytes in 
the buffer
-228   * @deprecated use {@link #getLength()} 
instead
-229   */
-230  @Deprecated
-231  public int getSize() {
-232if (this.bytes == null) {
-233  throw new IllegalStateException("Uninitialized. Null constructor " +
-234  "called w/o accompanying readFields invocation");
-235}
-236return this.length;
-237  }
-238
-239  /**
-240   * @return the number of valid bytes in 
the buffer
-241   */
-242  public int getLength() {
-243if (this.bytes == null) {
-244  throw new IllegalStateException("Uninitialized. Null constructor " +
-245  "called w/o accompanying readFields invocation");
-246}
-247return this.length;
-248  }
-249
-250  /**
-251   * @return offset
-252   */
-253  public int getOffset(){
-254return this.offset;
-255  }
-256
-257  /**
-258   * @deprecated As of release 2.0.0, 
this will be removed in HBase 3.0.0.
-259   */
-260  @Deprecated
-261  public ByteString toByteString() {
-262return 
ByteString.copyFrom(this.bytes, this.offset, this.length);
-263  }
-264
-265  @Override
-266  public int hashCode() {
-267return Bytes.hashCode(bytes, offset, 
length);
-268  }
-269
-270  /**
-271   * Define the sort order of the 
Bytes.
-272   * @param that The other bytes 
writable
-273   * @return Positive if left is bigger 
than right, 0 if they are equal, and
-274   * negative if left is 
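A hedged usage sketch of the accessors above (backing array, offset, length); the sample
strings are illustrative:

import org.apache.hadoop.hbase.util.Bytes;

public class BytesUsageSketch {
  public static void main(String[] args) {
    byte[] backing = Bytes.toBytes("rowkey-0001");
    // Wrap the first 6 bytes ("rowkey") without copying the array.
    Bytes slice = new Bytes(backing, 0, 6);
    System.out.println(slice.getLength());  // 6
    System.out.println(Bytes.len(null));    // 0 -- the null-safe length helper above
    System.out.println(Bytes.toString(slice.get(), slice.getOffset(), slice.getLength()));
  }
}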

[01/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site a5dc1229e -> fa1bebf86


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
index ef2c9e4..91f1930 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
@@ -879,385 +879,389 @@
 
 
 
-TestHTableMultiplexer
+TestHbck.SuspendProcedure
 
 
 
-TestHTableMultiplexerFlushCache
+TestHTableMultiplexer
 
 
 
-TestHTableMultiplexerViaMocks
+TestHTableMultiplexerFlushCache
 
 
 
-TestIllegalTableDescriptor
+TestHTableMultiplexerViaMocks
 
 
 
-TestImmutableHColumnDescriptor
+TestIllegalTableDescriptor
 
 
 
+TestImmutableHColumnDescriptor
+
+
+
 TestImmutableHRegionInfo
 
 Test ImmutableHRegionInfo
 
 
-
+
 TestImmutableHTableDescriptor
 
 
-
+
 TestIncrement
 
 
-
+
 TestIncrementFromClientSideWithCoprocessor
 
 Test all Increment client operations with a 
coprocessor that
  just implements the default flush/compact/scan policy.
 
 
-
+
 TestIncrementsFromClientSide
 
 Run Increment tests that use the HBase clients; 
HTable.
 
 
-
+
 TestInterfaceAlign
 
 
-
+
 TestIntraRowPagination
 
 Test scan/get offset and limit settings within one row 
through HRegion API.
 
 
-
+
 TestLeaseRenewal
 
 
-
+
 TestLimitedScanWithFilter
 
 With filter we may stop at a middle of row and think that 
we still have more cells for the
  current row but actually all the remaining cells will be filtered out by the 
filter.
 
 
-
+
 TestMalformedCellFromClient
 
 The purpose of this test is to ensure whether rs deals with 
the malformed cells correctly.
 
 
-
+
 TestMetaCache
 
 
-
+
 TestMetaCache.CallQueueTooBigExceptionInjector
 
 Throws CallQueueTooBigException for all gets.
 
 
-
+
 TestMetaCache.ExceptionInjector
 
 
-
+
 TestMetaCache.FakeRSRpcServices
 
 
-
+
 TestMetaCache.RegionServerWithFakeRpcServices
 
 
-
+
 TestMetaCache.RoundRobinExceptionInjector
 
 Rotates through the possible cache clearing and non-cache 
clearing exceptions
  for requests.
 
 
-
+
 TestMetaWithReplicas
 
 Tests the scenarios where replicas are enabled for the meta 
table
 
 
-
+
 TestMetricsConnection
 
 
-
+
 TestMobCloneSnapshotFromClient
 
 Test clone snapshots from the client
 
 
-
+
 TestMobCloneSnapshotFromClient.DelayFlushCoprocessor
 
 This coprocessor is used to delay the flush.
 
 
-
+
 TestMobRestoreSnapshotFromClient
 
 Test restore snapshots from the client
 
 
-
+
 TestMobSnapshotCloneIndependence
 
 Test to verify that the cloned table is independent of the 
table from which it was cloned
 
 
-
+
 TestMobSnapshotFromClient
 
 Test create/using/deleting snapshots from the client
 
 
-
+
 TestMultiParallel
 
 
-
+
 TestMultiParallel.MyMasterObserver
 
 
-
+
 TestMultipleTimestamps
 
 Run tests related to TimestampsFilter using 
HBase client APIs.
 
 
-
+
 TestMultiRespectsLimits
 
 This test sets the multi size WAY low and then checks 
to make sure that gets will still make
  progress.
 
 
-
+
 TestMutation
 
 
-
+
 TestMvccConsistentScanner
 
 
-
+
 TestOperation
 
 Run tests that use the functionality of the Operation 
superclass for
  Puts, Gets, Deletes, Scans, and MultiPuts.
 
 
-
+
 TestProcedureFuture
 
 
-
+
 TestProcedureFuture.TestFuture
 
 
-
+
 TestPutDeleteEtcCellIteration
 
 Test that I can Iterate Client Actions that hold Cells (Get 
does not have Cells).
 
 
-
+
 TestPutDotHas
 
 
-
+
 TestPutWithDelete
 
 
-
+
 TestPutWriteToWal
 
 
-
+
 TestQuotasShell
 
 
-
+
 TestRawAsyncScanCursor
 
 
-
+
 TestRawAsyncTableLimitedScanWithFilter
 
 With filter we may stop at a middle of row and think that 
we still have more cells for the
  current row but actually all the remaining cells will be filtered out by the 
filter.
 
 
-
+
 TestRawAsyncTablePartialScan
 
 
-
+
 TestRawAsyncTableScan
 
 
-
+
 TestRegionInfoDisplay
 
 
-
+
 TestReplicasClient
 
 Tests for region replicas.
 
 
-
+
 TestReplicasClient.SlowMeCopro
 
 This copro is used to synchronize the tests.
 
 
-
+
 TestReplicationShell
 
 
-
+
 TestReplicaWithCluster
 
 
-
+
 TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowOrStopCopro
 
 This copro is used to slow down the primary meta region 
scan a bit
 
 
-
+
 TestReplicaWithCluster.RegionServerStoppedCopro
 
 This copro is used to simulate region server down exception 
for Get and Scan
 
 
-
+
 TestReplicaWithCluster.SlowMeCopro
 
 This copro is used to synchronize the tests.
 
 
-
+
 TestRestoreSnapshotFromClient
 
 Test restore snapshots from the client
 
 
-
+
 TestRestoreSnapshotFromClientWithRegionReplicas
 
 
-
+
 TestResult
 
 
-
+
 TestResultFromCoprocessor
 
 
-
+
 TestResultFromCoprocessor.MyObserver
 
 
-
+
 TestResultScannerCursor
 
 
-
+

[01/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 033d6a8ee -> 293abb173


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerMonitor.html
index 1b52048..ce887a2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerMonitor.html
@@ -594,1003 +594,1033 @@
 586  private boolean failOnError = true;
 587  private boolean regionServerMode = 
false;
 588  private boolean zookeeperMode = 
false;
-589  private boolean regionServerAllRegions 
= false;
-590  private boolean writeSniffing = 
false;
-591  private long 
configuredWriteTableTimeout = DEFAULT_TIMEOUT;
-592  private boolean treatFailureAsError = 
false;
-593  private TableName writeTableName = 
DEFAULT_WRITE_TABLE_NAME;
-594  private HashMap<String, Long> configuredReadTableTimeouts = new HashMap<>();
-595
-596  private ExecutorService executor; // 
threads to retrieve data from regionservers
-597
-598  public Canary() {
-599this(new 
ScheduledThreadPoolExecutor(1), new RegionServerStdOutSink());
-600  }
-601
-602  public Canary(ExecutorService executor, 
Sink sink) {
-603this.executor = executor;
-604this.sink = sink;
-605  }
-606
-607  @Override
-608  public Configuration getConf() {
-609return conf;
-610  }
-611
-612  @Override
-613  public void setConf(Configuration conf) 
{
-614this.conf = conf;
-615  }
-616
-617  private int parseArgs(String[] args) 
{
-618int index = -1;
-619// Process command line args
-620for (int i = 0; i < args.length; i++) {
-621  String cmd = args[i];
-622
-623  if (cmd.startsWith("-")) {
-624if (index >= 0) {
-625  // command line args must be in 
the form: [opts] [table 1 [table 2 ...]]
-626  System.err.println("Invalid 
command line options");
-627  printUsageAndExit();
-628}
-629
-630if (cmd.equals("-help")) {
-631  // user asked for help, print 
the help and quit.
-632  printUsageAndExit();
-633} else if (cmd.equals("-daemon") && interval == 0) {
-634  // user asked for daemon mode, 
set a default interval between checks
-635  interval = DEFAULT_INTERVAL;
-636} else if 
(cmd.equals("-interval")) {
-637  // user has specified an 
interval for canary breaths (-interval N)
-638  i++;
-639
-640  if (i == args.length) {
-641System.err.println("-interval 
needs a numeric value argument.");
-642printUsageAndExit();
-643  }
-644
-645  try {
-646interval = 
Long.parseLong(args[i]) * 1000;
-647  } catch (NumberFormatException 
e) {
-648System.err.println("-interval 
needs a numeric value argument.");
-649printUsageAndExit();
-650  }
-651} else if 
(cmd.equals("-zookeeper")) {
-652  this.zookeeperMode = true;
-653} else 
if(cmd.equals("-regionserver")) {
-654  this.regionServerMode = true;
-655} else 
if(cmd.equals("-allRegions")) {
-656  this.regionServerAllRegions = 
true;
-657} else 
if(cmd.equals("-writeSniffing")) {
-658  this.writeSniffing = true;
-659} else 
if(cmd.equals("-treatFailureAsError")) {
-660  this.treatFailureAsError = 
true;
-661} else if (cmd.equals("-e")) {
-662  this.useRegExp = true;
-663} else if (cmd.equals("-t")) {
-664  i++;
-665
-666  if (i == args.length) {
-667System.err.println("-t needs 
a numeric value argument.");
-668printUsageAndExit();
-669  }
-670
-671  try {
-672this.timeout = 
Long.parseLong(args[i]);
-673  } catch (NumberFormatException 
e) {
-674System.err.println("-t needs 
a numeric value argument.");
-675printUsageAndExit();
-676  }
-677} else 
if(cmd.equals("-writeTableTimeout")) {
-678  i++;
-679
-680  if (i == args.length) {
-681
System.err.println("-writeTableTimeout needs a numeric value argument.");
-682printUsageAndExit();
-683  }
-684
-685  try {
-686
this.configuredWriteTableTimeout = Long.parseLong(args[i]);
-687  } catch (NumberFormatException 
e) {
-688
System.err.println("-writeTableTimeout needs a numeric value argument.");
-689printUsageAndExit();
-690  }
-691} else if 
(cmd.equals("-writeTable")) {
-692  i++;
-693
-694  if (i == args.length) {
-695
System.err.println("-writeTable needs a string value argument.");
-696  
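Since these flags are parsed from a plain args array, the canary can also be driven
programmatically. A hedged sketch, assuming Canary implements Hadoop's Tool (it exposes
getConf/setConf above); the flag values are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.tool.Canary;
import org.apache.hadoop.util.ToolRunner;

public class CanaryRunnerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Daemon mode with a 60-second pause between runs; failures become a non-zero exit code.
    String[] canaryArgs = { "-daemon", "-interval", "60", "-treatFailureAsError" };
    System.exit(ToolRunner.run(conf, new Canary(), canaryArgs));
  }
}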

[01/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 424d7e416 -> 74f60271d


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
index a8cb7c4..8ec6dad 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
@@ -2831,5851 +2831,5852 @@
 2823status.setStatus(msg);
 2824
 2825if (rsServices != null && rsServices.getMetrics() != null) {
-2826  
rsServices.getMetrics().updateFlush(time - startTime,
-2827  mss.getDataSize(), 
flushedOutputFileSize);
-2828}
-2829
-2830return new 
FlushResultImpl(compactionRequested ?
-2831
FlushResult.Result.FLUSHED_COMPACTION_NEEDED :
-2832  
FlushResult.Result.FLUSHED_NO_COMPACTION_NEEDED, flushOpSeqId);
-2833  }
-2834
-2835  /**
-2836   * Method to safely get the next 
sequence number.
-2837   * @return Next sequence number 
unassociated with any actual edit.
-2838   * @throws IOException
-2839   */
-2840  @VisibleForTesting
-2841  protected long getNextSequenceId(final 
WAL wal) throws IOException {
-2842WriteEntry we = mvcc.begin();
-2843mvcc.completeAndWait(we);
-2844return we.getWriteNumber();
-2845  }
-2846
-2847  
//
-2848  // get() methods for client use.
-2849  
//
-2850
-2851  @Override
-2852  public RegionScannerImpl 
getScanner(Scan scan) throws IOException {
-2853   return getScanner(scan, null);
-2854  }
-2855
-2856  @Override
-2857  public RegionScannerImpl getScanner(Scan scan, List<KeyValueScanner> additionalScanners)
-2858  throws IOException {
-2859return getScanner(scan, 
additionalScanners, HConstants.NO_NONCE, HConstants.NO_NONCE);
-2860  }
-2861
-2862  private RegionScannerImpl getScanner(Scan scan, List<KeyValueScanner> additionalScanners,
-2863  long nonceGroup, long nonce) 
throws IOException {
-2864
startRegionOperation(Operation.SCAN);
-2865try {
-2866  // Verify families are all valid
-2867  if (!scan.hasFamilies()) {
-2868// Adding all families to 
scanner
-2869for (byte[] family : 
this.htableDescriptor.getColumnFamilyNames()) {
-2870  scan.addFamily(family);
-2871}
-2872  } else {
-2873for (byte[] family : 
scan.getFamilyMap().keySet()) {
-2874  checkFamily(family);
-2875}
-2876  }
-2877  return 
instantiateRegionScanner(scan, additionalScanners, nonceGroup, nonce);
-2878} finally {
-2879  
closeRegionOperation(Operation.SCAN);
-2880}
-2881  }
-2882
-2883  protected RegionScanner 
instantiateRegionScanner(Scan scan,
-2884  List<KeyValueScanner> additionalScanners) throws IOException {
-2885return 
instantiateRegionScanner(scan, additionalScanners, HConstants.NO_NONCE,
-2886  HConstants.NO_NONCE);
-2887  }
-2888
-2889  protected RegionScannerImpl 
instantiateRegionScanner(Scan scan,
-2890  List<KeyValueScanner> additionalScanners, long nonceGroup, long nonce) throws IOException {
-2891if (scan.isReversed()) {
-2892  if (scan.getFilter() != null) {
-2893
scan.getFilter().setReversed(true);
-2894  }
-2895  return new 
ReversedRegionScannerImpl(scan, additionalScanners, this);
-2896}
-2897return new RegionScannerImpl(scan, 
additionalScanners, this, nonceGroup, nonce);
-2898  }
-2899
-2900  /**
-2901   * Prepare a delete for a row mutation 
processor
-2902   * @param delete The passed delete is 
modified by this method. WARNING!
-2903   * @throws IOException
-2904   */
-2905  public void prepareDelete(Delete 
delete) throws IOException {
-2906// Check to see if this is a 
deleteRow insert
-2907
if(delete.getFamilyCellMap().isEmpty()){
-2908  for(byte [] family : 
this.htableDescriptor.getColumnFamilyNames()){
-2909// Don't eat the timestamp
-2910delete.addFamily(family, 
delete.getTimestamp());
-2911  }
-2912} else {
-2913  for(byte [] family : 
delete.getFamilyCellMap().keySet()) {
-2914if(family == null) {
-2915  throw new 
NoSuchColumnFamilyException("Empty family is invalid");
-2916}
-2917checkFamily(family);
-2918  }
-2919}
-2920  }
-2921
-2922  @Override
-2923  public void delete(Delete delete) 
throws IOException {
-2924checkReadOnly();
-2925checkResources();
-2926
startRegionOperation(Operation.DELETE);
-2927try {
-2928  // All edits for the given row 
(across all column families) must 
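Note how instantiateRegionScanner above flips an attached filter when the scan is reversed;
the client only sets the flag. A hedged client-side sketch (row keys and filter choice are
illustrative):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ReversedScanSketch {
  public static Scan buildScan() {
    Scan scan = new Scan().withStartRow(Bytes.toBytes("row-9999"));
    scan.setReversed(true); // the region wraps this in a ReversedRegionScannerImpl
    scan.setFilter(new PrefixFilter(Bytes.toBytes("row-"))); // reversed for us server-side
    return scan;
  }
}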

[01/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.

2018-08-28 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site eccf5317c -> 424d7e416


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/testdevapidocs/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.MyRegionServer.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.MyRegionServer.html
 
b/testdevapidocs/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.MyRegionServer.html
index 6822f2d..76a3116 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.MyRegionServer.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.MyRegionServer.html
@@ -132,7 +132,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class TestMetaShutdownHandler.MyRegionServer
+public static class TestMetaShutdownHandler.MyRegionServer
 extends MiniHBaseCluster.MiniHBaseClusterRegionServer
 
 
@@ -260,7 +260,7 @@ extends 
 
 MyRegionServer
-public MyRegionServer(org.apache.hadoop.conf.Configuration conf)
+public MyRegionServer(org.apache.hadoop.conf.Configuration conf)
 throws java.io.IOException,
   org.apache.zookeeper.KeeperException,
   java.lang.InterruptedException
@@ -286,7 +286,7 @@ extends 
 
 abort
-public void abort(java.lang.String reason,
+public void abort(java.lang.String reason,
   java.lang.Throwable cause)
 
 Specified by:



[01/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 455e3292b -> 0cf79db0e


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/src-html/org/apache/hadoop/hbase/filter/DependentColumnFilter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/DependentColumnFilter.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/DependentColumnFilter.html
index 9425699..f592ec3 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/DependentColumnFilter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/DependentColumnFilter.html
@@ -30,296 +30,312 @@
 022import java.util.ArrayList;
 023import java.util.HashSet;
 024import java.util.List;
-025import java.util.Set;
-026
-027import org.apache.hadoop.hbase.Cell;
-028import 
org.apache.hadoop.hbase.CellUtil;
-029import 
org.apache.hadoop.hbase.CompareOperator;
-030import 
org.apache.yetus.audience.InterfaceAudience;
-031import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-032import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-033import 
org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
-034import 
org.apache.hadoop.hbase.util.Bytes;
-035
-036import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-037
-038import 
org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-039import 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
-040
-041/**
-042 * A filter for adding inter-column 
timestamp matching
-043 * Only cells with a correspondingly 
timestamped entry in
-044 * the target column will be retained
-045 * Not compatible with Scan.setBatch as 
operations need 
-046 * full rows for correct filtering 
-047 */
-048@InterfaceAudience.Public
-049public class DependentColumnFilter 
extends CompareFilter {
-050
-051  protected byte[] columnFamily;
-052  protected byte[] columnQualifier;
-053  protected boolean 
dropDependentColumn;
-054
-055  protected Set<Long> stampSet = new HashSet<>();
-056  
-057  /**
-058   * Build a dependent column filter with 
value checking
-059   * dependent column values will be compared using the supplied
-060   * compareOp and comparator, for usage 
of which
-061   * refer to {@link CompareFilter}
-062   * 
-063   * @param family dependent column 
family
-064   * @param qualifier dependent column 
qualifier
-065   * @param dropDependentColumn whether 
the column should be discarded after
-066   * @param valueCompareOp comparison op 

-067   * @param valueComparator comparator
-068   * @deprecated Since 2.0.0. Will be 
removed in 3.0.0. Use
-069   * {@link 
#DependentColumnFilter(byte[], byte[], boolean, CompareOperator, 
ByteArrayComparable)}
-070   * instead.
-071   */
-072  @Deprecated
-073  public DependentColumnFilter(final byte 
[] family, final byte[] qualifier,
-074  final boolean dropDependentColumn, 
final CompareOp valueCompareOp,
-075final ByteArrayComparable 
valueComparator) {
-076this(family, qualifier, 
dropDependentColumn, CompareOperator.valueOf(valueCompareOp.name()),
-077  valueComparator);
-078  }
-079
-080  /**
-081   * Build a dependent column filter with 
value checking
-082   * dependent column varies will be 
compared using the supplied
-083   * compareOp and comparator, for usage 
of which
-084   * refer to {@link CompareFilter}
-085   *
-086   * @param family dependent column 
family
-087   * @param qualifier dependent column 
qualifier
-088   * @param dropDependentColumn whether 
the column should be discarded after
-089   * @param op Value comparison op
-090   * @param valueComparator comparator
-091   */
-092  public DependentColumnFilter(final byte 
[] family, final byte[] qualifier,
-093   final 
boolean dropDependentColumn, final CompareOperator op,
-094   final 
ByteArrayComparable valueComparator) {
-095// set up the comparator
-096super(op, valueComparator);
-097this.columnFamily = family;
-098this.columnQualifier = qualifier;
-099this.dropDependentColumn = 
dropDependentColumn;
-100  }
-101  
-102  /**
-103   * Constructor for DependentColumn 
filter.
-104   * Cells where a Cell from target 
column
-105   * with the same timestamp do not exist 
will be dropped.
-106   *
-107   * @param family name of target column 
family
-108   * @param qualifier name of column 
qualifier
-109   */
-110  public DependentColumnFilter(final byte 
[] family, final byte [] qualifier) {
-111this(family, qualifier, false);
-112  }
-113  
-114  /**
-115   * Constructor for DependentColumn 
filter.
-116   * Cells where a Cell from target 
column
-117   * with the same timestamp do not exist 
will be dropped.
-118   *
-119   * @param family name of dependent 
column family
-120   * @param qualifier name of dependent 
qualifier
-121   * @param dropDependentColumn whether 
the dependent 
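A hedged usage sketch of the non-deprecated constructor above; family, qualifier, and the
comparison value are illustrative:

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.DependentColumnFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class DependentColumnSketch {
  public static Scan buildScan() {
    // Keep only cells whose timestamp also appears in cf:flag, and additionally
    // require the dependent column's value to equal "1".
    DependentColumnFilter filter = new DependentColumnFilter(
        Bytes.toBytes("cf"), Bytes.toBytes("flag"),
        true, // drop the dependent column itself from results
        CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("1")));
    return new Scan().setFilter(filter);
  }
}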

[01/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.

2018-08-23 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 88e0d1a41 -> 1ff05a186


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.html
index 532a53d..705e999 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.html
@@ -28,90 +28,158 @@
 020import static org.junit.Assert.*;
 021
 022import 
java.util.AbstractMap.SimpleImmutableEntry;
-023import 
org.apache.hadoop.hbase.HBaseClassTestRule;
-024import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-025import 
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
-026import 
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus;
-027import 
org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;
-028import 
org.apache.hadoop.hbase.testclassification.MasterTests;
-029import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-030import 
org.apache.hadoop.hbase.util.Threads;
-031import org.junit.AfterClass;
-032import org.junit.BeforeClass;
-033import org.junit.ClassRule;
-034import org.junit.Test;
-035import 
org.junit.experimental.categories.Category;
-036import org.slf4j.Logger;
-037import org.slf4j.LoggerFactory;
-038
-039@Category({MasterTests.class, 
MediumTests.class})
-040public class TestMasterMetricsWrapper {
-041
-042  @ClassRule
-043  public static final HBaseClassTestRule 
CLASS_RULE =
-044  
HBaseClassTestRule.forClass(TestMasterMetricsWrapper.class);
-045
-046  private static final Logger LOG = 
LoggerFactory.getLogger(TestMasterMetricsWrapper.class);
+023import java.util.List;
+024
+025import 
org.apache.hadoop.hbase.HBaseClassTestRule;
+026import 
org.apache.hadoop.hbase.HBaseTestingUtility;
+027import 
org.apache.hadoop.hbase.HColumnDescriptor;
+028import 
org.apache.hadoop.hbase.HTableDescriptor;
+029import 
org.apache.hadoop.hbase.TableName;
+030import 
org.apache.hadoop.hbase.client.RegionInfo;
+031import 
org.apache.hadoop.hbase.master.assignment.RegionStates;
+032import 
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
+033import 
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus;
+034import 
org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;
+035import 
org.apache.hadoop.hbase.testclassification.MasterTests;
+036import 
org.apache.hadoop.hbase.testclassification.MediumTests;
+037import 
org.apache.hadoop.hbase.util.Bytes;
+038import 
org.apache.hadoop.hbase.util.PairOfSameType;
+039import 
org.apache.hadoop.hbase.util.Threads;
+040import org.junit.AfterClass;
+041import org.junit.BeforeClass;
+042import org.junit.ClassRule;
+043import org.junit.Test;
+044import 
org.junit.experimental.categories.Category;
+045import org.slf4j.Logger;
+046import org.slf4j.LoggerFactory;
 047
-048  private static final 
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-049  private static final int NUM_RS = 4;
+048@Category({MasterTests.class, 
MediumTests.class})
+049public class TestMasterMetricsWrapper {
 050
-051  @BeforeClass
-052  public static void setup() throws 
Exception {
-053TEST_UTIL.startMiniCluster(1, 
NUM_RS);
-054  }
-055
-056  @AfterClass
-057  public static void teardown() throws 
Exception {
-058TEST_UTIL.shutdownMiniCluster();
-059  }
-060
-061  @Test
-062  public void testInfo() {
-063HMaster master = 
TEST_UTIL.getHBaseCluster().getMaster();
-064MetricsMasterWrapperImpl info = new 
MetricsMasterWrapperImpl(master);
-065
assertEquals(master.getSplitPlanCount(), info.getSplitPlanCount(), 0);
-066
assertEquals(master.getMergePlanCount(), info.getMergePlanCount(), 0);
-067assertEquals(master.getAverageLoad(), 
info.getAverageLoad(), 0);
-068assertEquals(master.getClusterId(), 
info.getClusterId());
-069
assertEquals(master.getMasterActiveTime(), info.getActiveTime());
-070
assertEquals(master.getMasterStartTime(), info.getStartTime());
-071
assertEquals(master.getMasterCoprocessors().length, 
info.getCoprocessors().length);
-072
assertEquals(master.getServerManager().getOnlineServersList().size(), 
info.getNumRegionServers());
-073int regionServerCount =
-074  NUM_RS + 
(LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration())? 1: 0);
-075assertEquals(regionServerCount, 
info.getNumRegionServers());
-076
-077String zkServers = 
info.getZookeeperQuorum();
-078
assertEquals(zkServers.split(",").length, 
TEST_UTIL.getZkCluster().getZooKeeperServerNum());
-079
-080final int index = 3;
-081LOG.info("Stopping " + 
TEST_UTIL.getMiniHBaseCluster().getRegionServer(index));
-082
TEST_UTIL.getMiniHBaseCluster().stopRegionServer(index, 

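[Annotation] The hunk above exercises MetricsMasterWrapperImpl by comparing each value it exposes against the HMaster it wraps. A minimal, self-contained sketch of that wrapper-versus-source assertion pattern; Master and MetricsWrapper below are hypothetical stand-ins, not the HBase classes:

import static org.junit.Assert.assertEquals;

public class MetricsWrapperSketch {
  // Hypothetical stand-in for the master being wrapped.
  interface Master {
    long getSplitPlanCount();
    double getAverageLoad();
    String getClusterId();
  }

  // Hypothetical stand-in for MetricsMasterWrapperImpl: it only re-exposes
  // state the master already tracks, so every getter is a pass-through.
  static class MetricsWrapper {
    private final Master master;
    MetricsWrapper(Master master) { this.master = master; }
    long getSplitPlanCount() { return master.getSplitPlanCount(); }
    double getAverageLoad()  { return master.getAverageLoad(); }
    String getClusterId()    { return master.getClusterId(); }
  }

  // Every value surfaced by the wrapper must match the source of truth.
  static void verify(Master master) {
    MetricsWrapper info = new MetricsWrapper(master);
    assertEquals(master.getSplitPlanCount(), info.getSplitPlanCount());
    assertEquals(master.getAverageLoad(), info.getAverageLoad(), 0);
    assertEquals(master.getClusterId(), info.getClusterId());
  }
}
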
[01/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 7be88b495 -> 7ae6a80c0


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index 81f5178..7df71bd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -108,3669 +108,3727 @@
 100import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 101import 
org.apache.hadoop.hbase.log.HBaseMarkers;
 102import 
org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
-103import 
org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-104import 
org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
-105import 
org.apache.hadoop.hbase.master.assignment.RegionStates;
-106import 
org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
-107import 
org.apache.hadoop.hbase.master.balancer.BalancerChore;
-108import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
-109import 
org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
-110import 
org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
-111import 
org.apache.hadoop.hbase.master.cleaner.CleanerChore;
-112import 
org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-113import 
org.apache.hadoop.hbase.master.cleaner.LogCleaner;
-114import 
org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
-115import 
org.apache.hadoop.hbase.master.locking.LockManager;
-116import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-117import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
-118import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-119import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
-120import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
-121import 
org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
-122import 
org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure;
-123import 
org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
-124import 
org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
-125import 
org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
-126import 
org.apache.hadoop.hbase.master.procedure.InitMetaProcedure;
-127import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
-128import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-129import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
-130import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
-131import 
org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
-132import 
org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
-133import 
org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
-134import 
org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
-135import 
org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure;
-136import 
org.apache.hadoop.hbase.master.replication.AddPeerProcedure;
-137import 
org.apache.hadoop.hbase.master.replication.DisablePeerProcedure;
-138import 
org.apache.hadoop.hbase.master.replication.EnablePeerProcedure;
-139import 
org.apache.hadoop.hbase.master.replication.RemovePeerProcedure;
-140import 
org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
-141import 
org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager;
-142import 
org.apache.hadoop.hbase.master.replication.TransitPeerSyncReplicationStateProcedure;
-143import 
org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure;
-144import 
org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-145import 
org.apache.hadoop.hbase.master.zksyncer.MasterAddressSyncer;
-146import 
org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer;
-147import 
org.apache.hadoop.hbase.mob.MobConstants;
-148import 
org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
-149import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-150import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-151import 
org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
-152import 
org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
-153import 
org.apache.hadoop.hbase.procedure2.LockedResource;
-154import 
org.apache.hadoop.hbase.procedure2.Procedure;
-155import 
org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-156import 
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-157import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
-158import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
-159import 

[01/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site afca75aaa -> f3d62514e


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.ZombieLastLogWriterRegionServer.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.ZombieLastLogWriterRegionServer.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.ZombieLastLogWriterRegionServer.html
index ee05a1d..06f2ffa 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.ZombieLastLogWriterRegionServer.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.ZombieLastLogWriterRegionServer.html
@@ -138,1307 +138,1310 @@
 130  private Path OLDLOGDIR;
 131  private Path CORRUPTDIR;
 132  private Path TABLEDIR;
-133
-134  private static final int NUM_WRITERS = 
10;
-135  private static final int ENTRIES = 10; 
// entries per writer per region
-136
-137  private static final String 
FILENAME_BEING_SPLIT = "testfile";
-138  private static final TableName 
TABLE_NAME =
-139  TableName.valueOf("t1");
-140  private static final byte[] FAMILY = 
Bytes.toBytes("f1");
-141  private static final byte[] QUALIFIER = 
Bytes.toBytes("q1");
-142  private static final byte[] VALUE = 
Bytes.toBytes("v1");
-143  private static final String 
WAL_FILE_PREFIX = "wal.dat.";
-144  private static List<String> REGIONS = new ArrayList<>();
-145  private static final String 
HBASE_SKIP_ERRORS = "hbase.hlog.split.skip.errors";
-146  private static String ROBBER;
-147  private static String ZOMBIE;
-148  private static String [] GROUP = new 
String [] {"supergroup"};
-149
-150  static enum Corruptions {
-151INSERT_GARBAGE_ON_FIRST_LINE,
-152INSERT_GARBAGE_IN_THE_MIDDLE,
-153APPEND_GARBAGE,
-154TRUNCATE,
-155TRUNCATE_TRAILER
-156  }
-157
-158  @BeforeClass
-159  public static void setUpBeforeClass() 
throws Exception {
-160conf = 
TEST_UTIL.getConfiguration();
-161
conf.setClass("hbase.regionserver.hlog.writer.impl",
-162InstrumentedLogWriter.class, 
Writer.class);
-163// This is how you turn off 
shortcircuit read currently.  TODO: Fix.  Should read config.
-164
System.setProperty("hbase.tests.use.shortcircuit.reads", "false");
-165// Create fake maping user to group 
and set it to the conf.
-166Map<String, String []> u2g_map = new HashMap<>(2);
-167ROBBER = User.getCurrent().getName() 
+ "-robber";
-168ZOMBIE = User.getCurrent().getName() 
+ "-zombie";
-169u2g_map.put(ROBBER, GROUP);
-170u2g_map.put(ZOMBIE, GROUP);
-171
DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
-172conf.setInt("dfs.heartbeat.interval", 
1);
-173TEST_UTIL.startMiniDFSCluster(2);
-174  }
-175
-176  @AfterClass
-177  public static void tearDownAfterClass() 
throws Exception {
-178TEST_UTIL.shutdownMiniDFSCluster();
-179  }
-180
-181  @Rule
-182  public TestName name = new 
TestName();
-183  private WALFactory wals = null;
-184
-185  @Before
-186  public void setUp() throws Exception 
{
-187LOG.info("Cleaning up cluster for new 
test.");
-188fs = 
TEST_UTIL.getDFSCluster().getFileSystem();
-189HBASEDIR = 
TEST_UTIL.createRootDir();
-190HBASELOGDIR = 
TEST_UTIL.createWALRootDir();
-191OLDLOGDIR = new Path(HBASELOGDIR, 
HConstants.HREGION_OLDLOGDIR_NAME);
-192CORRUPTDIR = new Path(HBASELOGDIR, 
HConstants.CORRUPT_DIR_NAME);
-193TABLEDIR = 
FSUtils.getTableDir(HBASEDIR, TABLE_NAME);
-194REGIONS.clear();
-195Collections.addAll(REGIONS, "bbb", 
"ccc");
-196InstrumentedLogWriter.activateFailure 
= false;
-197wals = new WALFactory(conf, 
name.getMethodName());
-198WALDIR = new Path(HBASELOGDIR,
-199
AbstractFSWALProvider.getWALDirectoryName(ServerName.valueOf(name.getMethodName(),
-20016010, 
System.currentTimeMillis()).toString()));
-201//fs.mkdirs(WALDIR);
-202  }
-203
-204  @After
-205  public void tearDown() throws Exception 
{
-206try {
-207  wals.close();
-208} catch(IOException exception) {
-209  // Some tests will move WALs out 
from under us. In those cases, we'll get an error on close.
-210  LOG.info("Ignoring an error while 
closing down our WALFactory. Fine for some tests, but if" +
-211  " you see a failure look 
here.");
-212  LOG.debug("exception details", 
exception);
-213} finally {
-214  wals = null;
-215  fs.delete(HBASEDIR, true);
-216  fs.delete(HBASELOGDIR, true);
-217}
-218  }
-219
-220  /**
-221   * Simulates splitting a WAL out from 
under a regionserver that is still trying to write it.
-222   * Ensures we do not lose edits.
-223   * @throws IOException
-224   * @throws InterruptedException
-225   */
-226  @Test
-227  public void 
testLogCannotBeWrittenOnceParsed() throws IOException, InterruptedException {
-228final AtomicLong counter = new 

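[Annotation] The setUp/tearDown pair above rebuilds the WAL directories for every test and deliberately tolerates errors while closing the WALFactory, since some tests move WALs out from under it. A self-contained sketch of that per-test isolation discipline using only the JDK; WalFactory below is a hypothetical stand-in, not org.apache.hadoop.hbase.wal.WALFactory:

import java.io.Closeable;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Comparator;
import java.util.stream.Stream;

public class WalTestHarnessSketch {
  // Hypothetical stand-in for a factory that owns WAL writers under a directory.
  static class WalFactory implements Closeable {
    final Path dir;
    WalFactory(Path dir) { this.dir = dir; }
    @Override public void close() throws IOException { /* flush and close writers */ }
  }

  private Path walDir;
  private WalFactory wals;

  void setUp(String testName) throws IOException {
    // Fresh directory per test so no state leaks between methods.
    walDir = Files.createTempDirectory("wal-" + testName);
    wals = new WalFactory(walDir);
  }

  void tearDown() throws IOException {
    try {
      wals.close();
    } catch (IOException e) {
      // Expected by tests that move WALs out from under the factory.
      System.err.println("Ignoring error while closing WalFactory: " + e);
    } finally {
      wals = null;
      // Delete children before parents so the whole tree goes away.
      try (Stream<Path> paths = Files.walk(walDir)) {
        paths.sorted(Comparator.reverseOrder()).forEach(p -> p.toFile().delete());
      }
    }
  }
}
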
[01/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site a44d79699 -> 7cf6034ba


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html 
b/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
index 88d8c36..996b13a 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":42,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":42,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class MiniHBaseCluster
+public class MiniHBaseCluster
 extends HBaseCluster
 This class creates a single process HBase cluster.
  each server.  The master uses the 'default' FileSystem.  The RegionServers,
@@ -463,38 +463,45 @@ extends 
 void
+killNameNode(org.apache.hadoop.hbase.ServerName serverName)
+Kills the namenode process if this is a distributed 
cluster, otherwise, this causes master to
+ exit doing basic clean up only.
+
+
+
+void
killRegionServer(org.apache.hadoop.hbase.ServerName serverName)
 Kills the region server process if this is a distributed 
cluster, otherwise
  this causes the region server to exit doing basic clean up only.
 
 
-
+
 void
killZkNode(org.apache.hadoop.hbase.ServerName serverName)
 Kills the zookeeper node process if this is a distributed 
cluster, otherwise,
  this causes master to exit doing basic clean up only.
 
 
-
+
 void
 shutdown()
 Shut down the mini HBase cluster
 
 
-
+
 void
startDataNode(org.apache.hadoop.hbase.ServerName serverName)
 Starts a new datanode on the given hostname or if this is a 
mini/local cluster,
  silently logs warning message.
 
 
-
+
 org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread
 startMaster()
 Starts a master thread running
 
 
-
+
 void
 startMaster(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringhostname,
intport)
@@ -502,13 +509,20 @@ extends 
+
+void
+startNameNode(org.apache.hadoop.hbase.ServerName serverName)
+Starts a new namenode on the given hostname or if this is a 
mini/local cluster, silently logs
+ warning message.
+
+
+
 org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread
 startRegionServer()
 Starts a region server thread running
 
 
-
+
 void
 startRegionServer(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringhostname,
  intport)
@@ -516,13 +530,13 @@ extends 
+
 org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread
startRegionServerAndWait(long timeout)
 Starts a region server thread and waits until its processed 
by master.
 
 
-
+
 void
 startZkNode(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringhostname,
intport)
@@ -530,120 +544,140 @@ extends 
+
 void
stopDataNode(org.apache.hadoop.hbase.ServerName serverName)
 Stops the datanode if this is a distributed cluster, 
otherwise
  silently logs warning message.
 
 
-
+
 org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread
stopMaster(int serverNumber)
 Shut down the specified master cleanly
 
 
-
+
 org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread
stopMaster(int serverNumber,
  boolean shutdownFS)
 Shut down the 

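[Annotation] The method summary above adds killNameNode/startNameNode next to the existing datanode, zookeeper and region-server controls. A hypothetical fault-injection step over such an interface; Cluster and ServerName here are stand-ins, not the MiniHBaseCluster API:

public class ChaosStepSketch {
  interface ServerName {}

  // Stand-in for the process-control surface sketched in the summary above.
  interface Cluster {
    void killNameNode(ServerName sn);
    void startNameNode(ServerName sn);
  }

  // Bounce the namenode and bring it back so the next step sees healthy HDFS.
  static void bounceNameNode(Cluster cluster, ServerName nn) {
    cluster.killNameNode(nn);  // on a mini/local cluster this only logs a warning
    cluster.startNameNode(nn);
  }
}
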
[01/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 6daeaf094 -> 804782f09


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/src-html/org/apache/hadoop/hbase/master/locking/LockProcedure.NamespaceExclusiveLock.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/locking/LockProcedure.NamespaceExclusiveLock.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/locking/LockProcedure.NamespaceExclusiveLock.html
index aba85f1..a2daee8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/locking/LockProcedure.NamespaceExclusiveLock.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/locking/LockProcedure.NamespaceExclusiveLock.html
@@ -84,420 +84,411 @@
 076  private String description;
 077  // True when recovery of master lock 
from WALs
 078  private boolean recoveredMasterLock;
-079  // this is for internal working
-080  private boolean hasLock;
-081
-082  private final 
ProcedureEvent<LockProcedure> event = new ProcedureEvent<>(this);
-083  // True if this proc acquired relevant 
locks. This value is for client checks.
-084  private final AtomicBoolean locked = 
new AtomicBoolean(false);
-085  // Last system time (in ms) when client 
sent the heartbeat.
-086  // Initialize to system time for 
non-null value in case of recovery.
-087  private final AtomicLong lastHeartBeat 
= new AtomicLong();
-088  // Set to true when unlock request is 
received.
-089  private final AtomicBoolean unlock = 
new AtomicBoolean(false);
-090  // decreased when locks are acquired. 
Only used for local (with master process) purposes.
-091  // Setting latch to non-null value 
increases default timeout to
-092  // 
DEFAULT_LOCAL_MASTER_LOCKS_TIMEOUT_MS (10 min) so that there is no need to 
heartbeat.
-093  private final CountDownLatch 
lockAcquireLatch;
-094
-095  @Override
-096  public TableName getTableName() {
-097return tableName;
-098  }
-099
-100  @Override
-101  public TableOperationType 
getTableOperationType() {
-102return opType;
-103  }
-104
-105  private interface LockInterface {
-106boolean 
acquireLock(MasterProcedureEnv env);
-107void releaseLock(MasterProcedureEnv 
env);
-108  }
-109
-110  public LockProcedure() {
-111lockAcquireLatch = null;
-112  }
-113
-114  private LockProcedure(final 
Configuration conf, final LockType type,
-115  final String description, final 
CountDownLatch lockAcquireLatch) {
-116this.type = type;
-117this.description = description;
-118this.lockAcquireLatch = 
lockAcquireLatch;
-119if (lockAcquireLatch == null) {
-120  
setTimeout(conf.getInt(REMOTE_LOCKS_TIMEOUT_MS_CONF, 
DEFAULT_REMOTE_LOCKS_TIMEOUT_MS));
-121} else {
-122  
setTimeout(conf.getInt(LOCAL_MASTER_LOCKS_TIMEOUT_MS_CONF,
-123  
DEFAULT_LOCAL_MASTER_LOCKS_TIMEOUT_MS));
-124}
-125  }
-126
-127  /**
-128   * Constructor for namespace lock.
-129   * @param lockAcquireLatch if not null, 
the latch is decreased when lock is acquired.
-130   */
-131  public LockProcedure(final 
Configuration conf, final String namespace, final LockType type,
-132  final String description, final 
CountDownLatch lockAcquireLatch)
-133  throws IllegalArgumentException {
-134this(conf, type, description, 
lockAcquireLatch);
-135
-136if (namespace.isEmpty()) {
-137  throw new 
IllegalArgumentException("Empty namespace");
-138}
-139
-140this.namespace = namespace;
-141this.lock = setupNamespaceLock();
-142  }
-143
-144  /**
-145   * Constructor for table lock.
-146   * @param lockAcquireLatch if not null, 
the latch is decreased when lock is acquired.
-147   */
-148  public LockProcedure(final 
Configuration conf, final TableName tableName, final LockType type,
-149  final String description, final 
CountDownLatch lockAcquireLatch)
-150  throws IllegalArgumentException {
-151this(conf, type, description, 
lockAcquireLatch);
-152
-153this.tableName = tableName;
-154this.lock = setupTableLock();
-155  }
-156
-157  /**
-158   * Constructor for region lock(s).
-159   * @param lockAcquireLatch if not null, 
the latch is decreased when lock is acquired.
-160   *Useful for 
locks acquired locally from master process.
-161   * @throws IllegalArgumentException if 
all regions are not from same table.
-162   */
-163  public LockProcedure(final 
Configuration conf, final RegionInfo[] regionInfos,
-164  final LockType type, final String 
description, final CountDownLatch lockAcquireLatch)
-165  throws IllegalArgumentException {
-166this(conf, type, description, 
lockAcquireLatch);
-167
-168// Build RegionInfo from region 
names.
-169if (regionInfos.length == 0) {
-170  throw new 
IllegalArgumentException("No regions specified for region lock");
-171}
-172
-173// check all regions belong to same 
table.
-174final TableName regionTable = 

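[Annotation] The constructors above share one private constructor that picks the timeout (a long local timeout when a latch is supplied, a short remote one otherwise), while each public constructor validates its own scope argument. A compact sketch of that shape, with illustrative names and timeout values rather than the real HBase constants:

import java.util.concurrent.CountDownLatch;

public class LockProcedureSketch {
  static final long REMOTE_TIMEOUT_MS = 30_000;  // remote clients must heartbeat
  static final long LOCAL_TIMEOUT_MS = 600_000;  // latch-based local use needs none

  private final String description;
  private final CountDownLatch lockAcquireLatch;
  private final long timeoutMs;
  private String namespace; // set only by the namespace-lock constructor

  // Shared core: only decides how long the lock may go without renewal.
  private LockProcedureSketch(String description, CountDownLatch latch) {
    this.description = description;
    this.lockAcquireLatch = latch;
    this.timeoutMs = (latch == null) ? REMOTE_TIMEOUT_MS : LOCAL_TIMEOUT_MS;
  }

  // Namespace lock: reject an empty namespace before any state is set up.
  public LockProcedureSketch(String namespace, String description, CountDownLatch latch) {
    this(description, latch);
    if (namespace.isEmpty()) {
      throw new IllegalArgumentException("Empty namespace");
    }
    this.namespace = namespace;
  }
}
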
[01/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 972a3c890 -> df8fd1d31


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter.html
index 3d7093a..9917ee8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter.html
@@ -39,594 +39,612 @@
 031import 
java.util.concurrent.ThreadPoolExecutor;
 032import java.util.concurrent.TimeUnit;
 033import 
java.util.concurrent.atomic.AtomicLong;
-034
-035import 
org.apache.hadoop.conf.Configuration;
-036import org.apache.hadoop.fs.Path;
-037import 
org.apache.hadoop.hbase.CellScanner;
-038import 
org.apache.hadoop.hbase.CellUtil;
-039import 
org.apache.hadoop.hbase.HBaseConfiguration;
-040import 
org.apache.hadoop.hbase.HBaseIOException;
-041import 
org.apache.hadoop.hbase.HConstants;
-042import 
org.apache.hadoop.hbase.HRegionLocation;
-043import 
org.apache.hadoop.hbase.RegionLocations;
-044import 
org.apache.hadoop.hbase.TableDescriptors;
-045import 
org.apache.hadoop.hbase.TableName;
-046import 
org.apache.hadoop.hbase.TableNotFoundException;
-047import 
org.apache.hadoop.hbase.client.ClusterConnection;
-048import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-049import 
org.apache.hadoop.hbase.client.RegionAdminServiceCallable;
-050import 
org.apache.hadoop.hbase.client.RegionInfo;
-051import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-052import 
org.apache.hadoop.hbase.client.RetryingCallable;
-053import 
org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
-054import 
org.apache.hadoop.hbase.client.TableDescriptor;
-055import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-056import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-057import 
org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
-058import 
org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
-059import 
org.apache.hadoop.hbase.replication.WALEntryFilter;
-060import 
org.apache.hadoop.hbase.util.Bytes;
-061import 
org.apache.hadoop.hbase.util.Pair;
-062import 
org.apache.hadoop.hbase.util.Threads;
-063import 
org.apache.hadoop.hbase.wal.WAL.Entry;
-064import 
org.apache.hadoop.hbase.wal.WALSplitter.EntryBuffers;
-065import 
org.apache.hadoop.hbase.wal.WALSplitter.OutputSink;
-066import 
org.apache.hadoop.hbase.wal.WALSplitter.PipelineController;
-067import 
org.apache.hadoop.hbase.wal.WALSplitter.RegionEntryBuffer;
-068import 
org.apache.hadoop.hbase.wal.WALSplitter.SinkWriter;
-069import 
org.apache.hadoop.util.StringUtils;
-070import 
org.apache.yetus.audience.InterfaceAudience;
-071import org.slf4j.Logger;
-072import org.slf4j.LoggerFactory;
+034import 
org.apache.hadoop.conf.Configuration;
+035import org.apache.hadoop.fs.Path;
+036import 
org.apache.hadoop.hbase.CellScanner;
+037import 
org.apache.hadoop.hbase.CellUtil;
+038import 
org.apache.hadoop.hbase.HBaseConfiguration;
+039import 
org.apache.hadoop.hbase.HBaseIOException;
+040import 
org.apache.hadoop.hbase.HConstants;
+041import 
org.apache.hadoop.hbase.HRegionLocation;
+042import 
org.apache.hadoop.hbase.RegionLocations;
+043import 
org.apache.hadoop.hbase.TableDescriptors;
+044import 
org.apache.hadoop.hbase.TableName;
+045import 
org.apache.hadoop.hbase.TableNotFoundException;
+046import 
org.apache.hadoop.hbase.client.ClusterConnection;
+047import 
org.apache.hadoop.hbase.client.ConnectionFactory;
+048import 
org.apache.hadoop.hbase.client.RegionAdminServiceCallable;
+049import 
org.apache.hadoop.hbase.client.RegionInfo;
+050import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
+051import 
org.apache.hadoop.hbase.client.RetryingCallable;
+052import 
org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
+053import 
org.apache.hadoop.hbase.client.TableDescriptor;
+054import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
+055import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+056import 
org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
+057import 
org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
+058import 
org.apache.hadoop.hbase.replication.WALEntryFilter;
+059import 
org.apache.hadoop.hbase.util.Bytes;
+060import 
org.apache.hadoop.hbase.util.Pair;
+061import 
org.apache.hadoop.hbase.util.Threads;
+062import 
org.apache.hadoop.hbase.wal.WAL.Entry;
+063import 
org.apache.hadoop.hbase.wal.WALSplitter.EntryBuffers;
+064import 

[01/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site a207ad3b2 -> 0c6f447e0


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/class-use/InvalidFamilyOperationException.html
--
diff --git 
a/apidocs/org/apache/hadoop/hbase/class-use/InvalidFamilyOperationException.html
 
b/apidocs/org/apache/hadoop/hbase/class-use/InvalidFamilyOperationException.html
index 998386b..decf76a 100644
--- 
a/apidocs/org/apache/hadoop/hbase/class-use/InvalidFamilyOperationException.html
+++ 
b/apidocs/org/apache/hadoop/hbase/class-use/InvalidFamilyOperationException.html
@@ -1,10 +1,10 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
-类 org.apache.hadoop.hbase.InvalidFamilyOperationException的使用 
(Apache HBase 3.0.0-SNAPSHOT API)
+Uses of Class org.apache.hadoop.hbase.InvalidFamilyOperationException 
(Apache HBase 3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
-跳过导航链接
+Skip navigation links
 
 
 
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
 
 
 
 
-上一个
-下一个
+Prev
+Next
 
 
-框架
-无框架
+Frames
+NoFrames
 
 
-所有类
+AllClasses
 
 
 

[01/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread zhangduo
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site b526f2562 -> 5427a45e2


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html 
b/apidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
index 5a4f6cc..cd41ac7 100644
--- a/apidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
+++ b/apidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
@@ -1,10 +1,10 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
-Uses of Class org.apache.hadoop.hbase.HTableDescriptor (Apache HBase 
3.0.0-SNAPSHOT API)
+类 org.apache.hadoop.hbase.HTableDescriptor的使用 (Apache HBase 
3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
 
-JavaScript is disabled on your browser.
+您的浏览器已禁用 JavaScript。
 
 
 
 
 
-Skip navigation links
+跳过导航链接
 
 
 
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
+
+概览
+程序包
+类
+使用
+树
+已过时
+索引
+帮助
 
 
 
 
-Prev
-Next
+上一个
+下一个
 
 
-Frames
-NoFrames
+框架
+无框架
 
 
-AllClasses
+所有类
 
 
 

[01/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

2018-07-03 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 8e541fb05 -> bcb555af5


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
index 8cb24b3..bd3c59e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
@@ -426,1268 +426,1286 @@
 418   * @param inMemory if block is 
in-memory
 419   * @param wait if true, blocking wait 
when queue is full
 420   */
-421  public void 
cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean 
inMemory,
+421  private void 
cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean 
inMemory,
 422  boolean wait) {
-423if (LOG.isTraceEnabled()) 
LOG.trace("Caching key=" + cacheKey + ", item=" + cachedItem);
-424if (!cacheEnabled) {
-425  return;
-426}
-427
-428if (backingMap.containsKey(cacheKey) 
|| ramCache.containsKey(cacheKey)) {
-429  Cacheable existingBlock = 
getBlock(cacheKey, false, false, false);
-430
-431  try {
-432int comparison = 
BlockCacheUtil.validateBlockAddition(existingBlock, cachedItem, cacheKey);
-433if (comparison != 0) {
-434  if (comparison < 0) {
-435LOG.warn("Cached block 
contents differ by nextBlockOnDiskSize. Keeping cached block.");
-436return;
-437  } else {
-438LOG.warn("Cached block 
contents differ by nextBlockOnDiskSize. Caching new block.");
-439  }
-440} else {
-441  String msg = "Caching an 
already cached block: " + cacheKey;
-442  msg += ". This is harmless and 
can happen in rare cases (see HBASE-8547)";
-443  LOG.warn(msg);
-444  return;
-445}
-446  } finally {
-447// return the block since we need 
to decrement the count
-448returnBlock(cacheKey, 
existingBlock);
-449  }
-450}
-451
-452/*
-453 * Stuff the entry into the RAM cache 
so it can get drained to the persistent store
-454 */
-455RAMQueueEntry re =
-456new RAMQueueEntry(cacheKey, 
cachedItem, accessCount.incrementAndGet(), inMemory);
-457if (ramCache.putIfAbsent(cacheKey, 
re) != null) {
-458  return;
-459}
-460int queueNum = (cacheKey.hashCode() & 0x7FFFFFFF) % writerQueues.size();
-461BlockingQueue<RAMQueueEntry> bq = writerQueues.get(queueNum);
-462boolean successfulAddition = false;
-463if (wait) {
-464  try {
-465successfulAddition = bq.offer(re, 
DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
-466  } catch (InterruptedException e) 
{
-467
Thread.currentThread().interrupt();
-468  }
-469} else {
-470  successfulAddition = 
bq.offer(re);
-471}
-472if (!successfulAddition) {
-473  ramCache.remove(cacheKey);
-474  cacheStats.failInsert();
-475} else {
-476  this.blockNumber.increment();
-477  
this.heapSize.add(cachedItem.heapSize());
-478  blocksByHFile.add(cacheKey);
-479}
-480  }
-481
-482  /**
-483   * Get the buffer of the block with the 
specified key.
-484   * @param key block's cache key
-485   * @param caching true if the caller 
caches blocks on cache misses
-486   * @param repeat Whether this is a 
repeat lookup for the same block
-487   * @param updateCacheMetrics Whether we 
should update cache metrics or not
-488   * @return buffer of specified cache 
key, or null if not in cache
-489   */
-490  @Override
-491  public Cacheable getBlock(BlockCacheKey 
key, boolean caching, boolean repeat,
-492  boolean updateCacheMetrics) {
-493if (!cacheEnabled) {
-494  return null;
-495}
-496RAMQueueEntry re = 
ramCache.get(key);
-497if (re != null) {
-498  if (updateCacheMetrics) {
-499cacheStats.hit(caching, 
key.isPrimary(), key.getBlockType());
-500  }
-501  
re.access(accessCount.incrementAndGet());
-502  return re.getData();
-503}
-504BucketEntry bucketEntry = 
backingMap.get(key);
-505if (bucketEntry != null) {
-506  long start = System.nanoTime();
-507  ReentrantReadWriteLock lock = 
offsetLock.getLock(bucketEntry.offset());
-508  try {
-509lock.readLock().lock();
-510// We can not read here even if 
backingMap does contain the given key because its offset
-511// maybe changed. If we lock 
BlockCacheKey instead of offset, then we can only check
-512// existence here.
-513if 
(bucketEntry.equals(backingMap.get(key))) {
-514  // TODO : change this area - 
should be removed after server cells and
-515

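[Annotation] In cacheBlockWithWait above, an entry is parked in the RAM cache first and then offered to one of several writer queues picked by key hash; if the offer fails, the RAM entry is rolled back and the insert counted as failed. A self-contained sketch of that enqueue step with simplified types, not the BucketCache internals:

import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;

public class WriteQueueSketch<K, V> {
  private final ConcurrentHashMap<K, V> ramCache = new ConcurrentHashMap<>();
  private final List<BlockingQueue<K>> writerQueues;
  private final long waitMs;

  public WriteQueueSketch(List<BlockingQueue<K>> writerQueues, long waitMs) {
    this.writerQueues = writerQueues;
    this.waitMs = waitMs;
  }

  public boolean cache(K key, V value, boolean wait) {
    if (ramCache.putIfAbsent(key, value) != null) {
      return false; // someone else already queued this key
    }
    // Mask the sign bit so the modulo always yields a valid queue index.
    int queueNum = (key.hashCode() & 0x7FFFFFFF) % writerQueues.size();
    BlockingQueue<K> bq = writerQueues.get(queueNum);
    boolean added = false;
    try {
      added = wait ? bq.offer(key, waitMs, TimeUnit.MILLISECONDS) : bq.offer(key);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    if (!added) {
      ramCache.remove(key); // roll back so a later attempt can retry
    }
    return added;
  }
}
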
[01/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

2018-06-29 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 72a723269 -> a5c66de03


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
index c10cfbf..a3e2f4a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
@@ -3371,7 +3371,7 @@
 3363private V result = null;
 3364
 3365private final HBaseAdmin admin;
-3366private final Long procId;
+3366protected final Long procId;
 3367
 3368public ProcedureFuture(final 
HBaseAdmin admin, final Long procId) {
 3369  this.admin = admin;
@@ -3653,653 +3653,651 @@
 3645 * @return a description of the 
operation
 3646 */
 3647protected String getDescription() 
{
-3648  return "Operation: " + 
getOperationType() + ", "
-3649  + "Table Name: " + 
tableName.getNameWithNamespaceInclAsString();
-3650
-3651}
-3652
-3653protected abstract class 
TableWaitForStateCallable implements WaitForStateCallable {
-3654  @Override
-3655  public void 
throwInterruptedException() throws InterruptedIOException {
-3656throw new 
InterruptedIOException("Interrupted while waiting for operation: "
-3657+ getOperationType() + " on 
table: " + tableName.getNameWithNamespaceInclAsString());
-3658  }
-3659
-3660  @Override
-3661  public void 
throwTimeoutException(long elapsedTime) throws TimeoutException {
-3662throw new TimeoutException("The 
operation: " + getOperationType() + " on table: " +
-3663tableName.getNameAsString() 
+ " has not completed after " + elapsedTime + "ms");
-3664  }
-3665}
-3666
-3667@Override
-3668protected V 
postOperationResult(final V result, final long deadlineTs)
-3669throws IOException, 
TimeoutException {
-3670  LOG.info(getDescription() + " 
completed");
-3671  return 
super.postOperationResult(result, deadlineTs);
-3672}
-3673
-3674@Override
-3675protected V 
postOperationFailure(final IOException exception, final long deadlineTs)
-3676throws IOException, 
TimeoutException {
-3677  LOG.info(getDescription() + " 
failed with " + exception.getMessage());
-3678  return 
super.postOperationFailure(exception, deadlineTs);
-3679}
-3680
-3681protected void 
waitForTableEnabled(final long deadlineTs)
-3682throws IOException, 
TimeoutException {
-3683  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3684@Override
-3685public boolean checkState(int 
tries) throws IOException {
-3686  try {
-3687if 
(getAdmin().isTableAvailable(tableName)) {
-3688  return true;
-3689}
-3690  } catch 
(TableNotFoundException tnfe) {
-3691LOG.debug("Table " + 
tableName.getNameWithNamespaceInclAsString()
-3692+ " was not enabled, 
sleeping. tries=" + tries);
-3693  }
-3694  return false;
-3695}
-3696  });
-3697}
-3698
-3699protected void 
waitForTableDisabled(final long deadlineTs)
-3700throws IOException, 
TimeoutException {
-3701  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3702@Override
-3703public boolean checkState(int 
tries) throws IOException {
-3704  return 
getAdmin().isTableDisabled(tableName);
-3705}
-3706  });
-3707}
-3708
-3709protected void 
waitTableNotFound(final long deadlineTs)
-3710throws IOException, 
TimeoutException {
-3711  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3712@Override
-3713public boolean checkState(int 
tries) throws IOException {
-3714  return 
!getAdmin().tableExists(tableName);
-3715}
-3716  });
-3717}
-3718
-3719protected void 
waitForSchemaUpdate(final long deadlineTs)
-3720throws IOException, 
TimeoutException {
-3721  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3722@Override
-3723public boolean checkState(int 
tries) throws IOException {
-3724  return 
getAdmin().getAlterStatus(tableName).getFirst() == 0;
-3725}
-3726  });
-3727}
-3728
-3729protected void 
waitForAllRegionsOnline(final long deadlineTs, final byte[][] splitKeys)
-3730throws IOException, 
TimeoutException {
-3731  final TableDescriptor desc = 
getTableDescriptor();
-3732  final AtomicInteger actualRegCount 
= new AtomicInteger(0);

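[Annotation] The TableWaitForStateCallable machinery above boils down to polling a boolean check until it passes or a deadline expires, mapping interruption and expiry to distinct exceptions. A self-contained sketch of that loop in plain JDK terms, not the HBaseAdmin code:

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.TimeoutException;

public final class WaitForStateSketch {
  interface Check { boolean state(int tries) throws IOException; }

  static void waitFor(Check check, long deadlineTs, long pollMs, String what)
      throws IOException, TimeoutException {
    for (int tries = 0; ; tries++) {
      if (check.state(tries)) {
        return; // desired state reached
      }
      long remaining = deadlineTs - System.currentTimeMillis();
      if (remaining <= 0) {
        throw new TimeoutException(what + " has not completed before the deadline");
      }
      try {
        Thread.sleep(Math.min(pollMs, remaining));
      } catch (InterruptedException e) {
        throw new InterruptedIOException("Interrupted while waiting for " + what);
      }
    }
  }
}
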
[01/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.

2018-06-28 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site d4f126b4b -> eb5d2c628


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALRemoteProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALRemoteProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALRemoteProcedure.html
new file mode 100644
index 000..3afcaf1
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALRemoteProcedure.html
@@ -0,0 +1,754 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+SyncReplicationReplayWALRemoteProcedure (Apache HBase 3.0.0-SNAPSHOT 
API)
+
+
+
+
+
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.master.replication
+Class 
SyncReplicationReplayWALRemoteProcedure
+
+
+
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.procedure2.Procedure<MasterProcedureEnv>
+
+
+org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALRemoteProcedure
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableProcedureMasterProcedureEnv, PeerProcedureInterface, RemoteProcedureDispatcher.RemoteProcedureMasterProcedureEnv,ServerName
+
+
+
+@InterfaceAudience.Private
+public class SyncReplicationReplayWALRemoteProcedure
+extends Procedure<MasterProcedureEnv>
+implements RemoteProcedureDispatcher.RemoteProcedure<MasterProcedureEnv, ServerName>, PeerProcedureInterface
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from 
classorg.apache.hadoop.hbase.procedure2.Procedure
+Procedure.LockState
+
+
+
+
+
+Nested classes/interfaces inherited from 
interfaceorg.apache.hadoop.hbase.master.procedure.PeerProcedureInterface
+PeerProcedureInterface.PeerOperationType
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+private boolean
+dispatched
+
+
+private ProcedureEvent<?>
+event
+
+
+private static org.slf4j.Logger
+LOG
+
+
+private https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+peerId
+
+
+private boolean
+succ
+
+
+private ServerName
+targetServer
+
+
+private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+wals
+
+
+
+
+
+
+Fields inherited from classorg.apache.hadoop.hbase.procedure2.Procedure
+NO_PROC_ID,
 NO_TIMEOUT
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+SyncReplicationReplayWALRemoteProcedure()
+
+
+SyncReplicationReplayWALRemoteProcedure(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
+   https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringwals,
+   ServerName targetServer)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+protected boolean
+abort(MasterProcedureEnv env)
+The abort() call is asynchronous and each procedure must 
decide how to deal
+ with it, 

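[Annotation] The field list above (dispatched, event, succ, targetServer) suggests the usual remote-procedure shape: dispatch work to a server, suspend on an event, and be woken with success or failure. A thread-based sketch of that handshake under simplified assumptions; the real procedure framework suspends without blocking a thread:

public class RemoteStepSketch {
  private final Object event = new Object();
  private boolean done;
  private boolean succ;

  // Dispatch once, then park until the remote side reports back.
  void execute(Runnable dispatchToRemote) throws InterruptedException {
    dispatchToRemote.run(); // e.g. hand the WAL list to the target server
    boolean ok;
    synchronized (event) {
      while (!done) {
        event.wait(); // stands in for suspending the procedure on its event
      }
      ok = succ;
    }
    if (!ok) {
      throw new IllegalStateException("remote replay failed; a real procedure would retry");
    }
  }

  // Called from the RPC layer when the remote operation completes.
  void remoteOperationDone(boolean success) {
    synchronized (event) {
      succ = success;
      done = true;
      event.notifyAll();
    }
  }
}
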
[01/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

2018-06-22 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site a87629c43 -> 55ce8d974


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html
index 73eb6b4..26a1ef9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html
@@ -60,526 +60,539 @@
 052  private int memstoreSizeMB = 0;
 053  private long storefileIndexSizeKB = 
0;
 054  private long readRequestsCount = 0;
-055  private long filteredReadRequestsCount 
= 0;
-056  private long writeRequestsCount = 0;
-057  private int rootIndexSizeKB = 0;
-058  private int totalStaticIndexSizeKB = 
0;
-059  private int totalStaticBloomSizeKB = 
0;
-060  private long totalCompactingKVs = 0;
-061  private long currentCompactedKVs = 0;
-062
-063  /**
-064   * DONT USE this construction. It make 
a fake server name;
-065   */
-066  @InterfaceAudience.Private
-067  public 
ServerLoad(ClusterStatusProtos.ServerLoad serverLoad) {
-068
this(ServerName.valueOf("localhost,1,1"), serverLoad);
-069  }
-070
-071  
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD")
-072  @InterfaceAudience.Private
-073  public ServerLoad(ServerName name, 
ClusterStatusProtos.ServerLoad serverLoad) {
-074
this(ServerMetricsBuilder.toServerMetrics(name, serverLoad));
-075this.serverLoad = serverLoad;
-076  }
-077
-078  @InterfaceAudience.Private
-079  public ServerLoad(ServerMetrics 
metrics) {
-080this.metrics = metrics;
-081this.serverLoad = 
ServerMetricsBuilder.toServerLoad(metrics);
-082for (RegionMetrics rl : 
metrics.getRegionMetrics().values()) {
-083  stores += rl.getStoreCount();
-084  storefiles += 
rl.getStoreFileCount();
-085  storeUncompressedSizeMB += 
rl.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
-086  storefileSizeMB += 
rl.getStoreFileSize().get(Size.Unit.MEGABYTE);
-087  memstoreSizeMB += 
rl.getMemStoreSize().get(Size.Unit.MEGABYTE);
-088  readRequestsCount += 
rl.getReadRequestCount();
-089  filteredReadRequestsCount += 
rl.getFilteredReadRequestCount();
-090  writeRequestsCount += 
rl.getWriteRequestCount();
-091  storefileIndexSizeKB += 
rl.getStoreFileIndexSize().get(Size.Unit.KILOBYTE);
-092  rootIndexSizeKB += 
rl.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE);
-093  totalStaticIndexSizeKB += 
rl.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
-094  totalStaticBloomSizeKB += 
rl.getBloomFilterSize().get(Size.Unit.KILOBYTE);
-095  totalCompactingKVs += 
rl.getCompactingCellCount();
-096  currentCompactedKVs += 
rl.getCompactedCellCount();
-097}
-098  }
-099
-100  /**
-101   * NOTE: Function name cannot start 
with "get" because then an OpenDataException is thrown because
-102   * HBaseProtos.ServerLoad cannot be 
converted to an open data type(see HBASE-5967).
-103   * @return the underlying ServerLoad 
protobuf object
-104   * @deprecated DONT use this pb object 
since the byte array backed may be modified in rpc layer
-105   */
-106  @InterfaceAudience.Private
-107  @Deprecated
-108  public ClusterStatusProtos.ServerLoad 
obtainServerLoadPB() {
-109return serverLoad;
-110  }
-111
-112  protected 
ClusterStatusProtos.ServerLoad serverLoad;
+055  private long cpRequestsCount = 0;
+056  private long filteredReadRequestsCount 
= 0;
+057  private long writeRequestsCount = 0;
+058  private int rootIndexSizeKB = 0;
+059  private int totalStaticIndexSizeKB = 
0;
+060  private int totalStaticBloomSizeKB = 
0;
+061  private long totalCompactingKVs = 0;
+062  private long currentCompactedKVs = 0;
+063
+064  /**
+065   * DONT USE this construction. It make 
a fake server name;
+066   */
+067  @InterfaceAudience.Private
+068  public 
ServerLoad(ClusterStatusProtos.ServerLoad serverLoad) {
+069
this(ServerName.valueOf("localhost,1,1"), serverLoad);
+070  }
+071
+072  
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD")
+073  @InterfaceAudience.Private
+074  public ServerLoad(ServerName name, 
ClusterStatusProtos.ServerLoad serverLoad) {
+075
this(ServerMetricsBuilder.toServerMetrics(name, serverLoad));
+076this.serverLoad = serverLoad;
+077  }
+078
+079  @InterfaceAudience.Private
+080  public ServerLoad(ServerMetrics 
metrics) {
+081this.metrics = metrics;
+082this.serverLoad = 
ServerMetricsBuilder.toServerLoad(metrics);
+083for (RegionMetrics rl : 
metrics.getRegionMetrics().values()) {
+084  stores += rl.getStoreCount();
+085  storefiles += 
rl.getStoreFileCount();
+086  storeUncompressedSizeMB += 
rl.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
+087  

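[Annotation] The constructor above derives every server-level number by summing per-region metrics, converting sizes to a common unit as it goes. A reduced sketch of that aggregation; RegionMetrics here is a trimmed stand-in for the HBase interface:

import java.util.Collection;

public class ServerTotalsSketch {
  interface RegionMetrics {
    int getStoreCount();
    long getReadRequestCount();
    long getWriteRequestCount();
    double getStoreFileSizeMB(); // already converted to megabytes
  }

  static String totals(Collection<RegionMetrics> regions) {
    int stores = 0;
    long reads = 0, writes = 0;
    double storefileSizeMB = 0;
    for (RegionMetrics rl : regions) {
      stores += rl.getStoreCount();
      reads += rl.getReadRequestCount();
      writes += rl.getWriteRequestCount();
      storefileSizeMB += rl.getStoreFileSizeMB();
    }
    return "stores=" + stores + " reads=" + reads
        + " writes=" + writes + " storefileSizeMB=" + storefileSizeMB;
  }
}
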
[01/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

2018-06-20 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site cb3a62e7c -> 2b11656ff


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
index 541beed..1100e95 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
@@ -42,1015 +42,1038 @@
 034import 
java.util.concurrent.ConcurrentHashMap;
 035import 
java.util.concurrent.ConcurrentSkipListMap;
 036import 
java.util.concurrent.atomic.AtomicInteger;
-037
-038import 
org.apache.hadoop.hbase.HConstants;
-039import 
org.apache.hadoop.hbase.ServerName;
-040import 
org.apache.hadoop.hbase.TableName;
-041import 
org.apache.hadoop.hbase.client.RegionInfo;
-042import 
org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-043import 
org.apache.hadoop.hbase.master.RegionState;
-044import 
org.apache.hadoop.hbase.master.RegionState.State;
-045import 
org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-046import 
org.apache.hadoop.hbase.util.Bytes;
-047import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-048import 
org.apache.yetus.audience.InterfaceAudience;
-049import org.slf4j.Logger;
-050import org.slf4j.LoggerFactory;
-051import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-052
-053/**
-054 * RegionStates contains a set of Maps 
that describes the in-memory state of the AM, with
-055 * the regions available in the system, 
the region in transition, the offline regions and
-056 * the servers holding regions.
-057 */
-058@InterfaceAudience.Private
-059public class RegionStates {
-060  private static final Logger LOG = 
LoggerFactory.getLogger(RegionStates.class);
-061
-062  protected static final State[] 
STATES_EXPECTED_ON_OPEN = new State[] {
-063State.OPEN, // State may already be 
OPEN if we died after receiving the OPEN from regionserver
-064// but before complete 
finish of AssignProcedure. HBASE-20100.
-065State.OFFLINE, State.CLOSED,  // 
disable/offline
-066State.SPLITTING, State.SPLIT, // 
ServerCrashProcedure
-067State.OPENING, State.FAILED_OPEN, // 
already in-progress (retrying)
-068  };
-069
-070  protected static final State[] 
STATES_EXPECTED_ON_CLOSE = new State[] {
-071State.SPLITTING, State.SPLIT, 
State.MERGING, // ServerCrashProcedure
-072State.OPEN,   // 
enabled/open
-073State.CLOSING // 
already in-progress (retrying)
-074  };
-075
-076  private static class 
AssignmentProcedureEvent extends ProcedureEvent<RegionInfo> {
-077public AssignmentProcedureEvent(final 
RegionInfo regionInfo) {
-078  super(regionInfo);
-079}
-080  }
-081
-082  private static class ServerReportEvent 
extends ProcedureEvent<ServerName> {
-083public ServerReportEvent(final 
ServerName serverName) {
-084  super(serverName);
-085}
-086  }
-087
-088  /**
-089   * Current Region State.
-090   * In-memory only. Not persisted.
-091   */
-092  // Mutable/Immutable? Changes have to 
be synchronized or not?
-093  // Data members are volatile which 
seems to say multi-threaded access is fine.
-094  // In the below we do check and set but 
the check state could change before
-095  // we do the set because no 
synchronization... which seems dodgy. Clear up
-096  // understanding here... how many 
threads accessing? Do locks make it so one
-097  // thread at a time working on a single 
Region's RegionStateNode? Lets presume
-098  // so for now. Odd is that elsewhere in 
this RegionStates, we synchronize on
-099  // the RegionStateNode instance. 
TODO.
-100  public static class RegionStateNode 
implements Comparable<RegionStateNode> {
-101private final RegionInfo 
regionInfo;
-102private final ProcedureEvent<?> event;
-103
-104private volatile 
RegionTransitionProcedure procedure = null;
-105private volatile ServerName 
regionLocation = null;
-106private volatile ServerName lastHost 
= null;
-107/**
-108 * A Region-in-Transition (RIT) moves 
through states.
-109 * See {@link State} for complete 
list. A Region that
-110 * is opened moves from OFFLINE => OPENING => OPENED.
-111 */
-112private volatile State state = 
State.OFFLINE;
-113
-114/**
-115 * Updated whenever a call to {@link 
#setRegionLocation(ServerName)}
-116 * or {@link #setState(State, 
State...)}.
-117 */
-118private volatile long lastUpdate = 
0;
-119
-120private volatile long openSeqNum = 
HConstants.NO_SEQNUM;
-121
-122public RegionStateNode(final 

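[Annotation] STATES_EXPECTED_ON_OPEN and STATES_EXPECTED_ON_CLOSE above encode which current states make a transition legal. A minimal sketch of the guard such arrays feed, with the enum trimmed to the states the arrays mention; this is not the real RegionStates code:

public class RegionStateGuardSketch {
  enum State { OFFLINE, OPENING, OPEN, FAILED_OPEN, CLOSING, CLOSED, SPLITTING, SPLIT, MERGING }

  private volatile State state = State.OFFLINE;

  // A transition is only legal if the current state is in the expected set.
  synchronized void setState(State update, State... expected) {
    for (State s : expected) {
      if (state == s) {
        state = update;
        return;
      }
    }
    throw new IllegalStateException(
        "Unexpected state transition: current=" + state + ", wanted=" + update);
  }
}
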
[01/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

2018-06-19 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 630414573 -> 65565d77b


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
index c1b4533..a0c568b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
@@ -136,220 +136,221 @@
 128  public void 
updateRegionLocation(RegionStates.RegionStateNode regionStateNode)
 129  throws IOException {
 130if 
(regionStateNode.getRegionInfo().isMetaRegion()) {
-131  
updateMetaLocation(regionStateNode.getRegionInfo(), 
regionStateNode.getRegionLocation());
-132} else {
-133  long openSeqNum = 
regionStateNode.getState() == State.OPEN ?
-134  regionStateNode.getOpenSeqNum() 
: HConstants.NO_SEQNUM;
-135  
updateUserRegionLocation(regionStateNode.getRegionInfo(), 
regionStateNode.getState(),
-136  
regionStateNode.getRegionLocation(), regionStateNode.getLastHost(), 
openSeqNum,
-137  // The regionStateNode may have 
no procedure in a test scenario; allow for this.
-138  regionStateNode.getProcedure() 
!= null?
-139  
regionStateNode.getProcedure().getProcId(): Procedure.NO_PROC_ID);
-140}
-141  }
-142
-143  private void updateMetaLocation(final 
RegionInfo regionInfo, final ServerName serverName)
-144  throws IOException {
-145try {
-146  
MetaTableLocator.setMetaLocation(master.getZooKeeper(), serverName,
-147regionInfo.getReplicaId(), 
State.OPEN);
-148} catch (KeeperException e) {
-149  throw new IOException(e);
-150}
-151  }
-152
-153  private void 
updateUserRegionLocation(final RegionInfo regionInfo, final State state,
-154  final ServerName regionLocation, 
final ServerName lastHost, final long openSeqNum,
-155  final long pid)
-156  throws IOException {
-157long time = 
EnvironmentEdgeManager.currentTime();
-158final int replicaId = 
regionInfo.getReplicaId();
-159final Put put = new 
Put(MetaTableAccessor.getMetaKeyForRegion(regionInfo), time);
-160MetaTableAccessor.addRegionInfo(put, 
regionInfo);
-161final StringBuilder info =
-162  new 
StringBuilder("pid=").append(pid).append(" updating hbase:meta row=")
-163
.append(regionInfo.getEncodedName()).append(", regionState=").append(state);
-164if (openSeqNum >= 0) {
-165  Preconditions.checkArgument(state == State.OPEN && regionLocation != null,
-166  "Open region should be on a 
server");
-167  MetaTableAccessor.addLocation(put, 
regionLocation, openSeqNum, replicaId);
-168  // only update replication barrier 
for default replica
-169  if (regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID &&
-170  hasGlobalReplicationScope(regionInfo.getTable())) {
-171
MetaTableAccessor.addReplicationBarrier(put, openSeqNum);
-172info.append(", 
repBarrier=").append(openSeqNum);
-173  }
-174  info.append(", 
openSeqNum=").append(openSeqNum);
-175  info.append(", 
regionLocation=").append(regionLocation);
-176} else if (regionLocation != null && !regionLocation.equals(lastHost)) {
-177  // Ideally, if no regionLocation, 
write null to the hbase:meta but this will confuse clients
-178  // currently; they want a server to 
hit. TODO: Make clients wait if no location.
-179  
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
-180  .setRow(put.getRow())
-181  
.setFamily(HConstants.CATALOG_FAMILY)
-182  
.setQualifier(getServerNameColumn(replicaId))
-183  
.setTimestamp(put.getTimestamp())
-184  .setType(Cell.Type.Put)
-185  
.setValue(Bytes.toBytes(regionLocation.getServerName()))
-186  .build());
-187  info.append(", 
regionLocation=").append(regionLocation);
-188}
-189
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
-190.setRow(put.getRow())
-191
.setFamily(HConstants.CATALOG_FAMILY)
-192
.setQualifier(getStateColumn(replicaId))
-193
.setTimestamp(put.getTimestamp())
-194.setType(Cell.Type.Put)
-195
.setValue(Bytes.toBytes(state.name()))
-196.build());
-197LOG.info(info.toString());
-198updateRegionLocation(regionInfo, 
state, put);
-199  }
-200
-201  private void 
updateRegionLocation(RegionInfo regionInfo, State state, Put put)
-202  throws IOException {
-203try (Table table = 
master.getConnection().getTable(TableName.META_TABLE_NAME)) {
-204  table.put(put);
-205} catch (IOException e) {
-206  // TODO: Revist Means that if a 

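[Annotation] updateUserRegionLocation above builds one mutation that carries the location columns only when the region is OPEN with a valid seqnum, and always carries the state column, all under a single timestamp. A simplified sketch of that row-building rule; Put below is a stand-in, not the HBase client class:

import java.util.LinkedHashMap;
import java.util.Map;

public class MetaRowSketch {
  // Stand-in mutation: qualifier -> value, one shared timestamp per row update.
  static class Put {
    final Map<String, String> cols = new LinkedHashMap<>();
    final long ts = System.currentTimeMillis();
    Put add(String qualifier, String value) { cols.put(qualifier, value); return this; }
  }

  static Put regionRow(String state, String location, long openSeqNum) {
    Put put = new Put();
    if (openSeqNum >= 0) {
      if (location == null) {
        throw new IllegalArgumentException("Open region should be on a server");
      }
      // Location columns only make sense for an open region with a seqnum.
      put.add("server", location).add("seqnumDuringOpen", Long.toString(openSeqNum));
    }
    return put.add("state", state); // the state column is written unconditionally
  }
}
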
[01/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

2018-06-14 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 436b0b15e -> e11cf2cba


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/downloads.html
--
diff --git a/downloads.html b/downloads.html
index f1c834c..5ba20c9 100644
--- a/downloads.html
+++ b/downloads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Apache HBase Downloads
 
@@ -366,7 +366,7 @@ under the License. -->
 https://www.apache.org/>The Apache Software Foundation.
 All rights reserved.  
 
-  Last Published: 2018-06-13
+  Last Published: 2018-06-14
 
 
 



[01/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site cddd30637 -> 3469cbc0b


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html
index 338eb9c..55713d8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html
@@ -401,10 +401,10 @@
 393" failed when creating the 
node for " + destServerName,
 394  e);
 395}
-396try {
-397  String oldQueueNode = 
getQueueNode(sourceServerName, queueId);
-398  ListString wals = 
ZKUtil.listChildrenNoWatch(zookeeper, oldQueueNode);
-399  String newQueueId = queueId + "-" + 
sourceServerName;
+396String newQueueId = queueId + "-" + 
sourceServerName;
+397try {
+398  String oldQueueNode = 
getQueueNode(sourceServerName, queueId);
+399  ListString wals = 
ZKUtil.listChildrenNoWatch(zookeeper, oldQueueNode);
 400  if (CollectionUtils.isEmpty(wals)) 
{
 401
ZKUtil.deleteNodeFailSilent(zookeeper, oldQueueNode);
 402LOG.info("Removed empty {}/{}", 
sourceServerName, queueId);
@@ -435,254 +435,255 @@
 427  return new Pair(newQueueId, 
logQueue);
 428} catch (NoNodeException | 
NodeExistsException | NotEmptyException | BadVersionException e) {
 429  // Multi call failed; it looks like 
some other regionserver took away the logs.
-430  // These exceptions mean that zk 
tells us the request can not be execute so it is safe to just
-431  // return a null. For other types 
of exception should be thrown out to notify the upper layer.
-432  LOG.info("Claim queue queueId={} 
from {} to {} failed with {}, someone else took the log?",
-433  queueId,sourceServerName, 
destServerName, e.toString());
-434  return null;
-435} catch (KeeperException | 
InterruptedException e) {
-436  throw new 
ReplicationException("Claim queue queueId=" + queueId + " from " +
-437sourceServerName + " to " + 
destServerName + " failed", e);
-438}
-439  }
-440
-441  @Override
-442  public void 
removeReplicatorIfQueueIsEmpty(ServerName serverName) throws 
ReplicationException {
-443try {
-444  
ZKUtil.deleteNodeFailSilent(zookeeper, getRsNode(serverName));
-445} catch (NotEmptyException e) {
-446  // keep silence to avoid logging 
too much.
-447} catch (KeeperException e) {
-448  throw new 
ReplicationException("Failed to remove replicator for " + serverName, e);
-449}
-450  }
-451
-452  private ListServerName 
getListOfReplicators0() throws KeeperException {
-453ListString children = 
ZKUtil.listChildrenNoWatch(zookeeper, queuesZNode);
-454if (children == null) {
-455  children = 
Collections.emptyList();
-456}
-457return 
children.stream().map(ServerName::parseServerName).collect(toList());
-458  }
-459
-460  @Override
-461  public ListServerName 
getListOfReplicators() throws ReplicationException {
-462try {
-463  return getListOfReplicators0();
-464} catch (KeeperException e) {
-465  throw new 
ReplicationException("Failed to get list of replicators", e);
-466}
-467  }
-468
-469  private ListString 
getWALsInQueue0(ServerName serverName, String queueId)
-470  throws KeeperException {
-471ListString children = 
ZKUtil.listChildrenNoWatch(zookeeper, getQueueNode(serverName,
-472queueId));
-473return children != null ? children : 
Collections.emptyList();
-474  }
-475
-476  @Override
-477  public ListString 
getWALsInQueue(ServerName serverName, String queueId)
-478  throws ReplicationException {
-479try {
-480  return getWALsInQueue0(serverName, 
queueId);
-481} catch (KeeperException e) {
-482  throw new ReplicationException(
-483  "Failed to get wals in queue 
(serverName=" + serverName + ", queueId=" + queueId + ")",
-484  e);
-485}
-486  }
-487
-488  private ListString 
getAllQueues0(ServerName serverName) throws KeeperException {
-489ListString children = 
ZKUtil.listChildrenNoWatch(zookeeper, getRsNode(serverName));
-490return children != null ? children : 
Collections.emptyList();
-491  }
-492
-493  @Override
-494  public ListString 
getAllQueues(ServerName serverName) throws ReplicationException {
-495try {
-496  return getAllQueues0(serverName);
-497} catch (KeeperException e) {
-498  throw new 
ReplicationException("Failed to get all queues (serverName=" + serverName + 
")", e);
-499}
-500  }
-501
-502  // will be overridden in UTs
-503  @VisibleForTesting
-504  protected int getQueuesZNodeCversion() 
throws 
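
One defensive pattern repeats through these listing helpers: ZKUtil.listChildrenNoWatch reports a missing znode as null, and every caller converts that to an empty list before streaming or mapping the children. A minimal standalone sketch of that normalization (plain Java, no ZooKeeper dependency; fetching the children is left to the caller):

    import java.util.Collections;
    import java.util.List;

    // Normalize a "null means missing node" result to an empty list so callers
    // can map and stream without a null check, as getListOfReplicators0 does.
    static List<String> childrenOrEmpty(List<String> children) {
      return children != null ? children : Collections.emptyList();
    }
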

[01/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

2018-05-30 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 33f757fb2 -> 4df09ed92


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/testdevapidocs/src-html/org/apache/hadoop/hbase/filter/TestColumnRangeFilter.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/filter/TestColumnRangeFilter.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/filter/TestColumnRangeFilter.html
index e73a619..1648ba3 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/filter/TestColumnRangeFilter.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/filter/TestColumnRangeFilter.html
@@ -32,247 +32,259 @@
 024import java.util.HashSet;
 025import java.util.List;
 026import java.util.Map;
-027import java.util.Set;
-028import org.apache.hadoop.hbase.*;
-029import org.apache.hadoop.hbase.HBaseClassTestRule;
-030import org.apache.hadoop.hbase.client.Durability;
-031import org.apache.hadoop.hbase.client.Put;
-032import org.apache.hadoop.hbase.client.Result;
-033import org.apache.hadoop.hbase.client.ResultScanner;
-034import org.apache.hadoop.hbase.client.Scan;
-035import org.apache.hadoop.hbase.client.Table;
-036import org.apache.hadoop.hbase.testclassification.FilterTests;
-037import org.apache.hadoop.hbase.testclassification.MediumTests;
-038import org.apache.hadoop.hbase.util.Bytes;
-039import org.junit.After;
-040import org.junit.AfterClass;
-041import org.junit.Before;
-042import org.junit.BeforeClass;
-043import org.junit.ClassRule;
-044import org.junit.Rule;
-045import org.junit.Test;
-046import org.junit.experimental.categories.Category;
-047import org.junit.rules.TestName;
-048import org.slf4j.Logger;
-049import org.slf4j.LoggerFactory;
-050
-051class StringRange {
-052  private String start = null;
-053  private String end = null;
-054  private boolean startInclusive = true;
-055  private boolean endInclusive = false;
+027import java.util.Objects;
+028import java.util.Set;
+029
+030import org.apache.hadoop.hbase.Cell;
+031import org.apache.hadoop.hbase.HBaseClassTestRule;
+032import org.apache.hadoop.hbase.HBaseTestingUtility;
+033import org.apache.hadoop.hbase.KeyValue;
+034import org.apache.hadoop.hbase.KeyValueTestUtil;
+035import org.apache.hadoop.hbase.TableName;
+036import org.apache.hadoop.hbase.client.Durability;
+037import org.apache.hadoop.hbase.client.Put;
+038import org.apache.hadoop.hbase.client.Result;
+039import org.apache.hadoop.hbase.client.ResultScanner;
+040import org.apache.hadoop.hbase.client.Scan;
+041import org.apache.hadoop.hbase.client.Table;
+042import org.apache.hadoop.hbase.testclassification.FilterTests;
+043import org.apache.hadoop.hbase.testclassification.MediumTests;
+044import org.apache.hadoop.hbase.util.Bytes;
+045import org.junit.After;
+046import org.junit.AfterClass;
+047import org.junit.Before;
+048import org.junit.BeforeClass;
+049import org.junit.ClassRule;
+050import org.junit.Rule;
+051import org.junit.Test;
+052import org.junit.experimental.categories.Category;
+053import org.junit.rules.TestName;
+054import org.slf4j.Logger;
+055import org.slf4j.LoggerFactory;
 056
-057  public StringRange(String start, boolean startInclusive, String end,
-058  boolean endInclusive) {
-059this.start = start;
-060this.startInclusive = startInclusive;
-061this.end = end;
-062this.endInclusive = endInclusive;
-063  }
-064
-065  public String getStart() {
-066return this.start;
-067  }
-068
-069  public String getEnd() {
-070return this.end;
-071  }
-072
-073  public boolean isStartInclusive() {
-074return this.startInclusive;
-075  }
-076
-077  public boolean isEndInclusive() {
-078return this.endInclusive;
-079  }
-080
-081  @Override
-082  public int hashCode() {
-083int hashCode = 0;
-084if (this.start != null) {
-085  hashCode ^= this.start.hashCode();
-086}
-087
-088if (this.end != null) {
-089  hashCode ^= this.end.hashCode();
-090}
-091return hashCode;
-092  }
+057class StringRange {
+058  private String start = null;
+059  private String end = null;
+060  private boolean startInclusive = true;
+061  private boolean endInclusive = false;
+062
+063  public StringRange(String start, boolean startInclusive, String end,
+064  boolean endInclusive) {
+065this.start = start;
+066this.startInclusive = startInclusive;
+067this.end = end;
+068this.endInclusive = endInclusive;
+069  }
+070
+071  public String getStart() {
+072return this.start;
+073  }
+074
+075  public String getEnd() {
+076return this.end;
+077  }
+078
+079  public boolean isStartInclusive() {
+080return this.startInclusive;
+081  }
+082
+083  public boolean isEndInclusive() {
+084return this.endInclusive;
+085  }
+086
+087  @Override
+088  public int hashCode() {
+089int hashCode = 0;
+090if (this.start != null) {
+091  hashCode ^= this.start.hashCode();
+092   
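
The java.util.Objects import added at line 027 of the new file hints at the cleanup direction for helper classes like StringRange: the JDK utility combines nullable fields without hand-rolled null checks. A hedged sketch of that style (illustrative, not necessarily the exact body the revised test ends up with):

    import java.util.Objects;

    @Override
    public int hashCode() {
      // Null-safe, and mixes all four fields, unlike an XOR of only start and end.
      return Objects.hash(start, end, startInclusive, endInclusive);
    }
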

[01/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

2018-05-24 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 99353e00e -> 883dde2f9


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html 
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
index 3d2b4ec..ff8338b 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class PerformanceEvaluation.ScanTest
+static class PerformanceEvaluation.ScanTest
 extends PerformanceEvaluation.TableTest
 
 
@@ -208,8 +208,10 @@ extends Method and Description
 
 
-(package private) void
-testRow(int i)
+(package private) boolean
+testRow(int i)
+Test for individual row.
+
 
 
 (package private) void
@@ -224,13 +226,6 @@ extends onStartup,
 onTakedown
 
 
-
-
-
-Methods inherited from class org.apache.hadoop.hbase.PerformanceEvaluation.Test
-closeConnection, createConnection
-
-
 
 
 
@@ -264,7 +259,7 @@ extends 
 
 testScanner
-privateorg.apache.hadoop.hbase.client.ResultScanner testScanner
-private org.apache.hadoop.hbase.client.ResultScanner testScanner
+private org.apache.hadoop.hbase.client.ResultScanner testScanner
 
 
@@ -281,7 +276,7 @@ extends 
 
 ScanTest
-ScanTest(org.apache.hadoop.hbase.client.Connectioncon,
+ScanTest(org.apache.hadoop.hbase.client.Connectioncon,
  PerformanceEvaluation.TestOptionsoptions,
  PerformanceEvaluation.Statusstatus)
 
@@ -300,7 +295,7 @@ extends 
 
 testTakedown
-voidtestTakedown()
+voidtestTakedown()
throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Overrides:
@@ -316,11 +311,19 @@ extends 
 
 testRow
-voidtestRow(inti)
-  throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
+booleantestRow(inti)
+ throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
+Description copied from 
class:PerformanceEvaluation.TestBase
+Test for individual row.
 
 Specified by:
 testRowin
 classPerformanceEvaluation.TestBase
+Parameters:
+i - Row index.
+Returns:
+true if the row was sent to server and need to record metrics.
+ False if not, multiGet and multiPut e.g., the rows are sent
+ to server only if enough gets/puts are gathered.
 Throws:
 https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 



[01/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 74b8701f3 -> 7bcc960d3


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
index 3f8844b..cdb9398 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
@@ -140,2712 +140,2713 @@
 132public class PerformanceEvaluation extends Configured implements Tool {
 133  static final String RANDOM_SEEK_SCAN = "randomSeekScan";
 134  static final String RANDOM_READ = "randomRead";
-135  private static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136  private static final ObjectMapper MAPPER = new ObjectMapper();
-137  static {
-138MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139  }
-140
-141  public static final String TABLE_NAME = "TestTable";
-142  public static final String FAMILY_NAME_BASE = "info";
-143  public static final byte[] FAMILY_ZERO = Bytes.toBytes("info0");
-144  public static final byte[] COLUMN_ZERO = Bytes.toBytes("" + 0);
-145  public static final int DEFAULT_VALUE_LENGTH = 1000;
-146  public static final int ROW_LENGTH = 26;
-147
-148  private static final int ONE_GB = 1024 * 1024 * 1000;
-149  private static final int DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150  // TODO : should we make this configurable
-151  private static final int TAG_LENGTH = 256;
-152  private static final DecimalFormat FMT = new DecimalFormat("0.##");
-153  private static final MathContext CXT = MathContext.DECIMAL64;
-154  private static final BigDecimal MS_PER_SEC = BigDecimal.valueOf(1000);
-155  private static final BigDecimal BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156  private static final TestOptions DEFAULT_OPTS = new TestOptions();
-157
-158  private static Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();
-159  private static final Path PERF_EVAL_DIR = new Path("performance_evaluation");
-160
-161  static {
-162addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163"Run async random read test");
-164addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165"Run async random write test");
-166addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167"Run async sequential read test");
-168addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169"Run async sequential write test");
-170addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171"Run async scan test (read every row)");
-172addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173  "Run random read test");
-174addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175  "Run random seek and scan 100 test");
-176addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177  "Run random seek scan with both start and stop row (max 10 rows)");
-178addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179  "Run random seek scan with both start and stop row (max 100 rows)");
-180addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181  "Run random seek scan with both start and stop row (max 1000 rows)");
-182addCommandDescriptor(RandomScanWithRange10000Test.class, "scanRange10000",
-183  "Run random seek scan with both start and stop row (max 10000 rows)");
-184addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185  "Run random write test");
-186addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187  "Run sequential read test");
-188addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189  "Run sequential write test");
-190addCommandDescriptor(ScanTest.class, "scan",
-191  "Run scan test (read every row)");
-192addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193  "Run scan test using a filter to find a specific row based on its value " +
-194  "(make sure to use --rows=20)");
-195addCommandDescriptor(IncrementTest.class, "increment",
-196  "Increment on each row; clients overlap on keyspace so some concurrent operations");
-197addCommandDescriptor(AppendTest.class, "append",
-198  "Append on each row; clients overlap on keyspace so some concurrent operations");
-199addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200  "CheckAndMutate on each row; clients overlap on 
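
The static block being rewritten here is a plain command registry: each test class is bound to a name and a description in a TreeMap, which is what keeps --help output alphabetical and lets the argument parser resolve a command by name. The same pattern in miniature (a standalone sketch with assumed names, not the PerformanceEvaluation code itself):

    import java.util.Map;
    import java.util.TreeMap;

    final class CommandRegistry {
      // TreeMap keeps commands sorted by name for help output.
      private static final Map<String, String> COMMANDS = new TreeMap<>();

      static void register(String name, String description) {
        COMMANDS.put(name, description);
      }

      static void printHelp() {
        COMMANDS.forEach((name, desc) -> System.out.println(name + "  " + desc));
      }
    }
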

[01/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

2018-05-12 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site a8dd4f308 -> 92a26cfbe


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.ReadOwnWritesTester.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.ReadOwnWritesTester.html
 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.ReadOwnWritesTester.html
index b08ee76..057351b 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.ReadOwnWritesTester.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.ReadOwnWritesTester.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class TestDefaultMemStore.ReadOwnWritesTester
+private static class TestDefaultMemStore.ReadOwnWritesTester
 extends Thread
 
 
@@ -270,7 +270,7 @@ extends Thread
 
 
 NUM_TRIES
-static final int NUM_TRIES
+static final int NUM_TRIES
 
 See Also:
 Constant Field Values
@@ -283,7 +283,7 @@ extends Thread
 
 
 row
-final byte[] row
+final byte[] row
 
 
 
@@ -292,7 +292,7 @@ extends Thread
 
 
 f
-final byte[] f
+final byte[] f
 
 
 
@@ -301,7 +301,7 @@ extends Thread
 
 
 q1
-final byte[] q1
+final byte[] q1
 
 
 
@@ -310,7 +310,7 @@ extends Thread
 
 
 mvcc
-final org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl mvcc
+final org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl mvcc
 
 
 
@@ -319,7 +319,7 @@ extends Thread
 
 
 memstore
-final org.apache.hadoop.hbase.regionserver.MemStore memstore
+final org.apache.hadoop.hbase.regionserver.MemStore memstore
 
 
 
@@ -328,7 +328,7 @@ extends Thread
 
 
 caughtException
-AtomicReference<Throwable> caughtException
+AtomicReference<Throwable> caughtException
 
 
 
@@ -345,7 +345,7 @@ extends Thread
 
 
 ReadOwnWritesTester
-public ReadOwnWritesTester(int id,
+public ReadOwnWritesTester(int id,
 org.apache.hadoop.hbase.regionserver.MemStore memstore,
 org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl mvcc,
 AtomicReference<Throwable> caughtException)
@@ -365,7 +365,7 @@ extends Thread
 
 
 run
-public void run()
+public void run()
 
 Specified by:
 run in interface Runnable
@@ -380,7 +380,7 @@ extends Thread
 
 
 internalRun
-private void internalRun()
+private void internalRun()
   throws IOException
 
 Throws:
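
The members above encode the test's contract: every writer thread shares one MultiVersionConcurrencyControl and must be able to read its own edit as soon as its write transaction completes. A hedged outline of that begin/complete discipline (simplified; the real internalRun also drives a MemStore and records failures into caughtException):

    // Sketch of read-own-writes against MVCC: begin a write transaction,
    // apply the edit, publish it, then a read at the current read point
    // must observe the edit.
    MultiVersionConcurrencyControl.WriteEntry w = mvcc.begin();
    try {
      applyEdit(row, f, q1);        // illustrative helper, not the real API
    } finally {
      mvcc.completeAndWait(w);      // make the write visible to readers
    }
    assert readsSeeEdit(mvcc.getReadPoint());  // illustrative check
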



[01/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

2018-05-05 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 64bc8d859 -> f2065178e


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TableTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TableTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TableTest.html
index 2510283..418c60c 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TableTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TableTest.html
@@ -77,77 +77,77 @@
 069import org.apache.hadoop.hbase.client.RowMutations;
 070import org.apache.hadoop.hbase.client.Scan;
 071import org.apache.hadoop.hbase.client.Table;
-072import org.apache.hadoop.hbase.filter.BinaryComparator;
-073import org.apache.hadoop.hbase.filter.Filter;
-074import org.apache.hadoop.hbase.filter.FilterAllFilter;
-075import org.apache.hadoop.hbase.filter.FilterList;
-076import org.apache.hadoop.hbase.filter.PageFilter;
-077import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-078import org.apache.hadoop.hbase.filter.WhileMatchFilter;
-079import org.apache.hadoop.hbase.io.compress.Compression;
-080import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-081import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
-082import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-083import org.apache.hadoop.hbase.regionserver.BloomType;
-084import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-085import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
-086import org.apache.hadoop.hbase.trace.SpanReceiverHost;
-087import org.apache.hadoop.hbase.trace.TraceUtil;
-088import org.apache.hadoop.hbase.util.ByteArrayHashKey;
-089import org.apache.hadoop.hbase.util.Bytes;
-090import org.apache.hadoop.hbase.util.Hash;
-091import org.apache.hadoop.hbase.util.MurmurHash;
-092import org.apache.hadoop.hbase.util.Pair;
-093import org.apache.hadoop.hbase.util.YammerHistogramUtils;
-094import org.apache.hadoop.io.LongWritable;
-095import org.apache.hadoop.io.Text;
-096import org.apache.hadoop.mapreduce.Job;
-097import org.apache.hadoop.mapreduce.Mapper;
-098import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-099import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-100import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
-101import org.apache.hadoop.util.Tool;
-102import org.apache.hadoop.util.ToolRunner;
-103import org.apache.htrace.core.ProbabilitySampler;
-104import org.apache.htrace.core.Sampler;
-105import org.apache.htrace.core.TraceScope;
-106import org.apache.yetus.audience.InterfaceAudience;
-107import org.slf4j.Logger;
-108import org.slf4j.LoggerFactory;
-109import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
-110import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-111
-112/**
-113 * Script used evaluating HBase performance and scalability.  Runs a HBase
-114 * client that steps through one of a set of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random writes test, etc.). Pass on the
-116 * command-line which test to run and how many clients are participating in
-117 * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * <p>This class sets up and runs the evaluation programs described in
-120 * Section 7, <i>Performance Evaluation</i>, of the <a
-121 * href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
-122 * paper, pages 8-10.
-123 *
-124 * <p>By default, runs as a mapreduce job where each mapper runs a single test
-125 * client. Can also run as a non-mapreduce, multithreaded application by
-126 * specifying {@code --nomapred}. Each client does about 1GB of data, unless
-127 * specified otherwise.
-128 */
-129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-130public class PerformanceEvaluation extends Configured implements Tool {
-131  static final String RANDOM_SEEK_SCAN = "randomSeekScan";
-132  static final String RANDOM_READ = "randomRead";
-133  private static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-134  private static final ObjectMapper MAPPER = new ObjectMapper();
-135  static {
-136MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-137  }
-138
-139  public static final String TABLE_NAME = "TestTable";
-140  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
-141  public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0);
-142  public static final byte [] QUALIFIER_NAME = COLUMN_ZERO;
+072import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+073import org.apache.hadoop.hbase.filter.BinaryComparator;
+074import 
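
Because PerformanceEvaluation implements Tool, the class javadoc's advice applies programmatically as well as on the shell. A hedged sketch of launching it through ToolRunner (the argument values are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.util.ToolRunner;

    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Roughly what `hbase pe --nomapred --rows=100000 randomRead 4` does:
      int exitCode = ToolRunner.run(conf, new PerformanceEvaluation(conf),
          new String[] { "--nomapred", "--rows=100000", "randomRead", "4" });
      System.exit(exitCode);
    }
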

[01/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

2018-05-04 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site a1e6bc211 -> de18d4687


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.html
index 7a938de..43a87b6 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.html
@@ -33,539 +33,515 @@
 025import java.util.ArrayList;
 026import java.util.List;
 027import java.util.UUID;
-028import java.util.concurrent.atomic.AtomicBoolean;
-029import java.util.concurrent.atomic.AtomicInteger;
-030import java.util.concurrent.atomic.AtomicReference;
-031import org.apache.hadoop.hbase.Cell;
-032import org.apache.hadoop.hbase.HBaseClassTestRule;
-033import org.apache.hadoop.hbase.Waiter;
-034import org.apache.hadoop.hbase.client.Connection;
-035import org.apache.hadoop.hbase.client.ConnectionFactory;
-036import org.apache.hadoop.hbase.client.Put;
-037import org.apache.hadoop.hbase.client.RegionInfo;
-038import org.apache.hadoop.hbase.client.Table;
-039import org.apache.hadoop.hbase.regionserver.HRegion;
-040import org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint;
-041import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationGlobalSourceSource;
-042import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl;
-043import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource;
-044import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSourceImpl;
-045import org.apache.hadoop.hbase.replication.regionserver.MetricsSource;
-046import org.apache.hadoop.hbase.testclassification.MediumTests;
-047import org.apache.hadoop.hbase.testclassification.ReplicationTests;
-048import org.apache.hadoop.hbase.util.Bytes;
-049import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-050import org.apache.hadoop.hbase.util.Threads;
-051import org.apache.hadoop.hbase.wal.WAL.Entry;
-052import org.apache.hadoop.hbase.zookeeper.ZKConfig;
-053import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
-054import org.junit.AfterClass;
-055import org.junit.Assert;
-056import org.junit.Before;
-057import org.junit.BeforeClass;
-058import org.junit.ClassRule;
-059import org.junit.Test;
-060import org.junit.experimental.categories.Category;
-061import org.slf4j.Logger;
-062import org.slf4j.LoggerFactory;
-063
-064/**
-065 * Tests ReplicationSource and ReplicationEndpoint interactions
-066 */
-067@Category({ ReplicationTests.class, MediumTests.class })
-068public class TestReplicationEndpoint extends TestReplicationBase {
-069
-070  @ClassRule
-071  public static final HBaseClassTestRule CLASS_RULE =
-072  HBaseClassTestRule.forClass(TestReplicationEndpoint.class);
-073
-074  private static final Logger LOG = LoggerFactory.getLogger(TestReplicationEndpoint.class);
-075
-076  static int numRegionServers;
-077
-078  @BeforeClass
-079  public static void setUpBeforeClass() throws Exception {
-080TestReplicationBase.setUpBeforeClass();
-081numRegionServers = utility1.getHBaseCluster().getRegionServerThreads().size();
-082  }
-083
-084  @AfterClass
-085  public static void tearDownAfterClass() throws Exception {
-086TestReplicationBase.tearDownAfterClass();
-087// check stop is called
-088Assert.assertTrue(ReplicationEndpointForTest.stoppedCount.get() > 0);
-089  }
-090
-091  @Before
-092  public void setup() throws Exception {
-093ReplicationEndpointForTest.contructedCount.set(0);
-094ReplicationEndpointForTest.startedCount.set(0);
-095ReplicationEndpointForTest.replicateCount.set(0);
-096ReplicationEndpointReturningFalse.replicated.set(false);
-097ReplicationEndpointForTest.lastEntries = null;
-098final List<RegionServerThread> rsThreads =
-099utility1.getMiniHBaseCluster().getRegionServerThreads();
-100for (RegionServerThread rs : rsThreads) {
-101  utility1.getAdmin().rollWALWriter(rs.getRegionServer().getServerName());
-102}
-103// Wait for all log roll to finish
-104utility1.waitFor(3000, new Waiter.ExplainingPredicate<Exception>() {
-105  @Override
-106  public boolean evaluate() throws Exception {
-107for (RegionServerThread rs : rsThreads) {
-108  if (!rs.getRegionServer().walRollRequestFinished()) {
-109return 
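
The setup method leans on waitFor with a Waiter.ExplainingPredicate, which turns a timed-out poll into a readable failure instead of a bare sleep. A hedged sketch of that idiom (the condition body is an illustrative helper, not the test's actual loop):

    // Poll until the predicate holds or the timeout elapses; on timeout the
    // explainFailure() text is attached to the error raised by waitFor.
    utility1.waitFor(3000, new Waiter.ExplainingPredicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return allWalRollsFinished();   // illustrative helper
      }

      @Override
      public String explainFailure() throws Exception {
        return "Some region server has not finished rolling its WAL";
      }
    });
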

[01/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site f50447c3e -> d220bc5e7


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
index 8302e28..c370eb9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
@@ -2113,3031 +2113,3033 @@
 2105errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
 2106tableName + " unable to delete dangling table state " + tableState);
 2107  }
-2108} else {
-2109  errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
-2110  tableName + " has dangling table state " + tableState);
-2111}
-2112  }
-2113}
-2114// check that all tables have states
-2115for (TableName tableName : tablesInfo.keySet()) {
-2116  if (isTableIncluded(tableName) && !tableStates.containsKey(tableName)) {
-2117if (fixMeta) {
-2118  MetaTableAccessor.updateTableState(connection, tableName, TableState.State.ENABLED);
-2119  TableState newState = MetaTableAccessor.getTableState(connection, tableName);
-2120  if (newState == null) {
-2121errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2122"Unable to change state for table " + tableName + " in meta ");
-2123  }
-2124} else {
-2125  errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2126  tableName + " has no state in meta ");
-2127}
-2128  }
-2129}
-2130  }
-2131
-2132  private void preCheckPermission() throws IOException, AccessDeniedException {
-2133if (shouldIgnorePreCheckPermission()) {
-2134  return;
-2135}
-2136
-2137Path hbaseDir = FSUtils.getRootDir(getConf());
-2138FileSystem fs = hbaseDir.getFileSystem(getConf());
-2139UserProvider userProvider = UserProvider.instantiate(getConf());
-2140UserGroupInformation ugi = userProvider.getCurrent().getUGI();
-2141FileStatus[] files = fs.listStatus(hbaseDir);
-2142for (FileStatus file : files) {
-2143  try {
-2144FSUtils.checkAccess(ugi, file, FsAction.WRITE);
-2145  } catch (AccessDeniedException ace) {
-2146LOG.warn("Got AccessDeniedException when preCheckPermission ", ace);
-2147errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
-2148  + " does not have write perms to " + file.getPath()
-2149  + ". Please rerun hbck as hdfs user " + file.getOwner());
-2150throw ace;
-2151  }
-2152}
-2153  }
-2154
-2155  /**
-2156   * Deletes region from meta table
-2157   */
-2158  private void deleteMetaRegion(HbckInfo hi) throws IOException {
-2159deleteMetaRegion(hi.metaEntry.getRegionName());
-2160  }
-2161
-2162  /**
-2163   * Deletes region from meta table
-2164   */
-2165  private void deleteMetaRegion(byte[] metaKey) throws IOException {
-2166Delete d = new Delete(metaKey);
-2167meta.delete(d);
-2168LOG.info("Deleted " + Bytes.toString(metaKey) + " from META" );
-2169  }
-2170
-2171  /**
-2172   * Reset the split parent region info in meta table
-2173   */
-2174  private void resetSplitParent(HbckInfo hi) throws IOException {
-2175RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
-2176Delete d = new Delete(hi.metaEntry.getRegionName());
-2177d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
-2178d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
-2179mutations.add(d);
-2180
-2181RegionInfo hri = RegionInfoBuilder.newBuilder(hi.metaEntry)
-2182.setOffline(false)
-2183.setSplit(false)
-2184.build();
-2185Put p = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
-2186mutations.add(p);
-2187
-2188meta.mutateRow(mutations);
-2189LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
-2190  }
-2191
-2192  /**
-2193   * This backwards-compatibility wrapper for permanently offlining a region
-2194   * that should not be alive.  If the region server does not support the
-2195   * "offline" method, it will use the closest unassign method instead.  This
-2196   * will basically work until one attempts to disable or delete the affected
-2197   * table.  The problem has to do with in-memory only master state, so
-2198   * restarting the HMaster or failing over to another should fix this.
-2199   */
-2200  private void offline(byte[] regionName) throws IOException {
-2201String regionString = 
[01/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

2018-03-27 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site c41a1fcb4 -> e0fb1fdea


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/CleanerChore.Action.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/CleanerChore.Action.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/CleanerChore.Action.html
index c878296..c14b16b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/CleanerChore.Action.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/CleanerChore.Action.html
@@ -33,478 +33,540 @@
 025import java.util.Map;
 026import 
java.util.concurrent.ExecutionException;
 027import 
java.util.concurrent.ForkJoinPool;
-028import 
java.util.concurrent.RecursiveTask;
-029import 
java.util.concurrent.atomic.AtomicBoolean;
-030import 
org.apache.hadoop.conf.Configuration;
-031import org.apache.hadoop.fs.FileStatus;
-032import org.apache.hadoop.fs.FileSystem;
-033import org.apache.hadoop.fs.Path;
-034import 
org.apache.hadoop.hbase.ScheduledChore;
-035import 
org.apache.hadoop.hbase.Stoppable;
-036import 
org.apache.hadoop.hbase.conf.ConfigurationObserver;
-037import 
org.apache.hadoop.hbase.util.FSUtils;
-038import 
org.apache.hadoop.ipc.RemoteException;
-039import 
org.apache.yetus.audience.InterfaceAudience;
-040import org.slf4j.Logger;
-041import org.slf4j.LoggerFactory;
-042
-043import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-044import 
org.apache.hbase.thirdparty.com.google.common.base.Predicate;
-045import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;
-046import 
org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
-047import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-048
-049/**
-050 * Abstract Cleaner that uses a chain of 
delegates to clean a directory of files
-051 * @param T Cleaner delegate 
class that is dynamically loaded from configuration
-052 */
-053@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD",
-054justification="TODO: Fix. It is wonky 
have static pool initialized from instance")
-055@InterfaceAudience.Private
-056public abstract class CleanerChoreT 
extends FileCleanerDelegate extends ScheduledChore
-057implements ConfigurationObserver {
-058
-059  private static final Logger LOG = 
LoggerFactory.getLogger(CleanerChore.class);
-060  private static final int 
AVAIL_PROCESSORS = Runtime.getRuntime().availableProcessors();
-061
-062  /**
-063   * If it is an integer and = 1, it 
would be the size;
-064   * if 0.0  size = 1.0, size 
would be available processors * size.
-065   * Pay attention that 1.0 is different 
from 1, former indicates it will use 100% of cores,
-066   * while latter will use only 1 thread 
for chore to scan dir.
-067   */
-068  public static final String 
CHORE_POOL_SIZE = "hbase.cleaner.scan.dir.concurrent.size";
-069  private static final String 
DEFAULT_CHORE_POOL_SIZE = "0.25";
-070
-071  // It may be waste resources for each 
cleaner chore own its pool,
-072  // so let's make pool for all cleaner 
chores.
-073  private static volatile ForkJoinPool 
CHOREPOOL;
-074  private static volatile int 
CHOREPOOLSIZE;
-075
-076  protected final FileSystem fs;
-077  private final Path oldFileDir;
-078  private final Configuration conf;
-079  protected final MapString, 
Object params;
-080  private final AtomicBoolean enabled = 
new AtomicBoolean(true);
-081  private final AtomicBoolean reconfig = 
new AtomicBoolean(false);
-082  protected ListT 
cleanersChain;
-083
-084  public CleanerChore(String name, final 
int sleepPeriod, final Stoppable s, Configuration conf,
-085  FileSystem fs, Path 
oldFileDir, String confKey) {
-086this(name, sleepPeriod, s, conf, fs, 
oldFileDir, confKey, null);
-087  }
-088
-089  /**
-090   * @param name name of the chore being 
run
-091   * @param sleepPeriod the period of 
time to sleep between each run
-092   * @param s the stopper
-093   * @param conf configuration to use
-094   * @param fs handle to the FS
-095   * @param oldFileDir the path to the 
archived files
-096   * @param confKey configuration key for 
the classes to instantiate
-097   * @param params members could be used 
in cleaner
-098   */
-099  public CleanerChore(String name, final 
int sleepPeriod, final Stoppable s, Configuration conf,
-100  FileSystem fs, Path oldFileDir, 
String confKey, MapString, Object params) {
-101super(name, s, sleepPeriod);
-102this.fs = fs;
-103this.oldFileDir = oldFileDir;
-104this.conf = conf;
-105this.params = params;
-106initCleanerChain(confKey);
-107
-108if (CHOREPOOL == null) {
-109  String poolSize = 
conf.get(CHORE_POOL_SIZE, DEFAULT_CHORE_POOL_SIZE);
-110  CHOREPOOLSIZE = 
calculatePoolSize(poolSize);
-111 
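
The CHORE_POOL_SIZE javadoc above encodes a small parsing rule: an integer of 1 or more is an absolute thread count, while a fraction in (0.0, 1.0] is a share of the available processors. A hedged sketch of that calculation (close in spirit to CleanerChore.calculatePoolSize, but simplified):

    // "4" -> 4 threads; "0.25" on a 16-core machine -> 4 threads; "1.0" -> all cores.
    static int calculatePoolSize(String poolSize) {
      int processors = Runtime.getRuntime().availableProcessors();
      if (poolSize.matches("[1-9][0-9]*")) {
        return Integer.parseInt(poolSize);          // absolute size
      }
      double fraction = Double.parseDouble(poolSize);
      if (fraction > 0.0 && fraction <= 1.0) {
        return (int) (processors * fraction);       // share of the cores
      }
      return (int) (processors * 0.25);             // fall back to the default
    }
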

[01/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.

2018-03-24 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 82f99e1b9 -> 165414687


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/org/apache/hadoop/hbase/client/class-use/Append.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Append.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Append.html
index 73bfce7..cd91eb9 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Append.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Append.html
@@ -238,9 +238,7 @@ service.
 
 
 CompletableFuture<Result>
-AsyncTable.append(Append append)
-Appends values to one or more columns within a single row.
-
+AsyncTableImpl.append(Append append)
 
 
 Result
@@ -249,16 +247,18 @@ service.
 
 
 
-CompletableFuture<Result>
-AsyncTableImpl.append(Append append)
+Result
+HTable.append(Append append)
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFutureResult
-RawAsyncTableImpl.append(Appendappend)
+AsyncTable.append(Appendappend)
+Appends values to one or more columns within a single 
row.
+
 
 
-Result
-HTable.append(Appendappend)
+https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFutureResult
+RawAsyncTableImpl.append(Appendappend)
 
 
 
@@ -325,13 +325,13 @@ service.
 
 
 Result
-Region.append(Appendappend)
-Perform one or more append operations on a row.
-
+HRegion.append(Appendappend)
 
 
 Result
-HRegion.append(Appendappend)
+Region.append(Appendappend)
+Perform one or more append operations on a row.
+
 
 
 Result
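
The rows above are only the cross-reference index for Append; the call shape itself is small. A hedged usage sketch against the synchronous Table API (family and qualifier names are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Append;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    static Result appendSuffix(Table table) throws IOException {
      Append append = new Append(Bytes.toBytes("row1"));
      append.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("log"), Bytes.toBytes(",next"));
      return table.append(append);   // returns the cell values after the append
    }
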



[01/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

2018-03-23 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site d6af018fe -> f1ebf5b6d


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
index 238fee7..dccdeeb 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
@@ -152,27 +152,27 @@ the order they are declared.
 
 
 PeerProcedureInterface.PeerOperationType
-RefreshPeerProcedure.getPeerOperationType()
+DisablePeerProcedure.getPeerOperationType()
 
 
 PeerProcedureInterface.PeerOperationType
-DisablePeerProcedure.getPeerOperationType()
+RemovePeerProcedure.getPeerOperationType()
 
 
 PeerProcedureInterface.PeerOperationType
-UpdatePeerConfigProcedure.getPeerOperationType()
+EnablePeerProcedure.getPeerOperationType()
 
 
 PeerProcedureInterface.PeerOperationType
-AddPeerProcedure.getPeerOperationType()
+RefreshPeerProcedure.getPeerOperationType()
 
 
 PeerProcedureInterface.PeerOperationType
-EnablePeerProcedure.getPeerOperationType()
+AddPeerProcedure.getPeerOperationType()
 
 
 PeerProcedureInterface.PeerOperationType
-RemovePeerProcedure.getPeerOperationType()
+UpdatePeerConfigProcedure.getPeerOperationType()
 
 
 private static PeerProcedureInterface.PeerOperationType
@@ -188,6 +188,11 @@ the order they are declared.
 
 
 
+private void
+ModifyPeerProcedure.refreshPeer(MasterProcedureEnv env,
+   PeerProcedureInterface.PeerOperationType type)
+
+
 private static 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationType
 RefreshPeerProcedure.toPeerModificationType(PeerProcedureInterface.PeerOperationType type)
 



[01/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

2018-03-22 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site c30ef1dd2 -> 4dc2a2e85


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/Cacheable.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/Cacheable.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/Cacheable.html
index 23465a9..adce144 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/Cacheable.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/Cacheable.html
@@ -176,35 +176,35 @@
 
 
 Cacheable
-BlockCache.getBlock(BlockCacheKey cacheKey,
+CombinedBlockCache.getBlock(BlockCacheKey cacheKey,
 boolean caching,
 boolean repeat,
-boolean updateCacheMetrics)
-Fetch block from cache.
-
+boolean updateCacheMetrics)
 
 
 Cacheable
-LruBlockCache.getBlock(BlockCacheKey cacheKey,
+InclusiveCombinedBlockCache.getBlock(BlockCacheKey cacheKey,
 boolean caching,
 boolean repeat,
-boolean updateCacheMetrics)
-Get the buffer of the block with the specified name.
-
+boolean updateCacheMetrics)
 
 
 Cacheable
-CombinedBlockCache.getBlock(BlockCacheKey cacheKey,
+BlockCache.getBlock(BlockCacheKey cacheKey,
 boolean caching,
 boolean repeat,
-boolean updateCacheMetrics)
+boolean updateCacheMetrics)
+Fetch block from cache.
+
 
 
 Cacheable
-InclusiveCombinedBlockCache.getBlock(BlockCacheKey cacheKey,
+LruBlockCache.getBlock(BlockCacheKey cacheKey,
 boolean caching,
 boolean repeat,
-boolean updateCacheMetrics)
+boolean updateCacheMetrics)
+Get the buffer of the block with the specified name.
+
 
 
 Cacheable
@@ -253,27 +253,39 @@
 
 
 void
+CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
+  Cacheable buf)
+
+
+void
 BlockCache.cacheBlock(BlockCacheKey cacheKey,
   Cacheable buf)
 Add block to cache (defaults to not in-memory).
 
 
-
+
 void
 LruBlockCache.cacheBlock(BlockCacheKey cacheKey,
   Cacheable buf)
 Cache the block with the specified name and buffer.
 
 
-
+
 void
-CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
+MemcachedBlockCache.cacheBlock(BlockCacheKey cacheKey,
   Cacheable buf)
 
+
+void
+CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
+  Cacheable buf,
+  boolean inMemory)
+
 
 void
-MemcachedBlockCache.cacheBlock(BlockCacheKey cacheKey,
-  Cacheable buf)
+InclusiveCombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
+  Cacheable buf,
+  boolean inMemory)
 
 
 void
@@ -293,18 +305,6 @@
 
 
 void
-CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
-  Cacheable buf,
-  boolean inMemory)
-
-
-void
-InclusiveCombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
-  Cacheable buf,
-  boolean inMemory)
-
-
-void
 MemcachedBlockCache.cacheBlock(BlockCacheKey cacheKey,
   Cacheable buf,
   boolean inMemory)
@@ -315,6 +315,11 @@
 Cacheable right)
 
 
+void
+CombinedBlockCache.returnBlock(BlockCacheKey cacheKey,
+   Cacheable block)
+
+
 default void
 BlockCache.returnBlock(BlockCacheKey cacheKey,
 Cacheable block)
@@ -322,11 +327,6 @@
  is over.
 
 
-
-void
-CombinedBlockCache.returnBlock(BlockCacheKey cacheKey,
-   Cacheable block)
-
 
 
 
@@ -404,13 +404,19 @@
 
 
 Cacheable
+ByteBufferIOEngine.read(long offset,
+int length,
+CacheableDeserializer<Cacheable> deserializer)
+
+
+Cacheable
 FileIOEngine.read(longoffset,
 intlength,
 CacheableDeserializerCacheabledeserializer)
 Transfers data from file to the given byte buffer
 
 
-
+
 Cacheable
 IOEngine.read(longoffset,
 intlength,
@@ -418,12 +424,6 @@
 Transfers data from IOEngine to a Cacheable object.
 
 
-
-Cacheable
-ByteBufferIOEngine.read(longoffset,
-intlength,
-CacheableDeserializerCacheabledeserializer)
-
 
 Cacheable
 FileMmapEngine.read(longoffset,
@@ -492,13 +492,19 @@
 
 
 Cacheable
+ByteBufferIOEngine.read(longoffset,
+intlength,
+CacheableDeserializerCacheabledeserializer)
+
+
+Cacheable
 FileIOEngine.read(longoffset,
 intlength,
 CacheableDeserializerCacheabledeserializer)
 Transfers data from file to the given byte buffer
 
 
-
+
 Cacheable
 IOEngine.read(longoffset,
 intlength,
@@ -506,12 +512,6 @@
 Transfers data from IOEngine to a Cacheable object.
 
 
-
-Cacheable
-ByteBufferIOEngine.read(longoffset,
-intlength,
-CacheableDeserializerCacheabledeserializer)
-
 
 Cacheable
 FileMmapEngine.read(longoffset,



[01/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

2018-03-21 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 4e1efd6f6 -> 6eb695c8b


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
index 532d67b..f7d6df6 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
@@ -139,9 +139,9 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
+org.apache.hadoop.hbase.wal.IOTestProvider.AllowedOperations
 org.apache.hadoop.hbase.wal.TestWALSplit.Corruptions
 org.apache.hadoop.hbase.wal.FaultyFSLog.FailureType
-org.apache.hadoop.hbase.wal.IOTestProvider.AllowedOperations
 
 
 



[01/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site b5f95ca57 -> bd675fa38


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.ChunkType.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.ChunkType.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.ChunkType.html
index 93bb11a..6cd219e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.ChunkType.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.ChunkType.html
@@ -106,13 +106,13 @@
 098   float poolSizePercentage, float indexChunkSizePercentage,
 099   float initialCountPercentage,
 100   HeapMemoryManager heapMemoryManager) {
-101this.dataChunksPool = initializePool(globalMemStoreSize,
+101this.dataChunksPool = initializePool("data", globalMemStoreSize,
 102(1 - indexChunkSizePercentage) * poolSizePercentage,
 103initialCountPercentage, chunkSize, heapMemoryManager);
 104// The index chunks pool is needed only when the index type is CCM.
 105// Since the pools are not created at all when the index type isn't CCM,
 106// we don't need to check it here.
-107this.indexChunksPool = initializePool(globalMemStoreSize,
+107this.indexChunksPool = initializePool("index", globalMemStoreSize,
 108indexChunkSizePercentage * poolSizePercentage,
 109initialCountPercentage, (int) (indexChunkSizePercentage * chunkSize),
 110heapMemoryManager);
@@ -339,313 +339,315 @@
 331private static final int statThreadPeriod = 60 * 5;
 332private final AtomicLong chunkCount = new AtomicLong();
 333private final LongAdder reusedChunkCount = new LongAdder();
-334
-335MemStoreChunkPool(int chunkSize, int maxCount, int initialCount, float poolSizePercentage) {
-336  this.chunkSize = chunkSize;
-337  this.maxCount = maxCount;
-338  this.poolSizePercentage = poolSizePercentage;
-339  this.reclaimedChunks = new LinkedBlockingQueue<>();
-340  for (int i = 0; i < initialCount; i++) {
-341Chunk chunk = createChunk(true, CompactingMemStore.IndexType.ARRAY_MAP, chunkSize);
-342chunk.init();
-343reclaimedChunks.add(chunk);
-344  }
-345  chunkCount.set(initialCount);
-346  final String n = Thread.currentThread().getName();
-347  scheduleThreadPool = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
-348  .setNameFormat(n + "-MemStoreChunkPool Statistics").setDaemon(true).build());
-349  this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(), statThreadPeriod,
-350  statThreadPeriod, TimeUnit.SECONDS);
-351}
-352
-353/**
-354 * Poll a chunk from the pool, reset it if not null, else create a new chunk to return if we have
-355 * not yet created max allowed chunks count. When we have already created max allowed chunks and
-356 * no free chunks as of now, return null. It is the responsibility of the caller to make a chunk
-357 * then.
-358 * Note: Chunks returned by this pool must be put back to the pool after its use.
-359 * @return a chunk
-360 * @see #putbackChunks(Chunk)
-361 */
-362Chunk getChunk() {
-363  return getChunk(CompactingMemStore.IndexType.ARRAY_MAP);
-364}
-365
-366Chunk getChunk(CompactingMemStore.IndexType chunkIndexType) {
-367  Chunk chunk = reclaimedChunks.poll();
-368  if (chunk != null) {
-369chunk.reset();
-370reusedChunkCount.increment();
-371  } else {
-372// Make a chunk iff we have not yet created the maxCount chunks
-373while (true) {
-374  long created = this.chunkCount.get();
-375  if (created < this.maxCount) {
-376if (this.chunkCount.compareAndSet(created, created + 1)) {
-377  chunk = createChunkForPool(chunkIndexType, chunkSize);
-378  break;
-379}
-380  } else {
-381break;
-382  }
-383}
-384  }
-385  return chunk;
-386}
-387
-388int getChunkSize() {
-389  return chunkSize;
-390}
-391
-392/**
-393 * Add the chunks to the pool; when the pool reaches its max size, it will skip the remaining
-394 * chunks
-395 * @param c
-396 */
-397private void putbackChunks(Chunk c) {
-398  int toAdd = this.maxCount - reclaimedChunks.size();
-399  if (c.isFromPool() && c.size == chunkSize && toAdd > 0) {
-400reclaimedChunks.add(c);
-401  } else {
-402// remove the chunk (that is not going to pool)
-403// though it is initially from the pool or not
-404
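
The getChunk loop above is a lock-free "create at most maxCount" guard: threads race on an AtomicLong, and only the thread whose compareAndSet wins may allocate a new chunk. The same idiom reduced to a standalone helper (a sketch, not the HBase class itself):

    import java.util.concurrent.atomic.AtomicLong;

    // Returns true when this thread has won the right to create one more instance.
    static boolean tryReserveSlot(AtomicLong created, long maxCount) {
      while (true) {
        long current = created.get();
        if (current >= maxCount) {
          return false;                            // pool is at capacity
        }
        if (created.compareAndSet(current, current + 1)) {
          return true;                             // we own the new slot
        }
        // Lost the CAS race; re-read the counter and retry.
      }
    }
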

[01/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

2018-03-16 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 291f0a4e9 -> 8b1eaec14


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.html
index b9b7a7e..f3af6db 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.html
@@ -68,289 +68,257 @@
 060String cpName = "a.b.c.d";
 061TableDescriptor htd
 062  = 
TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
-063.addCoprocessor(cpName)
-064.addCoprocessor(cpName)
+063.setCoprocessor(cpName)
+064.setCoprocessor(cpName)
 065.build();
 066  }
 067
 068  @Test
-069  public void 
testAddCoprocessorWithSpecStr() throws IOException {
-070String cpName = "a.b.c.d";
-071TableDescriptorBuilder builder
-072  = 
TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME);
-073
-074try {
-075  
builder.addCoprocessorWithSpec(cpName);
-076  fail();
-077} catch (IllegalArgumentException 
iae) {
-078  // Expected as cpName is invalid
-079}
-080
-081// Try minimal spec.
-082try {
-083  
builder.addCoprocessorWithSpec("file:///some/path" + "|" + cpName);
-084  fail();
-085} catch (IllegalArgumentException 
iae) {
-086  // Expected to be invalid
-087}
-088
-089// Try more spec.
-090String spec = 
"hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2";
-091try {
-092  
builder.addCoprocessorWithSpec(spec);
-093} catch (IllegalArgumentException 
iae) {
-094  fail();
-095}
-096
-097// Try double add of same 
coprocessor
-098try {
-099  
builder.addCoprocessorWithSpec(spec);
-100  fail();
-101} catch (IOException ioe) {
-102  // Expect that the coprocessor 
already exists
-103}
-104  }
-105
-106  @Test
-107  public void testPb() throws 
DeserializationException, IOException {
-108final int v = 123;
-109TableDescriptor htd
-110  = 
TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
-111  .setMaxFileSize(v)
-112  
.setDurability(Durability.ASYNC_WAL)
-113  .setReadOnly(true)
-114  .setRegionReplication(2)
-115  .build();
-116
-117byte [] bytes = 
TableDescriptorBuilder.toByteArray(htd);
-118TableDescriptor deserializedHtd = 
TableDescriptorBuilder.parseFrom(bytes);
-119assertEquals(htd, deserializedHtd);
-120assertEquals(v, 
deserializedHtd.getMaxFileSize());
-121
assertTrue(deserializedHtd.isReadOnly());
-122assertEquals(Durability.ASYNC_WAL, 
deserializedHtd.getDurability());
-123assertEquals(2, 
deserializedHtd.getRegionReplication());
-124  }
-125
-126  /**
-127   * Test cps in the table description
-128   * @throws Exception
-129   */
-130  @Test
-131  public void testGetSetRemoveCP() throws 
Exception {
-132// simple CP
-133String className = 
"org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver";
-134TableDescriptor desc
-135  = 
TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
-136 .addCoprocessor(className) // 
add and check that it is present
-137.build();
-138
assertTrue(desc.hasCoprocessor(className));
-139desc = 
TableDescriptorBuilder.newBuilder(desc)
-140 .removeCoprocessor(className) // 
remove it and check that it is gone
-141.build();
-142
assertFalse(desc.hasCoprocessor(className));
-143  }
-144
-145  /**
-146   * Test cps in the table description
-147   * @throws Exception
-148   */
-149  @Test
-150  public void testSetListRemoveCP() 
throws Exception {
-151TableDescriptor desc
-152  = 
TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
-153// Check that no coprocessor is present.
-154
assertTrue(desc.getCoprocessors().isEmpty());
-155
-156// simple CP
-157String className1 = 
"org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver";
-158String className2 = 
"org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver";
-159desc = 
TableDescriptorBuilder.newBuilder(desc)
-160.addCoprocessor(className1) 
// Add the 1 coprocessor and check if present.
-161.build();
-162
assertTrue(desc.getCoprocessors().size() == 1);
-163
assertTrue(desc.getCoprocessors().contains(className1));
-164
-165desc = 
TableDescriptorBuilder.newBuilder(desc)
-166// Add the 2nd coprocessor 
and check if present.
-167// remove it and check that 
it is gone
-168
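
The test above exercises two things worth seeing side by side: the addCoprocessor-to-setCoprocessor rename, and a protobuf round-trip through toByteArray/parseFrom. A short sketch of that round-trip, assuming an HBase 2.x client on the classpath and using only the builder calls shown in the test:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class DescriptorRoundTrip {
  public static void main(String[] args) throws Exception {
    TableDescriptor htd = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))
        .setMaxFileSize(123)
        .setDurability(Durability.ASYNC_WAL)
        .setRegionReplication(2)
        .build();

    // Serialize to protobuf bytes and back; the copies must compare equal.
    byte[] bytes = TableDescriptorBuilder.toByteArray(htd);
    TableDescriptor copy = TableDescriptorBuilder.parseFrom(bytes);
    assert htd.equals(copy);
    assert copy.getRegionReplication() == 2;
  }
}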

[01/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

2018-03-15 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 291a8cf98 -> a754d8956


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/replication/TableReplicationPeerStorage.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/TableReplicationPeerStorage.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/TableReplicationPeerStorage.html
new file mode 100644
index 000..d59a2b0
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/TableReplicationPeerStorage.html
@@ -0,0 +1,497 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+TableReplicationPeerStorage (Apache HBase 3.0.0-SNAPSHOT API)
+
+
+
+
+
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+org.apache.hadoop.hbase.replication
+Class 
TableReplicationPeerStorage
+
+
+
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.replication.TableReplicationStorageBase
+
+
+org.apache.hadoop.hbase.replication.TableReplicationPeerStorage
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+ReplicationPeerStorage
+
+
+
+@InterfaceAudience.Private
+public class TableReplicationPeerStorage
+extends TableReplicationStorageBase
+implements ReplicationPeerStorage
+Table based replication peer storage.
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+
+
+
+Fields inherited from classorg.apache.hadoop.hbase.replication.TableReplicationStorageBase
+conf,
 FAMILY_HFILE_REFS,
 FAMILY_PEER,
 FAMILY_QUEUE,
 FAMILY_REGIONS,
 FAMILY_RS_STATE,
 FAMILY_WAL,
 QU
 ALIFIER_PEER_CONFIG, QUALIFIER_PEER_STATE,
 QUALIFIER_STATE_ENABLED,
 REPLICATION_TABLE,
 zookeeper
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+TableReplicationPeerStorage(ZKWatcherzookeeper,
+   
org.apache.hadoop.conf.Configurationconf)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+void
+addPeer(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
+   ReplicationPeerConfigpeerConfig,
+   booleanenabled)
+Add a replication peer.
+
+
+
+ReplicationPeerConfig
+getPeerConfig(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId)
+Get the peer config of a replication peer.
+
+
+
+boolean
+isPeerEnabled(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId)
+Test whether a replication peer is enabled.
+
+
+
+https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+listPeerIds()
+Return the peer ids of all replication peers.
+
+
+
+private boolean
+peerExist(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
+ Tabletable)
+
+
+void
+removePeer(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId)
+Remove a replication peer.
+
+
+
+void
+setPeerState(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
+booleanenabled)
+Set the state of peer, true to 
ENABLED, otherwise to DISABLED.
+
+
+
+void
+updatePeerConfig(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
+
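
The method summary above amounts to a small CRUD surface over replication peer metadata. For orientation, here is that surface restated as a plain Java interface; this is an illustrative reduction (ReplicationPeerConfig is stood in by a String, and exceptions are collapsed to Exception), not the actual HBase interface:

import java.util.List;

// Illustrative reduction of the method summary above; not the real interface.
interface PeerStorageSketch {
  void addPeer(String peerId, String peerConfig, boolean enabled) throws Exception;
  void removePeer(String peerId) throws Exception;
  void setPeerState(String peerId, boolean enabled) throws Exception; // true = ENABLED
  void updatePeerConfig(String peerId, String peerConfig) throws Exception;
  List<String> listPeerIds() throws Exception;
  boolean isPeerEnabled(String peerId) throws Exception;
  String getPeerConfig(String peerId) throws Exception;
}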

[01/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

2018-03-07 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site cdf211463 -> 81cde4cee


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/LockedResourceType.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/LockedResourceType.html
 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/LockedResourceType.html
index ef71485..3fe2b14 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/LockedResourceType.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/LockedResourceType.html
@@ -120,13 +120,13 @@
 LockAndQueuequeue)
 
 
-LockedResource
-MasterProcedureScheduler.getLockResource(LockedResourceTyperesourceType,
+(package private) LockedResource
+SchemaLocking.getLockResource(LockedResourceTyperesourceType,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringresourceName)
 
 
-(package private) LockedResource
-SchemaLocking.getLockResource(LockedResourceTyperesourceType,
+LockedResource
+MasterProcedureScheduler.getLockResource(LockedResourceTyperesourceType,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringresourceName)
 
 
@@ -195,12 +195,12 @@ the order they are declared.
 
 
 LockedResource
-SimpleProcedureScheduler.getLockResource(LockedResourceTyperesourceType,
+ProcedureScheduler.getLockResource(LockedResourceTyperesourceType,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringresourceName)
 
 
 LockedResource
-ProcedureScheduler.getLockResource(LockedResourceTyperesourceType,
+SimpleProcedureScheduler.getLockResource(LockedResourceTyperesourceType,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringresourceName)
 
 



[01/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

2018-03-06 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site d4d9d9b4a -> d347bde82


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/master/RegionState.State.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/RegionState.State.html 
b/devapidocs/org/apache/hadoop/hbase/master/RegionState.State.html
index 136bc7e..29aabe8 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/RegionState.State.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/RegionState.State.html
@@ -382,7 +382,7 @@ the order they are declared.
 
 
 values
-public staticRegionState.State[]values()
+public staticRegionState.State[]values()
 Returns an array containing the constants of this enum 
type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -402,7 +402,7 @@ for (RegionState.State c : RegionState.State.values())
 
 
 valueOf
-public staticRegionState.StatevalueOf(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname)
+public staticRegionState.StatevalueOf(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname)
 Returns the enum constant of this type with the specified 
name.
 The string must match exactly an identifier used to declare an
 enum constant in this type.  (Extraneous whitespace characters are 



[01/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

2018-03-05 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site d77bb8707 -> 6b94a2f26


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
index e780f3e..d9048c2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
@@ -7,91 +7,91 @@
 
 
 001/**
-002 *
-003 * Licensed to the Apache Software 
Foundation (ASF) under one
-004 * or more contributor license 
agreements.  See the NOTICE file
-005 * distributed with this work for 
additional information
-006 * regarding copyright ownership.  The 
ASF licenses this file
-007 * to you under the Apache License, 
Version 2.0 (the
-008 * "License"); you may not use this file 
except in compliance
-009 * with the License.  You may obtain a 
copy of the License at
-010 *
-011 * 
http://www.apache.org/licenses/LICENSE-2.0
-012 *
-013 * Unless required by applicable law or 
agreed to in writing, software
-014 * distributed under the License is 
distributed on an "AS IS" BASIS,
-015 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
-016 * See the License for the specific 
language governing permissions and
-017 * limitations under the License.
-018 */
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing, software
+013 * distributed under the License is 
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+015 * See the License for the specific 
language governing permissions and
+016 * limitations under the License.
+017 */
+018package 
org.apache.hadoop.hbase.regionserver;
 019
-020package 
org.apache.hadoop.hbase.regionserver;
-021
-022import java.io.FileNotFoundException;
-023import java.io.IOException;
-024import java.io.InterruptedIOException;
-025import java.util.ArrayList;
-026import java.util.Collection;
-027import java.util.List;
-028import java.util.Optional;
-029import java.util.UUID;
-030import 
org.apache.hadoop.conf.Configuration;
-031import 
org.apache.hadoop.fs.FSDataInputStream;
-032import 
org.apache.hadoop.fs.FSDataOutputStream;
-033import org.apache.hadoop.fs.FileStatus;
-034import org.apache.hadoop.fs.FileSystem;
-035import org.apache.hadoop.fs.FileUtil;
-036import 
org.apache.hadoop.fs.LocatedFileStatus;
-037import org.apache.hadoop.fs.Path;
-038import 
org.apache.hadoop.fs.permission.FsPermission;
-039import org.apache.hadoop.hbase.Cell;
-040import 
org.apache.hadoop.hbase.HConstants;
-041import 
org.apache.hadoop.hbase.PrivateCellUtil;
-042import 
org.apache.hadoop.hbase.backup.HFileArchiver;
-043import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-044import 
org.apache.hadoop.hbase.client.RegionInfo;
-045import 
org.apache.hadoop.hbase.client.TableDescriptor;
-046import 
org.apache.hadoop.hbase.fs.HFileSystem;
-047import 
org.apache.hadoop.hbase.io.Reference;
-048import 
org.apache.hadoop.hbase.util.Bytes;
-049import 
org.apache.hadoop.hbase.util.FSHDFSUtils;
-050import 
org.apache.hadoop.hbase.util.FSUtils;
-051import 
org.apache.hadoop.hbase.util.Pair;
-052import 
org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
-053import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-054import 
org.apache.yetus.audience.InterfaceAudience;
-055import org.slf4j.Logger;
-056import org.slf4j.LoggerFactory;
-057import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-058
-059import 
edu.umd.cs.findbugs.annotations.Nullable;
-060
-061/**
-062 * View to an on-disk Region.
-063 * Provides the set of methods necessary 
to interact with the on-disk region data.
-064 */
-065@InterfaceAudience.Private
-066public class HRegionFileSystem {
-067  private static final Logger LOG = 
LoggerFactory.getLogger(HRegionFileSystem.class);
-068
-069  /** Name of the region info file that 
resides just under the region directory. */
-070  public final static String 
REGION_INFO_FILE = ".regioninfo";
-071
-072  /** Temporary subdirectory of the 
region directory used for merges. */
-073  public static final String 
REGION_MERGES_DIR = ".merges";
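
HRegionFileSystem is described as a view onto an on-disk region, with region metadata in a .regioninfo file just under the region directory and merges staged in a .merges subdirectory. A small sketch of how such paths compose with Hadoop's Path API; the regionDir layout here is an assumption for illustration, not the authoritative HBase directory scheme:

import org.apache.hadoop.fs.Path;

// Illustrative path composition for an on-disk region view. The two constants
// match the source above; the table-dir layout itself is a simplifying assumption.
final class RegionPaths {
  static final String REGION_INFO_FILE = ".regioninfo";
  static final String REGION_MERGES_DIR = ".merges";

  static Path regionDir(Path tableDir, String encodedRegionName) {
    return new Path(tableDir, encodedRegionName);
  }

  static Path regionInfoFile(Path regionDir) {
    return new Path(regionDir, REGION_INFO_FILE); // sits just under the region dir
  }

  static Path mergesDir(Path regionDir) {
    return new Path(regionDir, REGION_MERGES_DIR); // temporary staging area for merges
  }
}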

[01/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

2018-03-02 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 374a4b291 -> eb05e3e3b


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/class-use/Cursor.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/class-use/Cursor.html 
b/apidocs/org/apache/hadoop/hbase/client/class-use/Cursor.html
index c91a828..15bc56d 100644
--- a/apidocs/org/apache/hadoop/hbase/client/class-use/Cursor.html
+++ b/apidocs/org/apache/hadoop/hbase/client/class-use/Cursor.html
@@ -120,7 +120,7 @@
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">OptionalCursor
+https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">OptionalCursor
 AdvancedScanResultConsumer.ScanController.cursor()
 Get the scan cursor if available.
 



[01/51] [partial] hbase-site git commit: Published site at .

2018-02-20 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site e82a131d3 -> ec8bf7616


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/DelayedProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/DelayedProcedure.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/DelayedProcedure.html
new file mode 100644
index 000..bd92c85
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/DelayedProcedure.html
@@ -0,0 +1,100 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing, software
+013 * distributed under the License is 
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+015 * See the License for the specific 
language governing permissions and
+016 * limitations under the License.
+017 */
+018package 
org.apache.hadoop.hbase.procedure2;
+019
+020import 
org.apache.hadoop.hbase.procedure2.util.DelayedUtil;
+021import 
org.apache.yetus.audience.InterfaceAudience;
+022
+023@InterfaceAudience.Private
+024class DelayedProcedure extends DelayedUtil.DelayedContainerWithTimestamp<Procedure<?>> {
+025  public DelayedProcedure(Procedure<?> procedure) {
+026super(procedure, 
procedure.getTimeoutTimestamp());
+027  }
+028}
+
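
DelayedProcedure does one job: pair a procedure with its timeout timestamp so it can sit in a delay queue and surface only when the timeout passes. A minimal sketch of the same idea using java.util.concurrent directly, with a plain Runnable standing in for Procedure and a hand-rolled Delayed replacing DelayedUtil's container:

import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

// A container that becomes available only after its timeout timestamp passes.
final class DelayedTask implements Delayed {
  final Runnable payload;        // stand-in for the wrapped Procedure
  final long timeoutMillis;      // absolute wall-clock deadline

  DelayedTask(Runnable payload, long timeoutMillis) {
    this.payload = payload;
    this.timeoutMillis = timeoutMillis;
  }

  @Override
  public long getDelay(TimeUnit unit) {
    return unit.convert(timeoutMillis - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
  }

  @Override
  public int compareTo(Delayed other) {
    return Long.compare(getDelay(TimeUnit.MILLISECONDS), other.getDelay(TimeUnit.MILLISECONDS));
  }

  public static void main(String[] args) throws InterruptedException {
    DelayQueue<DelayedTask> queue = new DelayQueue<>();
    queue.put(new DelayedTask(() -> System.out.println("timed out"),
        System.currentTimeMillis() + 100));
    queue.take().payload.run(); // blocks roughly 100 ms, then runs the payload
  }
}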



[01/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 56564b90a -> 991224b95


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
index 1a47423..58a6306 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
@@ -1614,7 +1614,7 @@
 1606int storefiles = 0;
 1607int storeUncompressedSizeMB = 0;
 1608int storefileSizeMB = 0;
-1609int memstoreSizeMB = (int) 
(r.getMemStoreSize() / 1024 / 1024);
+1609int memstoreSizeMB = (int) 
(r.getMemStoreDataSize() / 1024 / 1024);
 1610long storefileIndexSizeKB = 0;
 1611int rootLevelIndexSizeKB = 0;
 1612int totalStaticIndexSizeKB = 0;
@@ -2751,11 +2751,11 @@
 2743  }
 2744
 2745  /**
-2746   * @return A new Map of online regions 
sorted by region size with the first entry being the
-2747   * biggest.  If two regions are the 
same size, then the last one found wins; i.e. this method
-2748   * may NOT return all regions.
+2746   * @return A new Map of online regions 
sorted by region off-heap size with the first entry being
+2747   *   the biggest.  If two regions are 
the same size, then the last one found wins; i.e. this
+2748   *   method may NOT return all 
regions.
 2749   */
-2750  SortedMap<Long, HRegion> getCopyOfOnlineRegionsSortedBySize() {
+2750  SortedMap<Long, HRegion> getCopyOfOnlineRegionsSortedByOffHeapSize() {
 2751// we'll sort the regions in 
reverse
 2752SortedMap<Long, HRegion> sortedRegions = new TreeMap<>(
 2753new Comparator<Long>() {
@@ -2766,974 +2766,995 @@
 2758});
 2759// Copy over all regions. Regions 
are sorted by size with biggest first.
 2760for (HRegion region : 
this.onlineRegions.values()) {
-2761  
sortedRegions.put(region.getMemStoreSize(), region);
+2761  
sortedRegions.put(region.getMemStoreOffHeapSize(), region);
 2762}
 2763return sortedRegions;
 2764  }
 2765
 2766  /**
-2767   * @return time stamp in millis of 
when this region server was started
-2768   */
-2769  public long getStartcode() {
-2770return this.startcode;
-2771  }
-2772
-2773  /** @return reference to 
FlushRequester */
-2774  @Override
-2775  public FlushRequester 
getFlushRequester() {
-2776return this.cacheFlusher;
-2777  }
-2778
-2779  @Override
-2780  public CompactionRequester 
getCompactionRequestor() {
-2781return this.compactSplitThread;
-2782  }
-2783
-2784  /**
-2785   * Get the top N most loaded regions 
this server is serving so we can tell the
-2786   * master which regions it can 
reallocate if we're overloaded. TODO: actually
-2787   * calculate which regions are most 
loaded. (Right now, we're just grabbing
-2788   * the first N regions being served 
regardless of load.)
+2767   * @return A new Map of online regions 
sorted by region heap size with the first entry being the
+2768   *   biggest.  If two regions are the 
same size, then the last one found wins; i.e. this method
+2769   *   may NOT return all regions.
+2770   */
+2771  SortedMap<Long, HRegion> getCopyOfOnlineRegionsSortedByOnHeapSize() {
+2772// we'll sort the regions in reverse
+2773SortedMap<Long, HRegion> sortedRegions = new TreeMap<>(
+2774new Comparator<Long>() {
+2775  @Override
+2776  public int compare(Long a, 
Long b) {
+2777return -1 * 
a.compareTo(b);
+2778  }
+2779});
+2780// Copy over all regions. Regions 
are sorted by size with biggest first.
+2781for (HRegion region : 
this.onlineRegions.values()) {
+2782  
sortedRegions.put(region.getMemStoreHeapSize(), region);
+2783}
+2784return sortedRegions;
+2785  }
+2786
+2787  /**
+2788   * @return time stamp in millis of 
when this region server was started
 2789   */
-2790  protected RegionInfo[] 
getMostLoadedRegions() {
-2791ArrayList<RegionInfo> regions = new ArrayList<>();
-2792for (Region r : 
onlineRegions.values()) {
-2793  if (!r.isAvailable()) {
-2794continue;
-2795  }
-2796  if (regions.size() < numRegionsToReport) {
-2797
regions.add(r.getRegionInfo());
-2798  } else {
-2799break;
-2800  }
-2801}
-2802return regions.toArray(new 
RegionInfo[regions.size()]);
+2790  public long getStartcode() {
+2791return this.startcode;
+2792  }
+2793
+2794  /** @return reference to 
FlushRequester */
+2795  @Override
+2796  public FlushRequester 
getFlushRequester() {
+2797return this.cacheFlusher;
+2798  }
+2799
+2800  @Override
+2801  public CompactionRequester 
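
The rewritten javadoc above is careful to warn that the sorted-regions maps may NOT return all regions: the map is keyed by memstore size, so two equally sized regions collide and the last one found wins. The caveat is easy to reproduce with plain types:

import java.util.Comparator;
import java.util.SortedMap;
import java.util.TreeMap;

public class SizeKeyCollision {
  public static void main(String[] args) {
    // Reverse order: biggest key first, matching the comparator in the excerpt.
    SortedMap<Long, String> bySize = new TreeMap<>(Comparator.<Long>reverseOrder());
    bySize.put(1024L, "regionA");
    bySize.put(1024L, "regionB"); // same size: silently replaces regionA
    bySize.put(512L, "regionC");
    System.out.println(bySize);   // {1024=regionB, 512=regionC} -- one region lost
  }
}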

[01/51] [partial] hbase-site git commit: Published site at .

2018-02-17 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 93506d388 -> 193b42599


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.RR.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.RR.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.RR.html
index 9bcea5f..1fde6ea 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.RR.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.RR.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static enum TestAsyncProcess.RR
+private static enum TestAsyncProcess.RR
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumTestAsyncProcess.RR
 After reading TheDailyWtf, I always wanted to create a 
MyBoolean enum like this!
 
@@ -217,7 +217,7 @@ the order they are declared.
 
 
 TRUE
-public static finalTestAsyncProcess.RR TRUE
+public static finalTestAsyncProcess.RR TRUE
 
 
 
@@ -226,7 +226,7 @@ the order they are declared.
 
 
 FALSE
-public static finalTestAsyncProcess.RR FALSE
+public static finalTestAsyncProcess.RR FALSE
 
 
 
@@ -235,7 +235,7 @@ the order they are declared.
 
 
 DONT_CARE
-public static finalTestAsyncProcess.RR DONT_CARE
+public static finalTestAsyncProcess.RR DONT_CARE
 
 
 
@@ -244,7 +244,7 @@ the order they are declared.
 
 
 FAILED
-public static finalTestAsyncProcess.RR FAILED
+public static finalTestAsyncProcess.RR FAILED
 
 
 
@@ -261,7 +261,7 @@ the order they are declared.
 
 
 values
-public staticTestAsyncProcess.RR[]values()
+public staticTestAsyncProcess.RR[]values()
 Returns an array containing the constants of this enum 
type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -281,7 +281,7 @@ for (TestAsyncProcess.RR c : TestAsyncProcess.RR.values())
 
 
 valueOf
-public staticTestAsyncProcess.RRvalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname)
+public staticTestAsyncProcess.RRvalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname)
 Returns the enum constant of this type with the specified 
name.
 The string must match exactly an identifier used to declare an
 enum constant in this type.  (Extraneous whitespace characters are 
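
TestAsyncProcess.RR is a four-valued "boolean" used to script mock responses in the test. A tiny sketch of how such an enum gets consumed; the handling of each constant below is invented for illustration, not the test's actual logic:

// Four-valued "boolean" in the spirit of TestAsyncProcess.RR; what each value
// triggers below is invented for illustration.
enum RR { TRUE, FALSE, DONT_CARE, FAILED }

public class ScriptedResponse {
  static boolean respond(RR scripted, boolean fallback) {
    switch (scripted) {
      case TRUE:      return true;
      case FALSE:     return false;
      case DONT_CARE: return fallback;                   // defer to the caller
      case FAILED:    throw new IllegalStateException(); // simulate a failure
      default:        throw new AssertionError(scripted);
    }
  }

  public static void main(String[] args) {
    System.out.println(respond(RR.DONT_CARE, true)); // prints: true
  }
}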



[01/51] [partial] hbase-site git commit: Published site at .

2018-02-16 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site b8cb82420 -> 94208cfe6


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/RegionServerServices.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/RegionServerServices.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/RegionServerServices.html
index 48ae316..278cf20 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/RegionServerServices.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/RegionServerServices.html
@@ -154,11 +154,11 @@
 
 
 private RegionServerServices
-LogRollBackupSubprocedure.rss
+LogRollRegionServerProcedureManager.rss
 
 
 private RegionServerServices
-LogRollRegionServerProcedureManager.rss
+LogRollBackupSubprocedure.rss
 
 
 
@@ -294,15 +294,15 @@
 
 
 
+void
+RegionServerProcedureManagerHost.initialize(RegionServerServicesrss)
+
+
 abstract void
 RegionServerProcedureManager.initialize(RegionServerServicesrss)
 Initialize a globally barriered procedure for region 
servers.
 
 
-
-void
-RegionServerProcedureManagerHost.initialize(RegionServerServicesrss)
-
 
 
 
@@ -357,15 +357,15 @@
 
 
 private RegionServerServices
-QuotaCache.rsServices
+RegionServerSpaceQuotaManager.rsServices
 
 
 private RegionServerServices
-RegionServerRpcQuotaManager.rsServices
+QuotaCache.rsServices
 
 
 private RegionServerServices
-RegionServerSpaceQuotaManager.rsServices
+RegionServerRpcQuotaManager.rsServices
 
 
 
@@ -527,43 +527,43 @@
 
 
 
-private RegionServerServices
-CompactedHFilesDischarger.regionServerServices
-
-
 (package private) RegionServerServices
 RegionServerCoprocessorHost.RegionServerEnvironmentForCoreCoprocessors.regionServerServices
 
+
+private RegionServerServices
+CompactedHFilesDischarger.regionServerServices
+
 
 private RegionServerServices
 RegionServerCoprocessorHost.rsServices
 
 
 (package private) RegionServerServices
-HRegion.rsServices
-
-
-(package private) RegionServerServices
 RegionCoprocessorHost.rsServices
 The region server services
 
 
-
+
 private RegionServerServices
 RegionCoprocessorHost.RegionEnvironmentForCoreCoprocessors.rsServices
 
+
+(package private) RegionServerServices
+HRegion.rsServices
+
 
 private RegionServerServices
 SplitLogWorker.server
 
 
-protected RegionServerServices
-LogRoller.services
-
-
 private RegionServerServices
 RegionServerCoprocessorHost.RegionServerEnvironment.services
 
+
+protected RegionServerServices
+LogRoller.services
+
 
 private RegionServerServices
 RegionCoprocessorHost.RegionEnvironment.services
@@ -582,13 +582,13 @@
 RegionServerCoprocessorHost.RegionServerEnvironmentForCoreCoprocessors.getRegionServerServices()
 
 
-(package private) RegionServerServices
-HRegion.getRegionServerServices()
-
-
 RegionServerServices
 RegionCoprocessorHost.RegionEnvironmentForCoreCoprocessors.getRegionServerServices()
 
+
+(package private) RegionServerServices
+HRegion.getRegionServerServices()
+
 
 
 
@@ -788,13 +788,13 @@
 
 
 
-private RegionServerServices
-CloseRegionHandler.rsServices
-
-
 protected RegionServerServices
 OpenRegionHandler.rsServices
 
+
+private RegionServerServices
+CloseRegionHandler.rsServices
+
 
 private RegionServerServices
 OpenRegionHandler.PostOpenDeployTasksThread.services
@@ -929,19 +929,17 @@
 
 
 static ThroughputController
-CompactionThroughputControllerFactory.create(RegionServerServicesserver,
+FlushThroughputControllerFactory.create(RegionServerServicesserver,
   org.apache.hadoop.conf.Configurationconf)
 
 
 static ThroughputController
-FlushThroughputControllerFactory.create(RegionServerServicesserver,
+CompactionThroughputControllerFactory.create(RegionServerServicesserver,
   org.apache.hadoop.conf.Configurationconf)
 
 
 void
-ThroughputController.setup(RegionServerServicesserver)
-Setup controller for the given region server.
-
+PressureAwareFlushThroughputController.setup(RegionServerServicesserver)
 
 
 void
@@ -953,11 +951,13 @@
 
 
 void
-PressureAwareFlushThroughputController.setup(RegionServerServicesserver)
+NoLimitThroughputController.setup(RegionServerServicesserver)
 
 
 void
-NoLimitThroughputController.setup(RegionServerServicesserver)
+ThroughputController.setup(RegionServerServicesserver)
+Setup controller for the given region server.
+
 
 
 



[01/51] [partial] hbase-site git commit: Published site at .

2018-02-15 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 8f0525332 -> 0cd17dc53


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.Queue.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.Queue.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.Queue.html
deleted file mode 100644
index 82665bf..000
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.Queue.html
+++ /dev/null
@@ -1,573 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-
-
-
-MasterProcedureScheduler.Queue (Apache HBase 3.0.0-SNAPSHOT API)
-
-
-
-
-
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":6,"i11":10,"i12":10};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],8:["t4","Concrete Methods"]};
-var altColor = "altColor";
-var rowColor = "rowColor";
-var tableTab = "tableTab";
-var activeTableTab = "activeTableTab";
-
-
-
-
-
-
-
-
-
-
-org.apache.hadoop.hbase.master.procedure
-Class 
MasterProcedureScheduler.QueueTKey extends http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableTKey
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
-
-
-org.apache.hadoop.hbase.util.AvlUtil.AvlNodeTNode
-
-
-org.apache.hadoop.hbase.util.AvlUtil.AvlLinkedNodeMasterProcedureScheduler.QueueTKey
-
-
-org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler.QueueTKey
-
-
-
-
-
-
-
-
-
-
-
-Direct Known Subclasses:
-MasterProcedureScheduler.PeerQueue,
 MasterProcedureScheduler.ServerQueue,
 MasterProcedureScheduler.TableQueue
-
-
-Enclosing class:
-MasterProcedureScheduler
-
-
-
-private abstract static class MasterProcedureScheduler.QueueTKey
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableTKey
-extends AvlUtil.AvlLinkedNodeMasterProcedureScheduler.QueueTKey
-
-
-
-
-
-
-
-
-
-
-
-Field Summary
-
-Fields
-
-Modifier and Type
-Field and Description
-
-
-private TKey
-key
-
-
-private LockStatus
-lockStatus
-
-
-private int
-priority
-
-
-private ProcedureDeque
-runnables
-
-
-
-
-
-
-Fields inherited from classorg.apache.hadoop.hbase.util.AvlUtil.AvlLinkedNode
-iterNext,
 iterPrev
-
-
-
-
-
-Fields inherited from classorg.apache.hadoop.hbase.util.AvlUtil.AvlNode
-avlHeight,
 avlLeft,
 avlRight
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors
-
-Constructor and Description
-
-
-Queue(TKeykey,
- intpriority,
- LockStatuslockStatus)
-
-
-Queue(TKeykey,
- LockStatuslockStatus)
-
-
-
-
-
-
-
-
-
-Method Summary
-
-All MethodsInstance MethodsAbstract MethodsConcrete Methods
-
-Modifier and Type
-Method and Description
-
-
-void
-add(Procedureproc,
-   booleanaddToFront)
-
-
-int
-compareKey(TKeycmpKey)
-
-
-int
-compareTo(MasterProcedureScheduler.QueueTKeyother)
-
-
-protected TKey
-getKey()
-
-
-protected LockStatus
-getLockStatus()
-
-
-protected int
-getPriority()
-
-
-boolean
-isAvailable()
-
-
-boolean
-isEmpty()
-
-
-Procedure
-peek()
-
-
-Procedure
-poll()
-
-
-(package private) abstract boolean
-requireExclusiveLock(Procedureproc)
-
-
-int
-size()
-
-
-http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
-toString()
-
-
-
-
-
-
-Methods inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--;
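
The deleted page documented MasterProcedureScheduler.Queue: a keyed, prioritized node that holds a deque of runnable procedures and is Comparable by key so it can live inside an AVL tree. A compact sketch of that shape, with an ArrayDeque of Runnables standing in for the ProcedureDeque and the LockStatus reduced to a boolean:

import java.util.ArrayDeque;

// Sketch of the Queue<TKey> node shape documented above; the AVL linkage,
// LockStatus, and Procedure types are simplified away.
final class QueueNode<TKey extends Comparable<TKey>> implements Comparable<QueueNode<TKey>> {
  private final TKey key;
  private final int priority;
  private final ArrayDeque<Runnable> runnables = new ArrayDeque<>();
  private boolean exclusivelyLocked; // stand-in for LockStatus

  QueueNode(TKey key, int priority) {
    this.key = key;
    this.priority = priority;
  }

  void add(Runnable proc, boolean addToFront) {
    if (addToFront) runnables.addFirst(proc); else runnables.addLast(proc);
  }

  Runnable peek() { return runnables.peekFirst(); }
  Runnable poll() { return runnables.pollFirst(); }
  boolean isEmpty() { return runnables.isEmpty(); }
  boolean isAvailable() { return !isEmpty() && !exclusivelyLocked; }
  int getPriority() { return priority; }
  int compareKey(TKey cmpKey) { return key.compareTo(cmpKey); }

  @Override
  public int compareTo(QueueNode<TKey> other) { return compareKey(other.key); }
}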

[01/51] [partial] hbase-site git commit: Published site at .

2018-02-14 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site c4828151c -> 828486ae9


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
index 75077a6..5da3ef8 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
@@ -521,33 +521,33 @@
 
 
 
-org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollectionHStoreFile
-StripeStoreFileManager.clearCompactedFiles()
-
-
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHStoreFile
 DefaultStoreFileManager.clearCompactedFiles()
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHStoreFile
 StoreFileManager.clearCompactedFiles()
 Clears all the compacted files and returns them.
 
 
-
+
 org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollectionHStoreFile
-StripeStoreFileManager.clearFiles()
+StripeStoreFileManager.clearCompactedFiles()
 
-
+
 org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollectionHStoreFile
 DefaultStoreFileManager.clearFiles()
 
-
+
 org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollectionHStoreFile
 StoreFileManager.clearFiles()
 Clears all the files currently in use and returns 
them.
 
 
+
+org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollectionHStoreFile
+StripeStoreFileManager.clearFiles()
+
 
 http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Mapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHStoreFile
 HRegion.close()
@@ -597,36 +597,36 @@
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
 title="class or interface in java.util">IteratorHStoreFile
-StripeStoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
-See StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValue)
- for details on this method.
-
+DefaultStoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
 title="class or interface in java.util">IteratorHStoreFile
-DefaultStoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
+StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
+Gets initial, full list of candidate store files to check 
for row-key-before.
+
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
 title="class or interface in java.util">IteratorHStoreFile
-StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
-Gets initial, full list of candidate store files to check 
for row-key-before.
+StripeStoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
+See StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValue)
+ for details on this method.
 
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHStoreFile
-StripeStoreFileManager.getCompactedfiles()
-
-
-http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHStoreFile
 DefaultStoreFileManager.getCompactedfiles()
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHStoreFile
 StoreFileManager.getCompactedfiles()
 List of compacted files inside this store that need to be excluded in reads
  because further new reads will be using only the newly created files out of compaction.
 
 
+
+http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHStoreFile
+StripeStoreFileManager.getCompactedfiles()
+
 
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHStoreFile
 HStore.getCompactedFiles()
@@ -637,26 +637,26 @@
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHStoreFile
-StripeStoreFileManager.getFilesForScan(byte[]startRow,
+DefaultStoreFileManager.getFilesForScan(byte[]startRow,
booleanincludeStartRow,
byte[]stopRow,
booleanincludeStopRow)
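
getFilesForScan narrows the store files to those that could contain rows in the scan range. A hedged sketch of that kind of range filter over raw byte[] row keys, using unsigned lexicographic comparison (Arrays.compareUnsigned, Java 9+); the FileRange type is a stand-in that keeps only a file's first and last row keys, and the include-start/include-stop flags are omitted for brevity:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Illustrative range filter in the spirit of getFilesForScan; FileRange is a
// stand-in that records only a file's first and last row keys.
final class FileRange {
  final byte[] firstRow;
  final byte[] lastRow;
  FileRange(byte[] firstRow, byte[] lastRow) {
    this.firstRow = firstRow;
    this.lastRow = lastRow;
  }
}

class ScanFileFilter {
  /** Keeps the files whose [firstRow, lastRow] range can overlap [startRow, stopRow). */
  static List<FileRange> filesForScan(List<FileRange> files, byte[] startRow, byte[] stopRow) {
    List<FileRange> result = new ArrayList<>();
    for (FileRange f : files) {
      boolean endsBeforeScan = Arrays.compareUnsigned(f.lastRow, startRow) < 0;
      boolean startsAfterScan = Arrays.compareUnsigned(f.firstRow, stopRow) >= 0;
      if (!endsBeforeScan && !startsAfterScan) {
        result.add(f); // may contain rows in the scan range
      }
    }
    return result;
  }
}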
 
 
 

[01/51] [partial] hbase-site git commit: Published site at .

2018-02-13 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 5ffe2146b -> f272b0e8f


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
index f82f773..b99a6b3 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
@@ -197,8 +197,8 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type
 org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType
+org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type
 
 
 



[01/51] [partial] hbase-site git commit: Published site at .

2018-02-10 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 833299a1e -> c83a37c86


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
index 75077a6..5da3ef8 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
 
 
 

[01/51] [partial] hbase-site git commit: Published site at .

2018-02-09 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site b10ff3d65 -> 0ab8335ec


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStore.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStore.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStore.html
index c3bbe29..98b6ace 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStore.html
@@ -172,14 +172,6 @@
 
 
 
-protected HStore
-StoreScanner.store
-
-
-protected HStore
-StoreFlusher.store
-
-
 private HStore
 CompactSplit.CompactionRunner.store
 
@@ -188,6 +180,14 @@
 CompactedHFilesDischargeHandler.store
 
 
+protected HStore
+StoreFlusher.store
+
+
+protected HStore
+StoreScanner.store
+
+
 private HStore
 CompactingMemStore.store
 
@@ -274,13 +274,13 @@
 FlushAllStoresPolicy.selectStoresToFlush()
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHStore
-FlushAllLargeStoresPolicy.selectStoresToFlush()
-
-
 abstract http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHStore
 FlushPolicy.selectStoresToFlush()
 
+
+http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHStore
+FlushAllLargeStoresPolicy.selectStoresToFlush()
+
 
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHStore
 FlushNonSloppyStoresFirstPolicy.selectStoresToFlush()
@@ -351,21 +351,21 @@
 
 
 protected void
-StripeStoreEngine.createComponents(org.apache.hadoop.conf.Configurationconf,
+DateTieredStoreEngine.createComponents(org.apache.hadoop.conf.Configurationconf,
 HStorestore,
-CellComparatorcomparator)
+CellComparatorkvComparator)
 
 
 protected void
-DateTieredStoreEngine.createComponents(org.apache.hadoop.conf.Configurationconf,
+DefaultStoreEngine.createComponents(org.apache.hadoop.conf.Configurationconf,
 HStorestore,
 CellComparatorkvComparator)
 
 
 protected void
-DefaultStoreEngine.createComponents(org.apache.hadoop.conf.Configurationconf,
+StripeStoreEngine.createComponents(org.apache.hadoop.conf.Configurationconf,
 HStorestore,
-CellComparatorkvComparator)
+CellComparatorcomparator)
 
 
 private void
@@ -595,11 +595,11 @@
 
 
 protected boolean
-FlushLargeStoresPolicy.shouldFlush(HStorestore)
+FlushAllLargeStoresPolicy.shouldFlush(HStorestore)
 
 
 protected boolean
-FlushAllLargeStoresPolicy.shouldFlush(HStorestore)
+FlushLargeStoresPolicy.shouldFlush(HStorestore)
 
 
 (package private) boolean
@@ -609,12 +609,6 @@
 
 
 boolean
-StoreFileScanner.shouldUseScanner(Scanscan,
-HStorestore,
-longoldestUnexpiredTS)
-
-
-boolean
 KeyValueScanner.shouldUseScanner(Scanscan,
 HStorestore,
 longoldestUnexpiredTS)
@@ -622,7 +616,7 @@
  want to use based on criteria such as Bloom filters and timestamp 
ranges.
 
 
-
+
 boolean
 SegmentScanner.shouldUseScanner(Scanscan,
 HStorestore,
@@ -631,12 +625,18 @@
  MemStoreScanner, currently returns true as default.
 
 
-
+
 boolean
 NonLazyKeyValueScanner.shouldUseScanner(Scanscan,
 HStorestore,
 longoldestUnexpiredTS)
 
+
+boolean
+StoreFileScanner.shouldUseScanner(Scanscan,
+HStorestore,
+longoldestUnexpiredTS)
+
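
shouldUseScanner lets a scanner be skipped entirely based on criteria such as Bloom filters and timestamp ranges. A hedged sketch of the timestamp-range part only; the field names and TTL handling below are illustrative assumptions:

// Illustrative time-range pre-filter in the spirit of shouldUseScanner: a file
// whose cells all predate what the scan can still see may be skipped outright.
final class TimeRangeFilter {
  /** True if cells in [fileMinTs, fileMaxTs] can still matter to the scan. */
  static boolean shouldUseScanner(long fileMinTs, long fileMaxTs,
      long scanMinTs, long scanMaxTs, long oldestUnexpiredTs) {
    if (fileMaxTs < oldestUnexpiredTs) {
      return false; // every cell in the file has already expired under TTL
    }
    // keep the scanner only when the file's range overlaps the scan's range
    return fileMaxTs >= scanMinTs && fileMinTs <= scanMaxTs;
  }
}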
 
 
 



[01/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 250fddb76 -> 6674e3ab7


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.html
index 2939a56..681e263 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.html
@@ -61,602 +61,608 @@
 053import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
 054import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 055import 
org.apache.hadoop.hbase.util.FSUtils;
-056import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-057import 
org.apache.yetus.audience.InterfaceAudience;
-058import org.slf4j.Logger;
-059import org.slf4j.LoggerFactory;
-060import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-061
-062/**
-063 * Distributes the task of log splitting 
to the available region servers.
-064 * Coordination happens via coordination 
engine. For every log file that has to be split a
-065 * task is created. SplitLogWorkers race 
to grab a task.
-066 *
-067 * <p>SplitLogManager monitors the tasks that it creates using the
-068 * timeoutMonitor thread. If a task's 
progress is slow then
-069 * {@link 
SplitLogManagerCoordination#checkTasks} will take away the
-070 * task from the owner {@link 
org.apache.hadoop.hbase.regionserver.SplitLogWorker}
-071 * and the task will be up for grabs 
again. When the task is done then it is
-072 * deleted by SplitLogManager.
-073 *
-074 * <p>Clients call {@link #splitLogDistributed(Path)} to split a region server's
-075 * log files. The caller thread waits in 
this method until all the log files
-076 * have been split.
-077 *
-078 * <p>All the coordination calls made by this class are asynchronous. This is mainly
-079 * to help reduce response time seen by 
the callers.
-080 *
-081 * <p>There is a race in this design between the SplitLogManager and the
-082 * SplitLogWorker. SplitLogManager might 
re-queue a task that has in reality
-083 * already been completed by a 
SplitLogWorker. We rely on the idempotency of
-084 * the log splitting task for 
correctness.
-085 *
-086 * <p>It is also assumed that every log splitting task is unique and once
-087 * completed (either with success or with error) it will not be submitted
-088 * again. If a task is resubmitted then there is a risk that an old "delete task"
-089 * can delete the re-submission.
-090 */
-091@InterfaceAudience.Private
-092public class SplitLogManager {
-093  private static final Logger LOG = 
LoggerFactory.getLogger(SplitLogManager.class);
-094
-095  private final MasterServices server;
-096
-097  private final Configuration conf;
-098  private final ChoreService 
choreService;
-099
-100  public static final int 
DEFAULT_UNASSIGNED_TIMEOUT = (3 * 60 * 1000); // 3 min
-101
-102  private long unassignedTimeout;
-103  private long lastTaskCreateTime = 
Long.MAX_VALUE;
-104
-105  @VisibleForTesting
-106  final ConcurrentMap<String, Task> tasks = new ConcurrentHashMap<>();
-107  private TimeoutMonitor 
timeoutMonitor;
-108
-109  private volatile Set<ServerName> deadWorkers = null;
-110  private final Object deadWorkersLock = 
new Object();
-111
-112  /**
-113   * Its OK to construct this object even 
when region-servers are not online. It does lookup the
-114   * orphan tasks in coordination engine 
but it doesn't block waiting for them to be done.
-115   * @param master the master services
-116   * @param conf the HBase 
configuration
-117   * @throws IOException
-118   */
-119  public SplitLogManager(MasterServices 
master, Configuration conf)
-120  throws IOException {
-121this.server = master;
-122this.conf = conf;
-123this.choreService = new 
ChoreService(master.getServerName() + "_splitLogManager_");
-124if 
(server.getCoordinatedStateManager() != null) {
-125  SplitLogManagerCoordination 
coordination = getSplitLogManagerCoordination();
-126  Set<String> failedDeletions = Collections.synchronizedSet(new HashSet<String>());
-127  SplitLogManagerDetails details = 
new SplitLogManagerDetails(tasks, master, failedDeletions);
-128  coordination.setDetails(details);
-129  coordination.init();
-130}
-131this.unassignedTimeout =
-132
conf.getInt("hbase.splitlog.manager.unassigned.timeout", 
DEFAULT_UNASSIGNED_TIMEOUT);
-133this.timeoutMonitor =
-134new 
TimeoutMonitor(conf.getInt("hbase.splitlog.manager.timeoutmonitor.period", 
1000),
-135master);
-136
choreService.scheduleChore(timeoutMonitor);
-137  }
-138
-139  private SplitLogManagerCoordination 
getSplitLogManagerCoordination() {
-140return 
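
The class comment above leans on two guarantees: workers claim a task atomically, and tasks are idempotent, so a re-queue of a slow task is harmless. The toy below (plain Java; every name is invented and it is not HBase code) models just those two properties:

import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class SplitRaceModel {
  enum State { UNASSIGNED, OWNED, DONE }

  // One entry per log file, mirroring "for every log file ... a task is created".
  static final ConcurrentMap<String, State> TASKS = new ConcurrentHashMap<>();

  // Workers race to grab a task with an atomic compare-and-set claim.
  static void worker(String name) {
    for (String task : TASKS.keySet()) {
      if (TASKS.replace(task, State.UNASSIGNED, State.OWNED)) {
        System.out.println(name + " split " + task);
        TASKS.put(task, State.DONE); // the manager would delete it here
      }
    }
  }

  public static void main(String[] args) throws InterruptedException {
    for (String log : List.of("wal.1", "wal.2", "wal.3")) {
      TASKS.put(log, State.UNASSIGNED);
    }
    Thread w1 = new Thread(() -> worker("worker-1"));
    Thread w2 = new Thread(() -> worker("worker-2"));
    w1.start(); w2.start();
    w1.join(); w2.join();
    System.out.println(TASKS); // every task ends DONE exactly once
  }
}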

[01/51] [partial] hbase-site git commit: Published site at .

2018-02-02 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 8e7f106a1 -> 1f2eeb225


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
index b8e6dfa..7b512ba 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
@@ -28,8473 +28,8472 @@
 020import static 
org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_LOCAL;
 021import static 
org.apache.hadoop.hbase.regionserver.HStoreFile.MAJOR_COMPACTION_KEY;
 022import static 
org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
-023import java.io.EOFException;
-024import java.io.FileNotFoundException;
-025import java.io.IOException;
-026import java.io.InterruptedIOException;
-027import java.lang.reflect.Constructor;
-028import java.nio.ByteBuffer;
-029import 
java.nio.charset.StandardCharsets;
-030import java.text.ParseException;
-031import java.util.AbstractList;
-032import java.util.ArrayList;
-033import java.util.Arrays;
-034import java.util.Collection;
-035import java.util.Collections;
-036import java.util.HashMap;
-037import java.util.HashSet;
-038import java.util.Iterator;
-039import java.util.List;
-040import java.util.Map;
-041import java.util.Map.Entry;
-042import java.util.NavigableMap;
-043import java.util.NavigableSet;
-044import java.util.Optional;
-045import java.util.RandomAccess;
-046import java.util.Set;
-047import java.util.TreeMap;
-048import java.util.UUID;
-049import java.util.concurrent.Callable;
-050import 
java.util.concurrent.CompletionService;
-051import 
java.util.concurrent.ConcurrentHashMap;
-052import 
java.util.concurrent.ConcurrentMap;
-053import 
java.util.concurrent.ConcurrentSkipListMap;
-054import 
java.util.concurrent.ExecutionException;
-055import 
java.util.concurrent.ExecutorCompletionService;
-056import 
java.util.concurrent.ExecutorService;
-057import java.util.concurrent.Executors;
-058import java.util.concurrent.Future;
-059import java.util.concurrent.FutureTask;
-060import 
java.util.concurrent.ThreadFactory;
-061import 
java.util.concurrent.ThreadPoolExecutor;
-062import java.util.concurrent.TimeUnit;
-063import 
java.util.concurrent.TimeoutException;
-064import 
java.util.concurrent.atomic.AtomicBoolean;
-065import 
java.util.concurrent.atomic.AtomicInteger;
-066import 
java.util.concurrent.atomic.AtomicLong;
-067import 
java.util.concurrent.atomic.LongAdder;
-068import java.util.concurrent.locks.Lock;
-069import 
java.util.concurrent.locks.ReadWriteLock;
-070import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-071import java.util.function.Function;
-072
-073import 
org.apache.hadoop.conf.Configuration;
-074import org.apache.hadoop.fs.FileStatus;
-075import org.apache.hadoop.fs.FileSystem;
-076import 
org.apache.hadoop.fs.LocatedFileStatus;
-077import org.apache.hadoop.fs.Path;
-078import org.apache.hadoop.hbase.Cell;
-079import 
org.apache.hadoop.hbase.CellBuilderType;
-080import 
org.apache.hadoop.hbase.CellComparator;
-081import 
org.apache.hadoop.hbase.CellComparatorImpl;
-082import 
org.apache.hadoop.hbase.CellScanner;
-083import 
org.apache.hadoop.hbase.CellUtil;
-084import 
org.apache.hadoop.hbase.CompareOperator;
-085import 
org.apache.hadoop.hbase.CompoundConfiguration;
-086import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-087import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-088import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-089import 
org.apache.hadoop.hbase.HConstants;
-090import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-091import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-092import 
org.apache.hadoop.hbase.HRegionInfo;
-093import 
org.apache.hadoop.hbase.KeyValue;
-094import 
org.apache.hadoop.hbase.KeyValueUtil;
-095import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-096import 
org.apache.hadoop.hbase.NotServingRegionException;
-097import 
org.apache.hadoop.hbase.PrivateCellUtil;
-098import 
org.apache.hadoop.hbase.RegionTooBusyException;
-099import 
org.apache.hadoop.hbase.TableName;
-100import org.apache.hadoop.hbase.Tag;
-101import org.apache.hadoop.hbase.TagUtil;
-102import 
org.apache.hadoop.hbase.UnknownScannerException;
-103import 
org.apache.hadoop.hbase.client.Append;
-104import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-105import 
org.apache.hadoop.hbase.client.CompactionState;
-106import 
org.apache.hadoop.hbase.client.Delete;
-107import 
org.apache.hadoop.hbase.client.Durability;
-108import 
org.apache.hadoop.hbase.client.Get;
-109import 
org.apache.hadoop.hbase.client.Increment;
-110import 

[01/51] [partial] hbase-site git commit: Published site at .

2018-01-29 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site b58a219d1 -> cc6597ecc


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetionWithCoprocessor.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetionWithCoprocessor.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetionWithCoprocessor.html
index ebec646..d65a17f 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetionWithCoprocessor.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetionWithCoprocessor.html
@@ -75,13 +75,13 @@ var activeTableTab = "activeTableTab";
 
 Summary:
 Nested|
-Field|
+Field|
 Constr|
 Method
 
 
 Detail:
-Field|
+Field|
 Constr|
 Method
 
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestFromClientSideScanExcpetionWithCoprocessor
+public class TestFromClientSideScanExcpetionWithCoprocessor
 extends TestFromClientSideScanExcpetion
 Test all client operations with a coprocessor that just 
implements the default flush/compact/scan
  policy.
@@ -145,6 +145,17 @@ extends 
+Fields
+
+Modifier and Type
+Field and Description
+
+
+static HBaseClassTestRule
+CLASS_RULE
+
+
 
 
 
@@ -210,6 +221,23 @@ extends 
 
 
+
+
+
+
+
+Field Detail
+
+
+
+
+
+CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
+
+
+
+
 
 
 
@@ -222,7 +250,7 @@ extends 
 
 TestFromClientSideScanExcpetionWithCoprocessor
-public TestFromClientSideScanExcpetionWithCoprocessor()
+public TestFromClientSideScanExcpetionWithCoprocessor()
 
 
 
@@ -239,7 +267,7 @@ extends 
 
 setUpBeforeClass
-public static void setUpBeforeClass()
+public static void setUpBeforeClass()
                              throws Exception
 
 Throws:
@@ -301,13 +329,13 @@ extends 
 Summary:
 Nested|
-Field|
+Field|
 Constr|
 Method
 
 
 Detail:
-Field|
+Field|
 Constr|
 Method
 



[01/51] [partial] hbase-site git commit: Published site at .

2018-01-28 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site f9958bb15 -> aa7ffc92b


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html
index 202fee6..93f49d5 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-shaded-client archetype  
About
 
@@ -119,7 +119,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-27
+  Last Published: 
2018-01-28
 
 
 



[01/51] [partial] hbase-site git commit: Published site at .

2018-01-26 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 7a00907bf -> 96e5e102a


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatRegionScanner.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatRegionScanner.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatRegionScanner.html
index 281c243..1a84ee1 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatRegionScanner.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatRegionScanner.html
@@ -152,433 +152,461 @@
 144
 145  /**
 146   * Make puts to put the input value into each combination of row, family, and qualifier
-147   * @param rows
-148   * @param families
-149   * @param qualifiers
-150   * @param value
-151   * @return
-152   * @throws IOException
-153   */
-154  static ArrayList<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
-155      byte[] value) throws IOException {
-156    Put put;
-157    ArrayList<Put> puts = new ArrayList<>();
-158
-159    for (int row = 0; row < rows.length; row++) {
-160      put = new Put(rows[row]);
-161      for (int fam = 0; fam < families.length; fam++) {
-162        for (int qual = 0; qual < qualifiers.length; qual++) {
-163          KeyValue kv = new KeyValue(rows[row], families[fam], qualifiers[qual], qual, value);
-164          put.add(kv);
-165        }
-166      }
-167      puts.add(put);
-168    }
-169
-170    return puts;
-171  }
-172
-173  @AfterClass
-174  public static void tearDownAfterClass() throws Exception {
-175    TEST_UTIL.shutdownMiniCluster();
-176  }
-177
-178  @Before
-179  public void setupBeforeTest() throws Exception {
-180    disableSleeping();
-181  }
-182
-183  @After
-184  public void teardownAfterTest() throws Exception {
-185    disableSleeping();
-186  }
-187
-188  /**
-189   * Run the test callable when heartbeats are enabled/disabled. We expect all tests to only pass
-190   * when heartbeat messages are enabled (otherwise the test is pointless). When heartbeats are
-191   * disabled, the test should throw an exception.
-192   * @param testCallable
-193   * @throws InterruptedException
-194   */
-195  private void testImportanceOfHeartbeats(Callable<Void> testCallable) throws InterruptedException {
-196    HeartbeatRPCServices.heartbeatsEnabled = true;
-197
+147   */
+148  static ArrayList<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
+149      byte[] value) throws IOException {
+150    Put put;
+151    ArrayList<Put> puts = new ArrayList<>();
+152
+153    for (int row = 0; row < rows.length; row++) {
+154      put = new Put(rows[row]);
+155      for (int fam = 0; fam < families.length; fam++) {
+156        for (int qual = 0; qual < qualifiers.length; qual++) {
+157          KeyValue kv = new KeyValue(rows[row], families[fam], qualifiers[qual], qual, value);
+158          put.add(kv);
+159        }
+160      }
+161      puts.add(put);
+162    }
+163
+164    return puts;
+165  }
+166
+167  @AfterClass
+168  public static void tearDownAfterClass() throws Exception {
+169    TEST_UTIL.shutdownMiniCluster();
+170  }
+171
+172  @Before
+173  public void setupBeforeTest() throws Exception {
+174    disableSleeping();
+175  }
+176
+177  @After
+178  public void teardownAfterTest() throws Exception {
+179    disableSleeping();
+180  }
+181
+182  /**
+183   * Run the test callable when heartbeats are enabled/disabled. We expect all tests to only pass
+184   * when heartbeat messages are enabled (otherwise the test is pointless). When heartbeats are
+185   * disabled, the test should throw an exception.
+186   */
+187  private void testImportanceOfHeartbeats(Callable<Void> testCallable) throws InterruptedException {
+188    HeartbeatRPCServices.heartbeatsEnabled = true;
+189
+190    try {
+191      testCallable.call();
+192    } catch (Exception e) {
+193      fail("Heartbeat messages are enabled, exceptions should NOT be thrown. Exception trace:"
+194          + ExceptionUtils.getStackTrace(e));
+195    }
+196
+197    HeartbeatRPCServices.heartbeatsEnabled = false;
 198    try {
 199      testCallable.call();
 200    } catch (Exception e) {
-201      fail("Heartbeat messages are enabled, exceptions should NOT be thrown. Exception trace:"
-202          + ExceptionUtils.getStackTrace(e));
-203    }
-204
-205    HeartbeatRPCServices.heartbeatsEnabled = false;
-206    try {
-207      testCallable.call();
-208    } catch (Exception e) {
-209      return;
-210    } finally {
-211      HeartbeatRPCServices.heartbeatsEnabled = true;
-212    }
-213    fail("Heartbeats messages are disabled, an exception 
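
testImportanceOfHeartbeats above runs the callable twice: once with heartbeats enabled (it must not throw) and once with them disabled (it must throw). A self-contained toy of the same assert-both-ways pattern, with the HBase types replaced by a plain static flag:

import java.util.concurrent.Callable;

public class HeartbeatPatternDemo {
  static volatile boolean heartbeatsEnabled = true;

  // Stand-in for a scan that only succeeds when heartbeats are on.
  static final Callable<Void> SCAN = () -> {
    if (!heartbeatsEnabled) {
      throw new IllegalStateException("timed out without heartbeat");
    }
    return null;
  };

  public static void main(String[] args) throws Exception {
    heartbeatsEnabled = true;
    SCAN.call(); // must not throw, mirroring the first try/catch above

    heartbeatsEnabled = false;
    try {
      SCAN.call();
      throw new AssertionError("disabled heartbeats should have failed");
    } catch (IllegalStateException expected) {
      System.out.println("failed as expected: " + expected.getMessage());
    } finally {
      heartbeatsEnabled = true; // restore, as the finally block above does
    }
  }
}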

[01/51] [partial] hbase-site git commit: Published site at .

2018-01-23 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 08b2580fb -> 8118541fa


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.html
index adf50f9..51a1acc 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.html
@@ -269,7 +269,7 @@ extends 
 
 getSizeToCheck
-protected long getSizeToCheck(int tableRegionsCount)
+protected long getSizeToCheck(int tableRegionsCount)

Overrides:
getSizeToCheck in class IncreasingToUpperBoundRegionSplitPolicy
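
The one-line hunk above only changes an HTML anchor, but it points at the extension seam: split policies tune when a region splits by overriding getSizeToCheck(int). A toy model of that override point — the base class is a local stand-in and the stepping rule is invented, not SteppingSplitPolicy's actual logic:

abstract class BaseSplitPolicy {
  protected long getSizeToCheck(int tableRegionsCount) {
    return 10L * 1024 * 1024 * 1024; // pretend: a 10 GB max file size
  }
}

public class SteppingLikePolicy extends BaseSplitPolicy {
  @Override
  protected long getSizeToCheck(int tableRegionsCount) {
    // Invented rule: split young tables early, defer to the parent later.
    return tableRegionsCount <= 1 ? 128L * 1024 * 1024
                                  : super.getSizeToCheck(tableRegionsCount);
  }

  public static void main(String[] args) {
    SteppingLikePolicy p = new SteppingLikePolicy();
    System.out.println(p.getSizeToCheck(1));  // 134217728
    System.out.println(p.getSizeToCheck(50)); // 10737418240
  }
}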



[01/51] [partial] hbase-site git commit: Published site at .

2018-01-19 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 46b01e3f3 -> 14db89d7c


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.SocketTimeoutRsExecutor.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.SocketTimeoutRsExecutor.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.SocketTimeoutRsExecutor.html
index f1db5ca..d8515d7 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.SocketTimeoutRsExecutor.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.SocketTimeoutRsExecutor.html
@@ -32,813 +32,820 @@
 024import static org.junit.Assert.fail;
 025
 026import java.io.IOException;
-027import java.net.SocketTimeoutException;
-028import java.util.NavigableMap;
-029import java.util.Random;
-030import java.util.Set;
-031import java.util.SortedSet;
-032import 
java.util.concurrent.ConcurrentSkipListMap;
-033import 
java.util.concurrent.ConcurrentSkipListSet;
-034import 
java.util.concurrent.ExecutionException;
-035import java.util.concurrent.Executors;
-036import java.util.concurrent.Future;
-037import 
java.util.concurrent.ScheduledExecutorService;
-038import java.util.concurrent.TimeUnit;
-039
-040import 
org.apache.hadoop.conf.Configuration;
-041import 
org.apache.hadoop.hbase.CategoryBasedTimeout;
-042import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-043import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-044import 
org.apache.hadoop.hbase.NotServingRegionException;
-045import 
org.apache.hadoop.hbase.ServerName;
-046import 
org.apache.hadoop.hbase.TableName;
-047import 
org.apache.hadoop.hbase.client.RegionInfo;
-048import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-049import 
org.apache.hadoop.hbase.client.RetriesExhaustedException;
-050import 
org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-051import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-052import 
org.apache.hadoop.hbase.master.MasterServices;
-053import 
org.apache.hadoop.hbase.master.RegionState.State;
-054import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
-055import 
org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
-056import 
org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher;
-057import 
org.apache.hadoop.hbase.procedure2.Procedure;
-058import 
org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
-059import 
org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-060import 
org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-061import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-062import 
org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
-063import 
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-064import 
org.apache.hadoop.hbase.testclassification.MasterTests;
-065import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-066import 
org.apache.hadoop.hbase.util.Bytes;
-067import 
org.apache.hadoop.hbase.util.FSUtils;
-068import 
org.apache.hadoop.ipc.RemoteException;
-069import org.junit.After;
-070import org.junit.Before;
-071import org.junit.Ignore;
-072import org.junit.Rule;
-073import org.junit.Test;
-074import 
org.junit.experimental.categories.Category;
-075import 
org.junit.rules.ExpectedException;
-076import org.junit.rules.TestName;
-077import org.junit.rules.TestRule;
-078import org.slf4j.Logger;
-079import org.slf4j.LoggerFactory;
-080import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-081import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-082import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
-092
-093@Category({MasterTests.class, 
MediumTests.class})
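
The import block above pulls in MasterTests and MediumTests, which feed the @Category annotation closing the hunk. A minimal JUnit 4 sketch of the mechanism, with invented marker interfaces standing in for HBase's testclassification ones:

import org.junit.Test;
import org.junit.experimental.categories.Category;

public class CategoryDemo {
  // Marker interfaces; HBase keeps its own under
  // org.apache.hadoop.hbase.testclassification.
  public interface MasterTests {}
  public interface MediumTests {}

  @Category({MasterTests.class, MediumTests.class})
  public static class SomeMasterTest {
    @Test
    public void runs() {
      // a Categories suite can include or exclude this class by marker
    }
  }
}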

[01/51] [partial] hbase-site git commit: Published site at .

2018-01-12 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site ba96e306f -> 0b638133a


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestHCM.BlockingFilter.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestHCM.BlockingFilter.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestHCM.BlockingFilter.html
deleted file mode 100644
index eaa3d11..000
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestHCM.BlockingFilter.html
+++ /dev/null
@@ -1,1543 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-Source code
-
-
-
-
-001/*
-002 * Licensed to the Apache Software 
Foundation (ASF) under one
-003 * or more contributor license 
agreements.  See the NOTICE file
-004 * distributed with this work for 
additional information
-005 * regarding copyright ownership.  The 
ASF licenses this file
-006 * to you under the Apache License, 
Version 2.0 (the
-007 * "License"); you may not use this file 
except in compliance
-008 * with the License.  You may obtain a 
copy of the License at
-009 *
-010 * 
http://www.apache.org/licenses/LICENSE-2.0
-011 *
-012 * Unless required by applicable law or 
agreed to in writing, software
-013 * distributed under the License is 
distributed on an "AS IS" BASIS,
-014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
-015 * See the License for the specific 
language governing permissions and
-016 * limitations under the License.
-017 */
-018package org.apache.hadoop.hbase.client;
-019
-020import static 
org.junit.Assert.assertEquals;
-021import static 
org.junit.Assert.assertFalse;
-022import static 
org.junit.Assert.assertNotNull;
-023import static 
org.junit.Assert.assertNull;
-024import static 
org.junit.Assert.assertTrue;
-025import static org.junit.Assert.fail;
-026
-027import java.io.IOException;
-028import java.lang.reflect.Field;
-029import java.lang.reflect.Modifier;
-030import java.net.SocketTimeoutException;
-031import java.util.ArrayList;
-032import java.util.List;
-033import java.util.Optional;
-034import java.util.Random;
-035import 
java.util.concurrent.ExecutorService;
-036import 
java.util.concurrent.SynchronousQueue;
-037import 
java.util.concurrent.ThreadPoolExecutor;
-038import java.util.concurrent.TimeUnit;
-039import 
java.util.concurrent.atomic.AtomicBoolean;
-040import 
java.util.concurrent.atomic.AtomicInteger;
-041import 
java.util.concurrent.atomic.AtomicLong;
-042import 
java.util.concurrent.atomic.AtomicReference;
-043
-044import 
org.apache.hadoop.conf.Configuration;
-045import 
org.apache.hadoop.hbase.CategoryBasedTimeout;
-046import org.apache.hadoop.hbase.Cell;
-047import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-048import 
org.apache.hadoop.hbase.HConstants;
-049import 
org.apache.hadoop.hbase.HRegionLocation;
-050import 
org.apache.hadoop.hbase.HTableDescriptor;
-051import 
org.apache.hadoop.hbase.RegionLocations;
-052import 
org.apache.hadoop.hbase.ServerName;
-053import 
org.apache.hadoop.hbase.TableName;
-054import org.apache.hadoop.hbase.Waiter;
-055import 
org.apache.hadoop.hbase.coprocessor.ObserverContext;
-056import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
-057import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-058import 
org.apache.hadoop.hbase.coprocessor.RegionObserver;
-059import 
org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
-060import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-061import 
org.apache.hadoop.hbase.exceptions.RegionMovedException;
-062import 
org.apache.hadoop.hbase.filter.Filter;
-063import 
org.apache.hadoop.hbase.filter.FilterBase;
-064import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-065import 
org.apache.hadoop.hbase.ipc.RpcClient;
-066import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-067import 
org.apache.hadoop.hbase.master.HMaster;
-068import 
org.apache.hadoop.hbase.regionserver.HRegion;
-069import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-070import 
org.apache.hadoop.hbase.regionserver.Region;
-071import 
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-072import 
org.apache.hadoop.hbase.wal.WALEdit;
-073import 
org.apache.hadoop.hbase.testclassification.LargeTests;
-074import 
org.apache.hadoop.hbase.util.Bytes;
-075import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-076import 
org.apache.hadoop.hbase.util.JVMClusterUtil;
-077import 
org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
-078import 
org.apache.hadoop.hbase.util.Threads;
-079import org.junit.AfterClass;
-080import org.junit.Assert;
-081import org.junit.BeforeClass;
-082import org.junit.Ignore;
-083import org.junit.Rule;
-084import org.junit.Test;
-085import 
org.junit.experimental.categories.Category;
-086import org.junit.rules.TestName;
-087import org.junit.rules.TestRule;
-088import 

[01/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 096cff083 -> f183e80f4


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.html 
b/testdevapidocs/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.html
index 744f161..17918aa 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.html
@@ -275,7 +275,7 @@ implements org.apache.hadoop.util.Tool
 closeRegion(org.apache.hadoop.hbase.regionserver.HRegion region)
 
 
-private static org.apache.hadoop.hbase.HTableDescriptor
+private static org.apache.hadoop.hbase.client.TableDescriptor
 createHTableDescriptor(int regionNum,
   int numFamilies)
 
@@ -298,9 +298,9 @@ implements org.apache.hadoop.util.Tool
 
 
 private org.apache.hadoop.hbase.regionserver.HRegion
-openRegion(org.apache.hadoop.fs.FileSystem fs,
+openRegion(org.apache.hadoop.fs.FileSystem fs,
   org.apache.hadoop.fs.Path dir,
-  org.apache.hadoop.hbase.HTableDescriptor htd,
+  org.apache.hadoop.hbase.client.TableDescriptor htd,
   org.apache.hadoop.hbase.wal.WALFactory wals,
   long whenToRoll,
   org.apache.hadoop.hbase.regionserver.LogRoller roller)
@@ -526,7 +526,7 @@ implements org.apache.hadoop.util.Tool
 
 
 walsListenedTo
-private final Set<org.apache.hadoop.hbase.wal.WAL> walsListenedTo
+private final Set<org.apache.hadoop.hbase.wal.WAL> walsListenedTo
 
 
 
@@ -575,7 +575,7 @@ implements org.apache.hadoop.util.Tool
 
 
 run
-public int run(String[] args)
+public int run(String[] args)
          throws Exception
 
 Specified by:
@@ -591,8 +591,8 @@ implements org.apache.hadoop.util.Tool
 
 
 createHTableDescriptor
-private static org.apache.hadoop.hbase.HTableDescriptor createHTableDescriptor(int regionNum,
-    int numFamilies)
+private static org.apache.hadoop.hbase.client.TableDescriptor createHTableDescriptor(int regionNum,
+    int numFamilies)
 
 
 
@@ -601,7 +601,7 @@ implements org.apache.hadoop.util.Tool
 
 
 verify
-private long verify(org.apache.hadoop.hbase.wal.WALFactory wals,
+private long verify(org.apache.hadoop.hbase.wal.WALFactory wals,
     org.apache.hadoop.fs.Path wal,
     boolean verbose)
  throws IOException
@@ -624,7 +624,7 @@ implements org.apache.hadoop.util.Tool
 
 
 logBenchmarkResult
-private static void logBenchmarkResult(String testName,
+private static void logBenchmarkResult(String testName,
    long numTests,
    long totalTime)
 
@@ -635,18 +635,18 @@ implements org.apache.hadoop.util.Tool
 
 
 printUsageAndExit
-private void printUsageAndExit()
+private void printUsageAndExit()
 
 
-
+
 
 
 
 
 openRegion
-private org.apache.hadoop.hbase.regionserver.HRegion openRegion(org.apache.hadoop.fs.FileSystem fs,
+private org.apache.hadoop.hbase.regionserver.HRegion openRegion(org.apache.hadoop.fs.FileSystem fs,
     org.apache.hadoop.fs.Path dir,
-    org.apache.hadoop.hbase.HTableDescriptor htd,
+    org.apache.hadoop.hbase.client.TableDescriptor htd,
     org.apache.hadoop.hbase.wal.WALFactory wals,
     long whenToRoll,
     org.apache.hadoop.hbase.regionserver.LogRoller roller)
@@ -663,7 +663,7 @@ implements 
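
The hunks above track the migration from HTableDescriptor to the client-side TableDescriptor. A sketch of building one under the newer API — assuming the TableDescriptorBuilder and ColumnFamilyDescriptorBuilder entry points of the HBase 2 client; verify against your client version:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class DescriptorDemo {
  // Shape mirrors createHTableDescriptor(regionNum, numFamilies) above;
  // the table/family names here are invented.
  static TableDescriptor createTableDescriptor(int regionNum, int numFamilies) {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("perf-table-" + regionNum));
    for (int i = 0; i < numFamilies; i++) {
      builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf" + i));
    }
    return builder.build();
  }

  public static void main(String[] args) {
    System.out.println(createTableDescriptor(0, 2));
  }
}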

[01/51] [partial] hbase-site git commit: Published site at .

2018-01-09 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site fdee382ad -> 49431b18a


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
index 3dbd57e..69b0295 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
@@ -98,6 +98,10 @@
 org.apache.hadoop.hbase.master.procedure
 
 
+
+org.apache.hadoop.hbase.master.replication
+
+
 
 
 
@@ -135,6 +139,10 @@
 ProcedureExecutor<MasterProcedureEnv>
 MasterServices.getMasterProcedureExecutor()

+
+private RemoteProcedureDispatcher.RemoteProcedure<MasterProcedureEnv,?>
+HMaster.getRemoteProcedure(long procId)
+



@@ -524,162 +532,165 @@
 ServerName serverName)


-void
-RegionTransitionProcedure.remoteCallCompleted(MasterProcedureEnv env,
-   ServerName serverName,
-   RemoteProcedureDispatcher.RemoteOperation response)
-
-
 protected boolean
 UnassignProcedure.remoteCallFailed(MasterProcedureEnv env,
    RegionStates.RegionStateNode regionNode,
    IOException exception)

-
+
 protected abstract boolean
 RegionTransitionProcedure.remoteCallFailed(MasterProcedureEnv env,
    RegionStates.RegionStateNode regionNode,
    IOException exception)

-
+
 protected boolean
 AssignProcedure.remoteCallFailed(MasterProcedureEnv env,
    RegionStates.RegionStateNode regionNode,
    IOException exception)

-
+
 void
 RegionTransitionProcedure.remoteCallFailed(MasterProcedureEnv env,
    ServerName serverName,
    IOException exception)

+
+void
+RegionTransitionProcedure.remoteOperationCompleted(MasterProcedureEnv env)
+

+void
+RegionTransitionProcedure.remoteOperationFailed(MasterProcedureEnv env,
+   RemoteProcedureException error)
+
+
 protected void
 UnassignProcedure.reportTransition(MasterProcedureEnv env,
    RegionStates.RegionStateNode regionNode,
    org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode code,
    long seqId)

-
+
 protected abstract void
 RegionTransitionProcedure.reportTransition(MasterProcedureEnv env,
    RegionStates.RegionStateNode regionNode,
    org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode code,
    long seqId)

-
+
 protected void
 AssignProcedure.reportTransition(MasterProcedureEnv env,
    RegionStates.RegionStateNode regionNode,
    org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode code,
    long openSeqNum)

-
+
 protected void
 RegionTransitionProcedure.reportTransition(MasterProcedureEnv env,
    ServerName serverName,
    org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode code,
    long seqId)

-
+
 protected void
 RegionTransitionProcedure.rollback(MasterProcedureEnv env)

-
+
 private void
 MergeTableRegionsProcedure.rollbackCloseRegionsForMerge(MasterProcedureEnv env)
 Rollback close regions


-
+
 protected void
 GCMergedRegionsProcedure.rollbackState(MasterProcedureEnv env,
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.GCMergedRegionsState state)

-
+
 protected void
 GCRegionProcedure.rollbackState(MasterProcedureEnv env,
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.GCRegionState state)

-
+
 protected void
 MergeTableRegionsProcedure.rollbackState(MasterProcedureEnv env,
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsState state)

-
+
 protected void
 MoveRegionProcedure.rollbackState(MasterProcedureEnv env,
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionState state)

-
+
 protected void
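
The class-use rows above show remoteCallCompleted giving way to a remoteOperationCompleted / remoteOperationFailed pair. A toy callback interface making that split visible — only the two method names come from the table; Env, the exception type, and the dispatcher are stand-ins:

public class DispatchDemo {
  static class Env {}
  static class RemoteProcedureException extends Exception {
    RemoteProcedureException(String msg) { super(msg); }
  }

  interface RemoteProcedure {
    void remoteOperationCompleted(Env env);
    void remoteOperationFailed(Env env, RemoteProcedureException error);
  }

  // The dispatcher reports exactly one of the two outcomes per operation.
  static void dispatch(RemoteProcedure proc, Env env, boolean ok) {
    if (ok) {
      proc.remoteOperationCompleted(env);
    } else {
      proc.remoteOperationFailed(env, new RemoteProcedureException("rs died"));
    }
  }

  public static void main(String[] args) {
    RemoteProcedure p = new RemoteProcedure() {
      @Override public void remoteOperationCompleted(Env env) {
        System.out.println("done");
      }
      @Override public void remoteOperationFailed(Env env, RemoteProcedureException e) {
        System.out.println("failed: " + e.getMessage());
      }
    };
    dispatch(p, new Env(), true);
    dispatch(p, new Env(), false);
  }
}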
 

[01/51] [partial] hbase-site git commit: Published site at .

2018-01-04 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 1f4f0eec6 -> c7c40c622


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html 
b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html
index b7c7db0..d571cc6 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class WALSplitter.EntryBuffers
+public static class WALSplitter.EntryBuffers
 extends Object
 Class which accumulates edits and separates them into a 
buffer per region
  while simultaneously accounting RAM usage. Blocks if the RAM usage crosses
@@ -250,7 +250,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 controller
-WALSplitter.PipelineController controller
+WALSplitter.PipelineController controller
 
 
 
@@ -259,7 +259,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 buffers
-Map<byte[],WALSplitter.RegionEntryBuffer> buffers
+Map<byte[],WALSplitter.RegionEntryBuffer> buffers
 
 
 
@@ -268,7 +268,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 currentlyWriting
-http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">Setbyte[] currentlyWriting
+http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">Setbyte[] currentlyWriting
 
 
 
@@ -277,7 +277,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 totalBuffered
-long totalBuffered
+long totalBuffered
 
 
 
@@ -286,7 +286,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 maxHeapUsage
-long maxHeapUsage
+long maxHeapUsage
 
 
 
@@ -295,7 +295,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 splitWriterCreationBounded
-boolean splitWriterCreationBounded
+boolean splitWriterCreationBounded
 
 
 
@@ -312,7 +312,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 EntryBuffers
-public EntryBuffers(WALSplitter.PipelineController controller,
+public EntryBuffers(WALSplitter.PipelineController controller,
    long maxHeapUsage)
 
 
@@ -322,7 +322,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 EntryBuffers
-public EntryBuffers(WALSplitter.PipelineController controller,
+public EntryBuffers(WALSplitter.PipelineController controller,
    long maxHeapUsage,
    boolean splitWriterCreationBounded)
 
@@ -341,7 +341,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 appendEntry
-public void appendEntry(WAL.Entry entry)
+public void appendEntry(WAL.Entry entry)
              throws InterruptedException,
                     IOException
 Append a log entry into the corresponding region buffer.
@@ -359,7 +359,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getChunkToWrite
-WALSplitter.RegionEntryBuffer getChunkToWrite()
+WALSplitter.RegionEntryBuffer getChunkToWrite()
 
 Returns:
 RegionEntryBuffer a buffer of edits to be written.
@@ -372,7 +372,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 doneWriting
-void doneWriting(WALSplitter.RegionEntryBuffer buffer)
+void doneWriting(WALSplitter.RegionEntryBuffer buffer)
 
 
 
@@ -381,7 +381,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 isRegionCurrentlyWriting
-boolean isRegionCurrentlyWriting(byte[] region)
+boolean isRegionCurrentlyWriting(byte[] region)
 
 
 
@@ -390,7 +390,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 waitUntilDrained
-public void waitUntilDrained()
+public void waitUntilDrained()
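
EntryBuffers, per its javadoc above, buckets edits into a buffer per region while accounting RAM and blocks appenders once maxHeapUsage is crossed. A toy with the same shape — per-key buffers plus a blocking heap budget; all types here are invented, it is not the HBase class:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class EntryBuffersToy {
  private final Map<String, List<String>> buffers = new HashMap<>();
  private long totalBuffered = 0;
  private final long maxHeapUsage;

  EntryBuffersToy(long maxHeapUsage) { this.maxHeapUsage = maxHeapUsage; }

  // Blocks while the accounted usage would exceed the budget, as described.
  synchronized void appendEntry(String region, String edit) throws InterruptedException {
    while (totalBuffered + edit.length() > maxHeapUsage) {
      wait();
    }
    buffers.computeIfAbsent(region, r -> new ArrayList<>()).add(edit);
    totalBuffered += edit.length();
  }

  // A writer drains one region's buffer and releases the budget.
  synchronized List<String> drain(String region) {
    List<String> chunk = buffers.remove(region);
    if (chunk != null) {
      chunk.forEach(e -> totalBuffered -= e.length());
      notifyAll();
    }
    return chunk;
  }

  public static void main(String[] args) throws InterruptedException {
    EntryBuffersToy toy = new EntryBuffersToy(32);
    toy.appendEntry("region-a", "edit-1");
    toy.appendEntry("region-b", "edit-2");
    System.out.println(toy.drain("region-a")); // [edit-1]
  }
}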
 
 
 



[01/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 4e7a24697 -> bb3985727


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.html
index 21d6c39..412fef2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.html
@@ -60,66 +60,67 @@
 052  String RIT_COUNT_DESC = "Current number 
of Regions In Transition (Gauge).";
 053  String RIT_COUNT_OVER_THRESHOLD_DESC 
=
 054  "Current number of Regions In 
Transition over threshold time (Gauge).";
-055  String RIT_OLDEST_AGE_DESC = "Timestamp 
in milliseconds of the oldest Region In Transition (Gauge).";
-056  String RIT_DURATION_DESC =
-057  "Total durations in milliseconds 
for all Regions in Transition (Histogram).";
-058
-059  String ASSIGN_METRIC_PREFIX = 
"assign";
-060  String UNASSIGN_METRIC_PREFIX = 
"unassign";
-061  String SPLIT_METRIC_PREFIX = "split";
-062  String MERGE_METRIC_PREFIX = "merge";
-063
-064  String OPERATION_COUNT_NAME = 
"operationCount";
-065
-066  /**
-067   * Set the number of regions in 
transition.
-068   *
-069   * @param ritCount count of the regions 
in transition.
-070   */
-071  void setRIT(int ritCount);
-072
-073  /**
-074   * Set the count of the number of 
regions that have been in transition over the threshold time.
-075   *
-076   * @param ritCountOverThreshold number 
of regions in transition for longer than threshold.
-077   */
-078  void setRITCountOverThreshold(int 
ritCountOverThreshold);
-079
-080  /**
-081   * Set the oldest region in 
transition.
-082   *
-083   * @param age age of the oldest RIT.
-084   */
-085  void setRITOldestAge(long age);
-086
-087  void updateRitDuration(long 
duration);
-088
-089  /**
-090   * TODO: Remove. This may not be needed 
now as assign and unassign counts are tracked separately
-091   * Increment the count of operations 
(assign/unassign).
-092   */
-093  void incrementOperationCounter();
-094
-095  /**
-096   * @return {@link OperationMetrics} 
containing common metrics for assign operation
-097   */
-098  OperationMetrics getAssignMetrics();
-099
-100  /**
-101   * @return {@link OperationMetrics} 
containing common metrics for unassign operation
-102   */
-103  OperationMetrics 
getUnassignMetrics();
-104
-105  /**
-106   * @return {@link OperationMetrics} 
containing common metrics for split operation
-107   */
-108  OperationMetrics getSplitMetrics();
-109
-110  /**
-111   * @return {@link OperationMetrics} 
containing common metrics for merge operation
-112   */
-113  OperationMetrics getMergeMetrics();
-114}
+055  String RIT_OLDEST_AGE_DESC =
+056  "Timestamp in milliseconds of the 
oldest Region In Transition (Gauge).";
+057  String RIT_DURATION_DESC =
+058  "Total durations in milliseconds 
for all Regions in Transition (Histogram).";
+059
+060  String ASSIGN_METRIC_PREFIX = 
"assign";
+061  String UNASSIGN_METRIC_PREFIX = 
"unassign";
+062  String SPLIT_METRIC_PREFIX = "split";
+063  String MERGE_METRIC_PREFIX = "merge";
+064
+065  String OPERATION_COUNT_NAME = 
"operationCount";
+066
+067  /**
+068   * Set the number of regions in 
transition.
+069   *
+070   * @param ritCount count of the regions 
in transition.
+071   */
+072  void setRIT(int ritCount);
+073
+074  /**
+075   * Set the count of the number of 
regions that have been in transition over the threshold time.
+076   *
+077   * @param ritCountOverThreshold number 
of regions in transition for longer than threshold.
+078   */
+079  void setRITCountOverThreshold(int 
ritCountOverThreshold);
+080
+081  /**
+082   * Set the oldest region in 
transition.
+083   *
+084   * @param age age of the oldest RIT.
+085   */
+086  void setRITOldestAge(long age);
+087
+088  void updateRitDuration(long 
duration);
+089
+090  /**
+091   * TODO: Remove. This may not be needed 
now as assign and unassign counts are tracked separately
+092   * Increment the count of operations 
(assign/unassign).
+093   */
+094  void incrementOperationCounter();
+095
+096  /**
+097   * @return {@link OperationMetrics} 
containing common metrics for assign operation
+098   */
+099  OperationMetrics getAssignMetrics();
+100
+101  /**
+102   * @return {@link OperationMetrics} 
containing common metrics for unassign operation
+103   */
+104  OperationMetrics 
getUnassignMetrics();
+105
+106  /**
+107   * @return {@link OperationMetrics} 
containing common metrics for split operation
+108   */
+109  OperationMetrics getSplitMetrics();
+110
+111  /**
+112   * @return {@link OperationMetrics} 
containing common metrics for merge operation

[01/51] [partial] hbase-site git commit: Published site at .

2018-01-01 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 3d44c39d8 -> 69506d415


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/balancer/class-use/StochasticLoadBalancer.ServerLocalityCostFunction.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/class-use/StochasticLoadBalancer.ServerLocalityCostFunction.html
 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/class-use/StochasticLoadBalancer.ServerLocalityCostFunction.html
index 6e38b91..0a8579b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/class-use/StochasticLoadBalancer.ServerLocalityCostFunction.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/class-use/StochasticLoadBalancer.ServerLocalityCostFunction.html
@@ -160,6 +160,6 @@
 
 
 
-Copyright  20072017 https://www.apache.org/;>The Apache Software Foundation. All rights 
reserved.
+Copyright  20072018 https://www.apache.org/;>The Apache Software Foundation. All rights 
reserved.
 
 



[01/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site dc4d8e7fa -> 83bf61756


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.RR.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.RR.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.RR.html
index bbd91b8..4f76302 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.RR.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.RR.html
@@ -56,1641 +56,1753 @@
 048import 
java.util.concurrent.atomic.AtomicBoolean;
 049import 
java.util.concurrent.atomic.AtomicInteger;
 050import 
java.util.concurrent.atomic.AtomicLong;
-051
-052import 
org.apache.hadoop.conf.Configuration;
-053import 
org.apache.hadoop.hbase.CallQueueTooBigException;
-054import 
org.apache.hadoop.hbase.CategoryBasedTimeout;
-055import org.apache.hadoop.hbase.Cell;
-056import 
org.apache.hadoop.hbase.HConstants;
-057import 
org.apache.hadoop.hbase.HRegionInfo;
-058import 
org.apache.hadoop.hbase.HRegionLocation;
-059import 
org.apache.hadoop.hbase.RegionLocations;
-060import 
org.apache.hadoop.hbase.ServerName;
-061import 
org.apache.hadoop.hbase.TableName;
-062import 
org.apache.hadoop.hbase.client.AsyncProcessTask.ListRowAccess;
-063import 
org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
-064import 
org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-065import 
org.apache.hadoop.hbase.client.backoff.ServerStatistics;
-066import 
org.apache.hadoop.hbase.client.coprocessor.Batch;
-067import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-068import 
org.apache.hadoop.hbase.testclassification.ClientTests;
-069import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-070import 
org.apache.hadoop.hbase.util.Bytes;
-071import 
org.apache.hadoop.hbase.util.Threads;
-072import org.junit.Assert;
-073import org.junit.BeforeClass;
-074import org.junit.Ignore;
-075import org.junit.Rule;
-076import org.junit.Test;
-077import 
org.junit.experimental.categories.Category;
-078import org.junit.rules.TestRule;
-079import org.mockito.Mockito;
-080import org.slf4j.Logger;
-081import org.slf4j.LoggerFactory;
-082
-083@Category({ClientTests.class, 
MediumTests.class})
-084public class TestAsyncProcess {
-085  @Rule public final TestRule timeout = 
CategoryBasedTimeout.builder().withTimeout(this.getClass()).
-086  
withLookingForStuckThread(true).build();
-087  private static final Logger LOG = 
LoggerFactory.getLogger(TestAsyncProcess.class);
-088  private static final TableName 
DUMMY_TABLE =
-089  TableName.valueOf("DUMMY_TABLE");
-090  private static final byte[] 
DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1");
-091  private static final byte[] 
DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2");
-092  private static final byte[] 
DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3");
-093  private static final byte[] FAILS = 
Bytes.toBytes("FAILS");
-094  private static final Configuration CONF 
= new Configuration();
-095  private static final 
ConnectionConfiguration CONNECTION_CONFIG =
-096  new 
ConnectionConfiguration(CONF);
-097  private static final ServerName sn = 
ServerName.valueOf("s1,1,1");
-098  private static final ServerName sn2 = 
ServerName.valueOf("s2,2,2");
-099  private static final ServerName sn3 = 
ServerName.valueOf("s3,3,3");
-100  private static final HRegionInfo hri1 
=
-101  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
-102  private static final HRegionInfo hri2 
=
-103  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2);
-104  private static final HRegionInfo hri3 
=
-105  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3);
-106  private static final HRegionLocation 
loc1 = new HRegionLocation(hri1, sn);
-107  private static final HRegionLocation 
loc2 = new HRegionLocation(hri2, sn);
-108  private static final HRegionLocation 
loc3 = new HRegionLocation(hri3, sn2);
-109
-110  // Replica stuff
-111  private static final RegionInfo hri1r1 
= RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
-112  private static final RegionInfo hri1r2 
= RegionReplicaUtil.getRegionInfoForReplica(hri1, 2);
-113  private static final RegionInfo hri2r1 
= RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
-114  private static final RegionLocations 
hrls1 = new RegionLocations(new HRegionLocation(hri1, sn),
-115  new HRegionLocation(hri1r1, sn2), 
new HRegionLocation(hri1r2, sn3));
-116  private static final RegionLocations 
hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2),
-117  new HRegionLocation(hri2r1, 
sn3));
-118  private static final RegionLocations 
hrls3 =
-119  new RegionLocations(new 
HRegionLocation(hri3, sn3), null);
-120
-121  private 

[01/51] [partial] hbase-site git commit: Published site at .

2017-12-29 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site b9b25b6b3 -> 63d6f7127


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.html 
b/devapidocs/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.html
index 365e307..03832e9 100644
--- a/devapidocs/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.html
+++ b/devapidocs/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.html
@@ -171,8 +171,8 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 Cell
-filterCell(Cellcell,
-  
org.apache.hadoop.hbase.shaded.com.google.common.base.Predicatebyte[]famPredicate)
+filterCell(Cellcell,
+  
org.apache.hbase.thirdparty.com.google.common.base.Predicatebyte[]famPredicate)
 Filters the bulk load cell using the supplied 
predicate.
 
 
@@ -241,14 +241,14 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 Method Detail
-
+
 
 
 
 
 filterCell
 public Cell filterCell(Cell cell,
-    org.apache.hadoop.hbase.shaded.com.google.common.base.Predicate<byte[]> famPredicate)
+    org.apache.hbase.thirdparty.com.google.common.base.Predicate<byte[]> famPredicate)
 Filters the bulk load cell using the supplied 
predicate.
 
 Parameters:
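
filterCell above takes a Predicate over column-family byte arrays; the hunk only moves the Predicate type from the shaded guava package to hbase-thirdparty. The filtering idea itself, sketched with java.util.function.Predicate and plain byte arrays (toy code, not the HBase method; the family names are invented):

import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.function.Predicate;

public class FamilyPredicateDemo {
  public static void main(String[] args) {
    // Keep every family except "excluded-cf", mimicking a scope check
    // deciding which bulk-load families to replicate.
    Predicate<byte[]> famPredicate =
        fam -> !"excluded-cf".equals(new String(fam, StandardCharsets.UTF_8));

    for (String fam : List.of("cf1", "excluded-cf", "cf2")) {
      byte[] bytes = fam.getBytes(StandardCharsets.UTF_8);
      System.out.println(fam + " -> " + (famPredicate.test(bytes) ? "keep" : "drop"));
    }
  }
}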



[01/51] [partial] hbase-site git commit: Published site at .

2017-12-28 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 7254d5f48 -> d449e87f6


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/regionserver/ByteBufferChunkKeyValue.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/ByteBufferChunkKeyValue.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/ByteBufferChunkKeyValue.html
new file mode 100644
index 000..ead231c
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/ByteBufferChunkKeyValue.html
@@ -0,0 +1,389 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+ByteBufferChunkKeyValue (Apache HBase 3.0.0-SNAPSHOT API)
+
+
+
+
+
+var methods = {"i0":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.regionserver
+Class 
ByteBufferChunkKeyValue
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.ByteBufferExtendedCell
+
+
+org.apache.hadoop.hbase.ByteBufferKeyValue
+
+
+org.apache.hadoop.hbase.regionserver.ByteBufferChunkKeyValue
+
+
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+Cloneable, Cell, ExtendedCell, HeapSize, RawCell
+
+
+
+@InterfaceAudience.Private
+public class ByteBufferChunkKeyValue
+extends ByteBufferKeyValue
+ByteBuffer based cell which has the chunkid at the 0th 
offset
+
+See Also:
+MemStoreLAB
+
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from interface org.apache.hadoop.hbase.Cell
+Cell.Type
+
+
+
+
+
+
+
+
+Field Summary
+
+
+
+
+Fields inherited from class org.apache.hadoop.hbase.ByteBufferKeyValue
+buf,
 FIXED_OVERHEAD,
 length,
 offset
+
+
+
+
+
+Fields inherited from interface org.apache.hadoop.hbase.ExtendedCell
+CELL_NOT_BASED_ON_CHUNK
+
+
+
+
+
+Fields inherited from interface org.apache.hadoop.hbase.RawCell
+MAX_TAGS_LENGTH
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+ByteBufferChunkKeyValue(ByteBuffer buf,
+   int offset,
+   int length)
+
+
+ByteBufferChunkKeyValue(ByteBuffer buf,
+   int offset,
+   int length,
+   long seqId)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+int
+getChunkId()
+Extracts the id of the backing bytebuffer of this cell if 
it was obtained from fixed sized
+ chunks as in case of MemstoreLAB
+
+
+
+
+
+
+
+Methods inherited from class org.apache.hadoop.hbase.ByteBufferKeyValue
+deepClone,
 equals,
 getBuffer,
 getFamilyArray,
 getFamilyByteBuffer,
 getFamilyLength,
 getFamilyOffset,
 getFamilyPosition,
 getOffset,
 getQualifierArray,
 getQualifierByteBuffer,
 getQualifierLength,
 getQualifierOffset,
 getQualifierPosition,
 getRowArray,
 getRowByteBuffer,
 getRowLength,
 getRowOffset, getRowPosition,
 getSequenceId,
 getSerializedSize,
 getTagsArray,
 getTagsByteBuffer,
 getTagsLength,
 getTagsOffset,
 getTagsPosition,
 getTimestamp, getTypeByte,
 getValueArray,
 getValueByteBuffer,
 getValueLength,
 getValueOffset,
 getValuePosition,
 hashCode,
 heapSize,
 setSequenceId,
 setTimestamp,
 setTimestamp,
 toString,
 write,
 write
+
+
+
+
+
+Methods inherited from class java.lang.Object
+clone, 

[01/51] [partial] hbase-site git commit: Published site at .

2017-12-27 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 4cddebd1e -> d2b28a1a2


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowColByteBufferCell.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowColByteBufferCell.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowColByteBufferCell.html
index 3400507..2baa140 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowColByteBufferCell.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowColByteBufferCell.html
@@ -28,3034 +28,2926 @@
 020import static 
org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY;
 021import static 
org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
 022
-023import 
com.google.common.annotations.VisibleForTesting;
-024
-025import java.io.DataOutput;
-026import java.io.DataOutputStream;
-027import java.io.IOException;
-028import java.io.OutputStream;
-029import java.math.BigDecimal;
-030import java.nio.ByteBuffer;
-031import java.util.ArrayList;
-032import java.util.Iterator;
-033import java.util.List;
-034import java.util.Optional;
-035
-036import 
org.apache.hadoop.hbase.KeyValue.Type;
-037import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-038import 
org.apache.hadoop.hbase.io.HeapSize;
-039import 
org.apache.hadoop.hbase.io.TagCompressionContext;
-040import 
org.apache.hadoop.hbase.io.util.Dictionary;
-041import 
org.apache.hadoop.hbase.io.util.StreamUtils;
-042import 
org.apache.hadoop.hbase.util.ByteBufferUtils;
-043import 
org.apache.hadoop.hbase.util.ByteRange;
-044import 
org.apache.hadoop.hbase.util.Bytes;
-045import 
org.apache.hadoop.hbase.util.ClassSize;
-046import 
org.apache.yetus.audience.InterfaceAudience;
-047
-048
-049/**
-050 * Utility methods helpful slinging 
{@link Cell} instances. It has more powerful and
-051 * rich set of APIs than those in {@link 
CellUtil} for internal usage.
-052 */
-053@InterfaceAudience.Private
-054public final class PrivateCellUtil {
-055
-056  /**
-057   * Private constructor to keep this 
class from being instantiated.
-058   */
-059  private PrivateCellUtil() {
-060  }
+023import java.io.DataOutput;
+024import java.io.DataOutputStream;
+025import java.io.IOException;
+026import java.io.OutputStream;
+027import java.math.BigDecimal;
+028import java.nio.ByteBuffer;
+029import java.util.ArrayList;
+030import java.util.Iterator;
+031import java.util.List;
+032import java.util.Optional;
+033import 
org.apache.hadoop.hbase.KeyValue.Type;
+034import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
+035import 
org.apache.hadoop.hbase.io.HeapSize;
+036import 
org.apache.hadoop.hbase.io.TagCompressionContext;
+037import 
org.apache.hadoop.hbase.io.util.Dictionary;
+038import 
org.apache.hadoop.hbase.io.util.StreamUtils;
+039import 
org.apache.hadoop.hbase.util.ByteBufferUtils;
+040import 
org.apache.hadoop.hbase.util.ByteRange;
+041import 
org.apache.hadoop.hbase.util.Bytes;
+042import 
org.apache.hadoop.hbase.util.ClassSize;
+043import 
org.apache.yetus.audience.InterfaceAudience;
+044
+045import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+046
+047/**
+048 * Utility methods helpful slinging 
{@link Cell} instances. It has more powerful and
+049 * rich set of APIs than those in {@link 
CellUtil} for internal usage.
+050 */
+051@InterfaceAudience.Private
+052public final class PrivateCellUtil {
+053
+054  /**
+055   * Private constructor to keep this 
class from being instantiated.
+056   */
+057  private PrivateCellUtil() {
+058  }
+059
+060  /*** ByteRange 
***/
 061
-062  /*** ByteRange 
***/
-063
-064  public static ByteRange 
fillRowRange(Cell cell, ByteRange range) {
-065return range.set(cell.getRowArray(), 
cell.getRowOffset(), cell.getRowLength());
-066  }
-067
-068  public static ByteRange 
fillFamilyRange(Cell cell, ByteRange range) {
-069return 
range.set(cell.getFamilyArray(), cell.getFamilyOffset(), 
cell.getFamilyLength());
-070  }
-071
-072  public static ByteRange 
fillQualifierRange(Cell cell, ByteRange range) {
-073return 
range.set(cell.getQualifierArray(), cell.getQualifierOffset(),
-074  cell.getQualifierLength());
-075  }
-076
-077  public static ByteRange 
fillValueRange(Cell cell, ByteRange range) {
-078return 
range.set(cell.getValueArray(), cell.getValueOffset(), 
cell.getValueLength());
-079  }
-080
-081  public static ByteRange 
fillTagRange(Cell cell, ByteRange range) {
-082return range.set(cell.getTagsArray(), 
cell.getTagsOffset(), cell.getTagsLength());
-083  }
+062  public static ByteRange fillRowRange(Cell cell, ByteRange range) {
+063    return range.set(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
+064  }
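
A usage sketch for the fill*Range helpers above; not part of the published diff. PrivateCellUtil is an internal (@InterfaceAudience.Private) API, and SimpleMutableByteRange is assumed here as the ByteRange implementation:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.util.ByteRange;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.SimpleMutableByteRange;

public class FillRangeSketch {
  public static void main(String[] args) {
    Cell cell = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), Bytes.toBytes("v"));
    // One mutable range can be reused across many cells: no row bytes are copied.
    ByteRange range = new SimpleMutableByteRange();
    PrivateCellUtil.fillRowRange(cell, range);
    System.out.println(
        Bytes.toString(range.getBytes(), range.getOffset(), range.getLength())); // row1
  }
}
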

[01/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 637fc8695 -> b618ac405


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
index 86fc15e..d02bcdf 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
@@ -38,2586 +38,2590 @@
 030import java.nio.ByteBuffer;
 031import 
java.nio.charset.StandardCharsets;
 032import java.security.SecureRandom;
-033import java.util.Arrays;
-034import java.util.Collection;
-035import java.util.Comparator;
-036import java.util.Iterator;
-037import java.util.List;
-038
-039import org.apache.hadoop.hbase.Cell;
-040import 
org.apache.hadoop.hbase.CellComparator;
-041import 
org.apache.hadoop.hbase.KeyValue;
-042import 
org.apache.hadoop.io.RawComparator;
-043import 
org.apache.hadoop.io.WritableComparator;
-044import 
org.apache.hadoop.io.WritableUtils;
-045import 
org.apache.yetus.audience.InterfaceAudience;
-046import org.slf4j.Logger;
-047import org.slf4j.LoggerFactory;
-048
-049import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-050import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-051
-052import com.google.protobuf.ByteString;
-053
-054import sun.misc.Unsafe;
-055
-056/**
-057 * Utility class that handles byte 
arrays, conversions to/from other types,
-058 * comparisons, hash code generation, 
manufacturing keys for HashMaps or
-059 * HashSets, and can be used as key in 
maps or trees.
-060 */
-061@SuppressWarnings("restriction")
-062@InterfaceAudience.Public
-063@edu.umd.cs.findbugs.annotations.SuppressWarnings(
-064
value="EQ_CHECK_FOR_OPERAND_NOT_COMPATIBLE_WITH_THIS",
-065justification="It has been like this 
forever")
-066public class Bytes implements Comparable<Bytes> {
-067
-068  // Using the charset canonical name for 
String/byte[] conversions is much
-069  // more efficient due to use of cached 
encoders/decoders.
-070  private static final String UTF8_CSN = 
StandardCharsets.UTF_8.name();
-071
-072  //HConstants.EMPTY_BYTE_ARRAY should be 
updated if this changed
-073  private static final byte [] 
EMPTY_BYTE_ARRAY = new byte [0];
-074
-075  private static final Logger LOG = 
LoggerFactory.getLogger(Bytes.class);
-076
-077  /**
-078   * Size of boolean in bytes
-079   */
-080  public static final int SIZEOF_BOOLEAN 
= Byte.SIZE / Byte.SIZE;
-081
-082  /**
-083   * Size of byte in bytes
-084   */
-085  public static final int SIZEOF_BYTE = 
SIZEOF_BOOLEAN;
-086
-087  /**
-088   * Size of char in bytes
-089   */
-090  public static final int SIZEOF_CHAR = 
Character.SIZE / Byte.SIZE;
-091
-092  /**
-093   * Size of double in bytes
-094   */
-095  public static final int SIZEOF_DOUBLE = 
Double.SIZE / Byte.SIZE;
-096
-097  /**
-098   * Size of float in bytes
-099   */
-100  public static final int SIZEOF_FLOAT = 
Float.SIZE / Byte.SIZE;
-101
-102  /**
-103   * Size of int in bytes
-104   */
-105  public static final int SIZEOF_INT = 
Integer.SIZE / Byte.SIZE;
-106
-107  /**
-108   * Size of long in bytes
-109   */
-110  public static final int SIZEOF_LONG = 
Long.SIZE / Byte.SIZE;
-111
-112  /**
-113   * Size of short in bytes
-114   */
-115  public static final int SIZEOF_SHORT = 
Short.SIZE / Byte.SIZE;
-116
-117  /**
-118   * Mask to apply to a long to reveal 
the lower int only. Use like this:
-119   * int i = (int)(0xFFFFFFFF00000000L ^ some_long_value);
-120   */
-121  public static final long MASK_FOR_LOWER_INT_IN_LONG = 0xFFFFFFFF00000000L;
-122
-123  /**
-124   * Estimate of size cost to pay beyond 
payload in jvm for instance of byte [].
-125   * Estimate based on study of jhat and 
jprofiler numbers.
-126   */
-127  // JHat says BU is 56 bytes.
-128  // SizeOf which uses 
java.lang.instrument says 24 bytes. (3 longs?)
-129  public static final int 
ESTIMATED_HEAP_TAX = 16;
-130
-131  private static final boolean 
UNSAFE_UNALIGNED = UnsafeAvailChecker.unaligned();
-132
-133  /**
-134   * Returns length of the byte array, 
returning 0 if the array is null.
-135   * Useful for calculating sizes.
-136   * @param b byte array, which can be 
null
-137   * @return 0 if b is null, otherwise 
returns length
-138   */
-139  final public static int len(byte[] b) 
{
-140return b == null ? 0 : b.length;
-141  }
-142
-143  private byte[] bytes;
-144  private int offset;
-145  private int length;
-146
-147  /**
-148   * Create a zero-size sequence.
-149   */
-150  public Bytes() {
-151super();
-152  }
-153
-154  /**
-155   * Create a Bytes using the byte array 
as the initial value.
-156   * @param bytes This array becomes 
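
A small round-trip sketch over the SIZEOF_* constants and the null-safe len() helper shown above; not part of the published diff (Bytes.toBytes/Bytes.toInt are the class's own encoders):

import org.apache.hadoop.hbase.util.Bytes;

public class BytesSketch {
  public static void main(String[] args) {
    // SIZEOF_* constants are Type.SIZE / Byte.SIZE, e.g. Integer.SIZE / 8 == 4.
    System.out.println(Bytes.SIZEOF_INT + " " + Bytes.SIZEOF_LONG); // 4 8

    byte[] encoded = Bytes.toBytes(123456789); // big-endian int encoding
    System.out.println(Bytes.toInt(encoded));  // 123456789

    System.out.println(Bytes.len(null));       // 0, the null-safe length
  }
}
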

[01/51] [partial] hbase-site git commit: Published site at .

2017-12-23 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site d1bc8d7fa -> 7c0589c07


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
index f7fbfbf..88ebcbc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
@@ -34,1583 +34,1583 @@
 026import java.io.IOException;
 027import java.util.ArrayList;
 028import java.util.Arrays;
-029import java.util.Collection;
-030import java.util.Collections;
-031import java.util.EnumSet;
-032import java.util.HashMap;
-033import java.util.List;
-034import java.util.Map;
-035import java.util.Optional;
-036import java.util.Set;
-037import 
java.util.concurrent.CompletableFuture;
-038import java.util.concurrent.TimeUnit;
-039import 
java.util.concurrent.atomic.AtomicReference;
-040import java.util.function.BiConsumer;
-041import java.util.function.Function;
-042import java.util.regex.Pattern;
-043import java.util.stream.Collectors;
-044import java.util.stream.Stream;
-045import org.apache.commons.io.IOUtils;
-046import 
org.apache.hadoop.conf.Configuration;
-047import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-048import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-049import 
org.apache.hadoop.hbase.ClusterStatus;
-050import 
org.apache.hadoop.hbase.HConstants;
-051import 
org.apache.hadoop.hbase.HRegionLocation;
-052import 
org.apache.hadoop.hbase.MetaTableAccessor;
-053import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-055import 
org.apache.hadoop.hbase.RegionLoad;
-056import 
org.apache.hadoop.hbase.RegionLocations;
-057import 
org.apache.hadoop.hbase.ServerName;
-058import 
org.apache.hadoop.hbase.TableExistsException;
-059import 
org.apache.hadoop.hbase.TableName;
-060import 
org.apache.hadoop.hbase.TableNotDisabledException;
-061import 
org.apache.hadoop.hbase.TableNotEnabledException;
-062import 
org.apache.hadoop.hbase.TableNotFoundException;
-063import 
org.apache.hadoop.hbase.UnknownRegionException;
-064import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-065import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-066import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder;
-067import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-068import 
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-069import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-070import 
org.apache.hadoop.hbase.client.security.SecurityCapability;
-071import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-072import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-073import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-074import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-075import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-076import 
org.apache.hadoop.hbase.replication.ReplicationException;
-077import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-078import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-079import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-080import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-081import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-082import 
org.apache.hadoop.hbase.util.Bytes;
-083import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-084import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-085import 
org.apache.yetus.audience.InterfaceAudience;
-086import org.slf4j.Logger;
-087import org.slf4j.LoggerFactory;
-088
-089import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-090import 
org.apache.hadoop.hbase.shaded.io.netty.util.HashedWheelTimer;
-091import 
org.apache.hadoop.hbase.shaded.io.netty.util.Timeout;
-092import 
org.apache.hadoop.hbase.shaded.io.netty.util.TimerTask;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;

[01/51] [partial] hbase-site git commit: Published site at .

2017-12-22 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 4f18f8a6e -> c4b2cc17a


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/devapidocs/org/apache/hadoop/hbase/ServerLoad.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ServerLoad.html 
b/devapidocs/org/apache/hadoop/hbase/ServerLoad.html
index bf7e6d7..44841ae 100644
--- a/devapidocs/org/apache/hadoop/hbase/ServerLoad.html
+++ b/devapidocs/org/apache/hadoop/hbase/ServerLoad.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":42,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":42,"i20":10,"i21":10,"i22":42,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10};
+var methods = 
{"i0":42,"i1":42,"i2":42,"i3":42,"i4":42,"i5":42,"i6":42,"i7":42,"i8":42,"i9":42,"i10":42,"i11":42,"i12":42,"i13":42,"i14":42,"i15":42,"i16":42,"i17":42,"i18":42,"i19":42,"i20":42,"i21":42,"i22":42,"i23":42,"i24":42,"i25":42,"i26":42,"i27":42,"i28":42,"i29":42,"i30":42,"i31":42,"i32":42,"i33":42,"i34":42,"i35":42,"i36":42,"i37":42,"i38":42,"i39":42,"i40":42,"i41":42,"i42":42,"i43":42,"i44":42,"i45":42,"i46":42,"i47":42};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames
@@ -107,11 +107,21 @@ var activeTableTab = "activeTableTab";
 
 
 
+
+All Implemented Interfaces:
+ServerMetrics
+
 
+Deprecated.
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ Use ServerMetrics instead.
+
 
 @InterfaceAudience.Public
-public class ServerLoad
-extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+ @Deprecated
+public class ServerLoad
+extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+implements ServerMetrics
This class is used for exporting current state of load on a RegionServer.
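
A hedged migration sketch for the deprecation above, not part of the published diff, assuming an HBase 2.x client where ClusterMetrics/ServerMetrics replace ClusterStatus/ServerLoad:

import java.io.IOException;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;

public class ServerMetricsSketch {
  // Replaces ServerLoad-based reporting with the ServerMetrics view.
  static void printRequestCounts(Admin admin) throws IOException {
    ClusterMetrics cluster = admin.getClusterMetrics();
    for (ServerMetrics sm : cluster.getLiveServerMetrics().values()) {
      ServerName server = sm.getServerName();
      System.out.println(server + " requests=" + sm.getRequestCount());
    }
  }
}
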
 
 
@@ -133,71 +143,108 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 private long
-currentCompactedKVs
+currentCompactedKVs
+Deprecated.
+
 
 
 static ServerLoad
-EMPTY_SERVERLOAD
+EMPTY_SERVERLOAD
+Deprecated.
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ Use ServerMetricsBuilder.of(ServerName)
 instead.
+
+
 
 
 private long
-filteredReadRequestsCount
+filteredReadRequestsCount
+Deprecated.
+
 
 
 private int
-memstoreSizeMB
+memstoreSizeMB
+Deprecated.
+
 
 
-private long
-readRequestsCount
+private ServerMetrics
+metrics
+Deprecated.
+
 
 
 private long
-reportTime
+readRequestsCount
+Deprecated.
+
 
 
 private int
-rootIndexSizeKB
+rootIndexSizeKB
+Deprecated.
+
 
 
 protected 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad
-serverLoad
+serverLoad
+Deprecated.
+
 
 
 private long
-storefileIndexSizeKB
+storefileIndexSizeKB
+Deprecated.
+
 
 
 private int
-storefiles
+storefiles
+Deprecated.
+
 
 
 private int
-storefileSizeMB
+storefileSizeMB
+Deprecated.
+
 
 
 private int
-stores
+stores
+Deprecated.
+
 
 
 private int
-storeUncompressedSizeMB
+storeUncompressedSizeMB
+Deprecated.
+
 
 
 private long
-totalCompactingKVs
+totalCompactingKVs
+Deprecated.
+
 
 
 private int
-totalStaticBloomSizeKB
+totalStaticBloomSizeKB
+Deprecated.
+
 
 
 private int
-totalStaticIndexSizeKB
+totalStaticIndexSizeKB
+Deprecated.
+
 
 
 private long
-writeRequestsCount
+writeRequestsCount
+Deprecated.
+
 
 
 
@@ -214,7 +261,21 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Constructor and Description
 
 
-ServerLoad(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoadserverLoad)
+ServerLoad(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoadserverLoad)
+Deprecated.
+DONT USE this construction.
+
+
+
+ServerLoad(ServerMetricsmetrics)
+Deprecated.
+
+
+
+ServerLoad(ServerNamename,
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoadserverLoad)
+Deprecated.
+
 
 
 
@@ -233,185 +294,392 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 boolean
-equals(http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in 
java.lang">Objectother)
+equals(http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in 

[01/51] [partial] hbase-site git commit: Published site at .

2017-12-21 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 5eb63ae9d -> 505bbb2e1


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
index 4f79024..d3e92f5 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-class RawAsyncHBaseAdmin
+class RawAsyncHBaseAdmin
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements AsyncAdmin
 The implementation of AsyncAdmin.
@@ -257,7 +257,7 @@ implements FLUSH_TABLE_PROCEDURE_SIGNATURE
 
 
-private static 
org.apache.commons.logging.Log
+private static org.slf4j.Logger
 LOG
 
 
@@ -1401,7 +1401,7 @@ implements 
 
 FLUSH_TABLE_PROCEDURE_SIGNATURE
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String FLUSH_TABLE_PROCEDURE_SIGNATURE
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String FLUSH_TABLE_PROCEDURE_SIGNATURE
 
 See Also:
 Constant
 Field Values
@@ -1414,7 +1414,7 @@ implements 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.slf4j.Logger LOG
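
The field diff above reflects the commons-logging to slf4j migration; a minimal sketch of the idiomatic replacement, not part of the published diff:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class Slf4jSketch {
  private static final Logger LOG = LoggerFactory.getLogger(Slf4jSketch.class);

  void flush(String table) {
    LOG.info("flushing table {}", table); // {} placeholders replace string concatenation
  }
}
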
 
 
 
@@ -1423,7 +1423,7 @@ implements 
 
 connection
-private finalAsyncConnectionImpl connection
+private finalAsyncConnectionImpl connection
 
 
 
@@ -1432,7 +1432,7 @@ implements 
 
 retryTimer
-private 
finalorg.apache.hadoop.hbase.shaded.io.netty.util.HashedWheelTimer retryTimer
+private 
finalorg.apache.hadoop.hbase.shaded.io.netty.util.HashedWheelTimer retryTimer
 
 
 
@@ -1441,7 +1441,7 @@ implements 
 
 metaTable
-private finalAsyncTableAdvancedScanResultConsumer metaTable
+private finalAsyncTableAdvancedScanResultConsumer metaTable
 
 
 
@@ -1450,7 +1450,7 @@ implements 
 
 rpcTimeoutNs
-private finallong rpcTimeoutNs
+private finallong rpcTimeoutNs
 
 
 
@@ -1459,7 +1459,7 @@ implements 
 
 operationTimeoutNs
-private finallong operationTimeoutNs
+private finallong operationTimeoutNs
 
 
 
@@ -1468,7 +1468,7 @@ implements 
 
 pauseNs
-private finallong pauseNs
+private finallong pauseNs
 
 
 
@@ -1477,7 +1477,7 @@ implements 
 
 maxAttempts
-private finalint maxAttempts
+private finalint maxAttempts
 
 
 
@@ -1486,7 +1486,7 @@ implements 
 
 startLogErrorsCnt
-private finalint startLogErrorsCnt
+private finalint startLogErrorsCnt
 
 
 
@@ -1495,7 +1495,7 @@ implements 
 
 ng
-private finalNonceGenerator ng
+private finalNonceGenerator ng
 
 
 
@@ -1512,7 +1512,7 @@ implements 
 
 RawAsyncHBaseAdmin
-RawAsyncHBaseAdmin(AsyncConnectionImplconnection,
+RawAsyncHBaseAdmin(AsyncConnectionImplconnection,

org.apache.hadoop.hbase.shaded.io.netty.util.HashedWheelTimerretryTimer,
AsyncAdminBuilderBasebuilder)
 
@@ -1531,7 +1531,7 @@ implements 
 
 newMasterCaller
-privateTAsyncRpcRetryingCallerFactory.MasterRequestCallerBuilderTnewMasterCaller()
+privateTAsyncRpcRetryingCallerFactory.MasterRequestCallerBuilderTnewMasterCaller()
 
 
 
@@ -1540,7 +1540,7 @@ implements 
 
 newAdminCaller
-privateTAsyncRpcRetryingCallerFactory.AdminRequestCallerBuilderTnewAdminCaller()
+privateTAsyncRpcRetryingCallerFactory.AdminRequestCallerBuilderTnewAdminCaller()
 
 
 
@@ -1551,7 +1551,7 @@ implements 
 
 call
-privatePREQ,PRESP,RESPhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFutureRESPcall(HBaseRpcControllercontroller,
+privatePREQ,PRESP,RESPhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFutureRESPcall(HBaseRpcControllercontroller,

org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.Interfacestub,
PREQpreq,
RawAsyncHBaseAdmin.MasterRpcCallPRESP,PREQrpcCall,
@@ -1566,7 +1566,7 @@ implements 
 
 adminCall
-privatePREQ,PRESP,RESPhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFutureRESPadminCall(HBaseRpcControllercontroller,
+privatePREQ,PRESP,RESPhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 

[01/51] [partial] hbase-site git commit: Published site at .

2017-12-16 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 04e5a02a8 -> c4c0cfa51


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index e40bc33..5fa911e 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -286,10 +286,10 @@
 Warnings
 Errors
 
-3457
+3453
 0
 0
-20529
+19823
 
 Files
 
@@ -309,16 +309,6 @@
 0
 7
 
-org/apache/hadoop/hbase/ArrayBackedTag.java
-0
-0
-1
-
-org/apache/hadoop/hbase/AsyncConsoleAppender.java
-0
-0
-1
-
 org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
 0
 0
@@ -327,307 +317,287 @@
 org/apache/hadoop/hbase/AuthUtil.java
 0
 0
-3
+2
 
-org/apache/hadoop/hbase/BaseConfigurable.java
-0
-0
-1
-
 org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
 0
 0
-4
-
-org/apache/hadoop/hbase/ByteBufferKeyValue.java
-0
-0
-1
+3
 
-org/apache/hadoop/hbase/ByteBufferTag.java
-0
-0
-1
-
 org/apache/hadoop/hbase/CategoryBasedTimeout.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/Cell.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/CellBuilderFactory.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/CellComparatorImpl.java
 0
 0
-14
-
+13
+
 org/apache/hadoop/hbase/CellScanner.java
 0
 0
-2
-
+1
+
 org/apache/hadoop/hbase/CellUtil.java
 0
 0
 68
-
+
 org/apache/hadoop/hbase/ChoreService.java
 0
 0
-5
-
+4
+
 org/apache/hadoop/hbase/ClassFinder.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/ClusterId.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/ClusterManager.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/ClusterStatus.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/CompatibilityFactory.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/CompatibilitySingletonFactory.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/CompoundConfiguration.java
 0
 0
-4
-
+3
+
 org/apache/hadoop/hbase/CoordinatedStateException.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/CoordinatedStateManager.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/Coprocessor.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/DistributedHBaseCluster.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/DoNotRetryIOException.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/DroppedSnapshotException.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/ExtendedCell.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/GenericTestUtils.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/HBaseCluster.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/HBaseClusterManager.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/HBaseCommonTestingUtility.java
 0
 0
 13
-
+
 org/apache/hadoop/hbase/HBaseConfiguration.java
 0
 0
-7
-
+6
+
 org/apache/hadoop/hbase/HBaseTestCase.java
 0
 0
 35
-
+
 org/apache/hadoop/hbase/HBaseTestingUtility.java
 0
 0
-267
-
+266
+
 org/apache/hadoop/hbase/HColumnDescriptor.java
 0
 0
 40
-
+
 org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
 0
 0
 15
-
+
 org/apache/hadoop/hbase/HRegionInfo.java
 0
 0
 55
-
+
 org/apache/hadoop/hbase/HRegionLocation.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/HTableDescriptor.java
 0
 0
 38
-
+
 org/apache/hadoop/hbase/HTestConst.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/HadoopShimsImpl.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/HealthChecker.java
 0
 0
 17
-
+
 org/apache/hadoop/hbase/IndividualBytesFieldCell.java
 0
 0
 9
-
+
 org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/IntegrationTestBase.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.java
 0
 0
 51
-
+
 org/apache/hadoop/hbase/IntegrationTestIngest.java
 0
 0
 10
-
+
 org/apache/hadoop/hbase/IntegrationTestIngestWithACL.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/IntegrationTestIngestWithEncryption.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/IntegrationTestIngestWithVisibilityLabels.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/IntegrationTestManyRegions.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java
 0
 0
 11
-
+
 org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/IntegrationTestingUtility.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/JMXListener.java
 0
 0
 5
-
-org/apache/hadoop/hbase/JitterScheduledThreadPoolExecutorImpl.java
-0
-0
-1
 
 org/apache/hadoop/hbase/KeyValue.java
 0
 0
-118
+117
 
 org/apache/hadoop/hbase/KeyValueTestUtil.java
 0
 0
-9
+8
 
 org/apache/hadoop/hbase/KeyValueUtil.java
 0
 0
-30
+29
 
 org/apache/hadoop/hbase/LocalHBaseCluster.java
 0
@@ -652,7 +622,7 @@
 org/apache/hadoop/hbase/MiniHBaseCluster.java
 0
 0
-26
+28
 
 org/apache/hadoop/hbase/MockRegionServerServices.java
 0
@@ -667,17 +637,7 @@
 org/apache/hadoop/hbase/NamespaceDescriptor.java
 0
 0

[01/51] [partial] hbase-site git commit: Published site at .

2017-12-15 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site e071e2f52 -> 071f974ba


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.Listener.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.Listener.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.Listener.html
index 67e6eae..a83310a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.Listener.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.Listener.html
@@ -51,10 +51,10 @@
 043
 044import 
org.apache.hadoop.conf.Configuration;
 045import 
org.apache.hadoop.hbase.CellScanner;
-046import 
org.apache.hadoop.hbase.HConstants;
-047import org.apache.hadoop.hbase.Server;
-048import 
org.apache.yetus.audience.InterfaceAudience;
-049import 
org.apache.yetus.audience.InterfaceStability;
+046import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
+047import 
org.apache.hadoop.hbase.HConstants;
+048import org.apache.hadoop.hbase.Server;
+049import 
org.apache.yetus.audience.InterfaceAudience;
 050import 
org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
 051import 
org.apache.hadoop.hbase.security.HBasePolicyProvider;
 052import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
@@ -89,624 +89,623 @@
 081 *
 082 * @see BlockingRpcClient
 083 */
-084@InterfaceAudience.Private
-085@InterfaceStability.Evolving
-086public class SimpleRpcServer extends 
RpcServer {
-087
-088  protected int port; 
// port we listen on
-089  protected InetSocketAddress address;
// inet address we listen on
-090  private int readThreads;
// number of read threads
-091
-092  protected int socketSendBufferSize;
-093  protected final long purgeTimeout;
// in milliseconds
-094
-095  // maintains the set of client 
connections and handles idle timeouts
-096  private ConnectionManager 
connectionManager;
-097  private Listener listener = null;
-098  protected SimpleRpcServerResponder 
responder = null;
-099
-100  /** Listens on the socket. Creates jobs 
for the handler threads*/
-101  private class Listener extends Thread 
{
-102
-103private ServerSocketChannel 
acceptChannel = null; //the accept channel
-104private Selector selector = null; 
//the selector that we use for the server
-105private Reader[] readers = null;
-106private int currentReader = 0;
-107private final int 
readerPendingConnectionQueueLength;
-108
-109private ExecutorService readPool;
-110
-111public Listener(final String name) 
throws IOException {
-112  super(name);
-113  // The backlog of requests that we 
will have the serversocket carry.
-114  int backlogLength = 
conf.getInt("hbase.ipc.server.listen.queue.size", 128);
-115  readerPendingConnectionQueueLength 
=
-116  
conf.getInt("hbase.ipc.server.read.connection-queue.size", 100);
-117  // Create a new server socket and 
set to non blocking mode
-118  acceptChannel = 
ServerSocketChannel.open();
-119  
acceptChannel.configureBlocking(false);
-120
-121  // Bind the server socket to the binding address (can be different from the default interface)
-122  bind(acceptChannel.socket(), 
bindAddress, backlogLength);
-123  port = 
acceptChannel.socket().getLocalPort(); //Could be an ephemeral port
-124  address = 
(InetSocketAddress)acceptChannel.socket().getLocalSocketAddress();
-125  // create a selector;
-126  selector = Selector.open();
-127
-128  readers = new 
Reader[readThreads];
-129  // Why this executor thing? Why not 
like hadoop just start up all the threads? I suppose it
-130  // has an advantage in that it is 
easy to shutdown the pool.
-131  readPool = 
Executors.newFixedThreadPool(readThreads,
-132new 
ThreadFactoryBuilder().setNameFormat(
-133  "Reader=%d,bindAddress=" + 
bindAddress.getHostName() +
-134  ",port=" + 
port).setDaemon(true)
-135
.setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
-136  for (int i = 0; i < readThreads; ++i) {
-137Reader reader = new Reader();
-138readers[i] = reader;
-139readPool.execute(reader);
-140  }
-141  LOG.info(getName() + ": started " + 
readThreads + " reader(s) listening on port=" + port);
-142
-143  // Register accepts on the server 
socket with the selector.
-144  acceptChannel.register(selector, 
SelectionKey.OP_ACCEPT);
-145  this.setName("Listener,port=" + 
port);
-146  this.setDaemon(true);
-147}
+084@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.CONFIG})
+085public class SimpleRpcServer extends 
RpcServer {
+086
+087  protected int port; 
// port we listen on
+088  protected InetSocketAddress address;
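
The two queue sizes read in the Listener constructor above are plain Configuration tunables; a sketch, not part of the published diff (keys verbatim from the source, defaults 128 and 100 as shown):

import org.apache.hadoop.conf.Configuration;

public class RpcListenerTuning {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt("hbase.ipc.server.listen.queue.size", 256);          // accept backlog
    conf.setInt("hbase.ipc.server.read.connection-queue.size", 200); // per-reader pending queue
    System.out.println(conf.getInt("hbase.ipc.server.listen.queue.size", 128));
  }
}
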

[01/51] [partial] hbase-site git commit: Published site at .

2017-12-14 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 8e79a8db9 -> dc4e5c856


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
index 219283e..2b5d70b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
@@ -435,1198 +435,1203 @@
 427
 428if (backingMap.containsKey(cacheKey)) 
{
 429  Cacheable existingBlock = 
getBlock(cacheKey, false, false, false);
-430  if 
(BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) {
-431throw new 
RuntimeException("Cached block contents differ, which should not have 
happened."
-432+ "cacheKey:" + cacheKey);
-433  }
-434   String msg = "Caching an already 
cached block: " + cacheKey;
-435   msg += ". This is harmless and can 
happen in rare cases (see HBASE-8547)";
-436   LOG.warn(msg);
-437  return;
-438}
-439
-440/*
-441 * Stuff the entry into the RAM cache 
so it can get drained to the persistent store
-442 */
-443RAMQueueEntry re =
-444new RAMQueueEntry(cacheKey, 
cachedItem, accessCount.incrementAndGet(), inMemory);
-445if (ramCache.putIfAbsent(cacheKey, 
re) != null) {
-446  return;
-447}
-448int queueNum = (cacheKey.hashCode() & 0x7FFFFFFF) % writerQueues.size();
-449BlockingQueue<RAMQueueEntry> bq = writerQueues.get(queueNum);
-450boolean successfulAddition = false;
-451if (wait) {
-452  try {
-453successfulAddition = bq.offer(re, 
DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
-454  } catch (InterruptedException e) 
{
-455
Thread.currentThread().interrupt();
-456  }
-457} else {
-458  successfulAddition = 
bq.offer(re);
-459}
-460if (!successfulAddition) {
-461  ramCache.remove(cacheKey);
-462  cacheStats.failInsert();
-463} else {
-464  this.blockNumber.increment();
-465  
this.heapSize.add(cachedItem.heapSize());
-466  blocksByHFile.add(cacheKey);
-467}
-468  }
-469
-470  /**
-471   * Get the buffer of the block with the 
specified key.
-472   * @param key block's cache key
-473   * @param caching true if the caller 
caches blocks on cache misses
-474   * @param repeat Whether this is a 
repeat lookup for the same block
-475   * @param updateCacheMetrics Whether we 
should update cache metrics or not
-476   * @return buffer of specified cache 
key, or null if not in cache
-477   */
-478  @Override
-479  public Cacheable getBlock(BlockCacheKey 
key, boolean caching, boolean repeat,
-480  boolean updateCacheMetrics) {
-481if (!cacheEnabled) {
-482  return null;
-483}
-484RAMQueueEntry re = 
ramCache.get(key);
-485if (re != null) {
-486  if (updateCacheMetrics) {
-487cacheStats.hit(caching, 
key.isPrimary(), key.getBlockType());
-488  }
-489  
re.access(accessCount.incrementAndGet());
-490  return re.getData();
-491}
-492BucketEntry bucketEntry = 
backingMap.get(key);
-493if (bucketEntry != null) {
-494  long start = System.nanoTime();
-495  ReentrantReadWriteLock lock = 
offsetLock.getLock(bucketEntry.offset());
-496  try {
-497lock.readLock().lock();
-498// We can not read here even if 
backingMap does contain the given key because its offset
-499// maybe changed. If we lock 
BlockCacheKey instead of offset, then we can only check
-500// existence here.
-501if 
(bucketEntry.equals(backingMap.get(key))) {
-502  // TODO : change this area - 
should be removed after server cells and
-503  // 12295 are available
-504  int len = 
bucketEntry.getLength();
-505  if (LOG.isTraceEnabled()) {
-506LOG.trace("Read offset=" + 
bucketEntry.offset() + ", len=" + len);
-507  }
-508  Cacheable cachedBlock = 
ioEngine.read(bucketEntry.offset(), len,
-509  
bucketEntry.deserializerReference(this.deserialiserMap));
-510  long timeTaken = 
System.nanoTime() - start;
-511  if (updateCacheMetrics) {
-512cacheStats.hit(caching, 
key.isPrimary(), key.getBlockType());
-513
cacheStats.ioHit(timeTaken);
-514  }
-515  if (cachedBlock.getMemoryType() 
== MemoryType.SHARED) {
-516
bucketEntry.refCount.incrementAndGet();
-517  }
-518  
bucketEntry.access(accessCount.incrementAndGet());
-519  if (this.ioErrorStartTime > 0) {
-520ioErrorStartTime = -1;
-521  }
-522  return cachedBlock;
-523}
-524  } catch (IOException ioex) {
-525
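
A minimal, dependency-free sketch of the write path above, not part of the published diff: stage the entry in a RAM map, offer it to a bounded writer queue, and roll the map back if the offer fails (String/byte[] stand in for BlockCacheKey/Cacheable):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;

public class StagedCacheWriteSketch {
  static final ConcurrentMap<String, byte[]> ramCache = new ConcurrentHashMap<>();
  static final BlockingQueue<String> writerQueue = new ArrayBlockingQueue<>(64);

  static boolean cacheBlock(String key, byte[] block, boolean wait)
      throws InterruptedException {
    if (ramCache.putIfAbsent(key, block) != null) {
      return false; // already staged by another caller
    }
    boolean added = wait
        ? writerQueue.offer(key, 50, TimeUnit.MILLISECONDS)
        : writerQueue.offer(key);
    if (!added) {
      ramCache.remove(key); // roll back so a later attempt can retry
    }
    return added;
  }

  public static void main(String[] args) throws InterruptedException {
    System.out.println(cacheBlock("block-1", new byte[16], true)); // true
  }
}
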

[01/51] [partial] hbase-site git commit: Published site at .

2017-12-13 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 76952d27a -> 4abd958d5


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/src-html/org/apache/hadoop/hbase/client/ClusterStatusListener.DeadServerHandler.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ClusterStatusListener.DeadServerHandler.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ClusterStatusListener.DeadServerHandler.html
index b7e66b5..267ef6f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ClusterStatusListener.DeadServerHandler.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ClusterStatusListener.DeadServerHandler.html
@@ -28,41 +28,41 @@
 020package org.apache.hadoop.hbase.client;
 021
 022
-023import 
org.apache.hadoop.hbase.shaded.io.netty.bootstrap.Bootstrap;
-024import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufInputStream;
-025import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-026import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption;
-027import 
org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoopGroup;
-028import 
org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-029import 
org.apache.hadoop.hbase.shaded.io.netty.channel.nio.NioEventLoopGroup;
-030import 
org.apache.hadoop.hbase.shaded.io.netty.channel.socket.DatagramChannel;
-031import 
org.apache.hadoop.hbase.shaded.io.netty.channel.socket.DatagramPacket;
-032import 
org.apache.hadoop.hbase.shaded.io.netty.channel.socket.nio.NioDatagramChannel;
+023
+024import java.io.Closeable;
+025import java.io.IOException;
+026import java.lang.reflect.Constructor;
+027import 
java.lang.reflect.InvocationTargetException;
+028import java.net.InetAddress;
+029import java.net.NetworkInterface;
+030import java.net.UnknownHostException;
+031import java.util.ArrayList;
+032import java.util.List;
 033
-034import java.io.Closeable;
-035import java.io.IOException;
-036import java.lang.reflect.Constructor;
-037import 
java.lang.reflect.InvocationTargetException;
-038import java.net.InetAddress;
-039import java.net.NetworkInterface;
-040import java.net.UnknownHostException;
-041import java.util.ArrayList;
-042import java.util.List;
-043
-044import org.apache.commons.logging.Log;
-045import 
org.apache.commons.logging.LogFactory;
-046import 
org.apache.hadoop.conf.Configuration;
-047import 
org.apache.hadoop.hbase.ClusterStatus;
-048import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-049import 
org.apache.hadoop.hbase.HConstants;
-050import 
org.apache.hadoop.hbase.ServerName;
-051import 
org.apache.yetus.audience.InterfaceAudience;
-052import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-053import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-054import 
org.apache.hadoop.hbase.util.Addressing;
-055import 
org.apache.hadoop.hbase.util.ExceptionUtil;
-056import 
org.apache.hadoop.hbase.util.Threads;
-057
+034import org.apache.commons.logging.Log;
+035import 
org.apache.commons.logging.LogFactory;
+036import 
org.apache.hadoop.conf.Configuration;
+037import 
org.apache.hadoop.hbase.ClusterStatus;
+038import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
+039import 
org.apache.hadoop.hbase.HConstants;
+040import 
org.apache.hadoop.hbase.ServerName;
+041import 
org.apache.hadoop.hbase.util.Addressing;
+042import 
org.apache.hadoop.hbase.util.ExceptionUtil;
+043import 
org.apache.hadoop.hbase.util.Threads;
+044import 
org.apache.yetus.audience.InterfaceAudience;
+045
+046import 
org.apache.hadoop.hbase.shaded.io.netty.bootstrap.Bootstrap;
+047import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufInputStream;
+048import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
+049import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption;
+050import 
org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoopGroup;
+051import 
org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
+052import 
org.apache.hadoop.hbase.shaded.io.netty.channel.nio.NioEventLoopGroup;
+053import 
org.apache.hadoop.hbase.shaded.io.netty.channel.socket.DatagramChannel;
+054import 
org.apache.hadoop.hbase.shaded.io.netty.channel.socket.DatagramPacket;
+055import 
org.apache.hadoop.hbase.shaded.io.netty.channel.socket.nio.NioDatagramChannel;
+056import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+057import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
 058
 059/**
 060 * A class that receives the cluster 
status, and provide it as a set of service to the client.
@@ -112,7 +112,7 @@
 104 * Called to connect.
 105 *
 106 * @param conf Configuration to 
use.
-107 * @throws IOException
+107 * @throws IOException if failing to 
connect
 108 */
 109void connect(Configuration conf) 
throws IOException;
 110  }
@@ -205,76 
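
A sketch of the connect contract shown above, not part of the published diff; the multicast key and default are assumptions drawn from HConstants, and the real transport is the netty datagram pipeline in the imports:

import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;

interface StatusListenerSketch extends Closeable {
  void connect(Configuration conf) throws IOException;
}

class LoggingStatusListener implements StatusListenerSketch {
  @Override
  public void connect(Configuration conf) throws IOException {
    // Assumed key/default; the real listener binds a multicast DatagramChannel here.
    System.out.println("cluster status via "
        + conf.get("hbase.status.multicast.address.ip", "226.1.1.3"));
  }

  @Override
  public void close() {
    // nothing to release in this sketch
  }
}
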

[01/51] [partial] hbase-site git commit: Published site at .

2017-12-09 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 116e12f36 -> e23b49ba9


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.OnheapDecodedCell.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.OnheapDecodedCell.html
 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.OnheapDecodedCell.html
index 11b96bc..2dfc324 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.OnheapDecodedCell.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.OnheapDecodedCell.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 All Implemented Interfaces:
-http://docs.oracle.com/javase/8/docs/api/java/lang/Cloneable.html?is-external=true;
 title="class or interface in java.lang">Cloneable, Cell, ExtendedCell, HeapSize, RawCell, SettableSequenceId, SettableTimestamp
+http://docs.oracle.com/javase/8/docs/api/java/lang/Cloneable.html?is-external=true;
 title="class or interface in java.lang">Cloneable, Cell, ExtendedCell, HeapSize, RawCell
 
 
 Enclosing class:
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-protected static class BufferedDataBlockEncoder.OnheapDecodedCell
+protected static class BufferedDataBlockEncoder.OnheapDecodedCell
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements ExtendedCell
 Copies only the key part of the keybuffer by doing a deep 
copy and passes the
@@ -377,8 +377,7 @@ implements 
 
 void
-setTimestamp(byte[] ts, int tsOffset)
+setTimestamp(byte[] ts)
 Sets with the given timestamp.
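
The narrowed setter above takes the timestamp as a full 8-byte array; a sketch of the encoding it expects, not part of the published diff (assuming Bytes.toBytes(long)):

import org.apache.hadoop.hbase.util.Bytes;

public class TimestampBytesSketch {
  public static void main(String[] args) {
    long ts = 1512345678000L;
    byte[] encoded = Bytes.toBytes(ts);              // 8 bytes, big-endian
    System.out.println(encoded.length);              // 8 == Bytes.SIZEOF_LONG
    System.out.println(Bytes.toLong(encoded) == ts); // true
  }
}
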
 
 
@@ -448,7 +447,7 @@ implements 
 
 FIXED_OVERHEAD
-private static finallong FIXED_OVERHEAD
+private static finallong FIXED_OVERHEAD
 
 
 
@@ -457,7 +456,7 @@ implements 
 
 keyOnlyBuffer
-privatebyte[] keyOnlyBuffer
+privatebyte[] keyOnlyBuffer
 
 
 
@@ -466,7 +465,7 @@ implements 
 
 rowLength
-privateshort rowLength
+privateshort rowLength
 
 
 
@@ -475,7 +474,7 @@ implements 
 
 familyOffset
-privateint familyOffset
+privateint familyOffset
 
 
 
@@ -484,7 +483,7 @@ implements 
 
 familyLength
-privatebyte familyLength
+privatebyte familyLength
 
 
 
@@ -493,7 +492,7 @@ implements 
 
 qualifierOffset
-privateint qualifierOffset
+privateint qualifierOffset
 
 
 
@@ -502,7 +501,7 @@ implements 
 
 qualifierLength
-privateint qualifierLength
+privateint qualifierLength
 
 
 
@@ -511,7 +510,7 @@ implements 
 
 timestamp
-privatelong timestamp
+privatelong timestamp
 
 
 
@@ -520,7 +519,7 @@ implements 
 
 typeByte
-privatebyte typeByte
+privatebyte typeByte
 
 
 
@@ -529,7 +528,7 @@ implements 
 
 valueBuffer
-privatebyte[] valueBuffer
+privatebyte[] valueBuffer
 
 
 
@@ -538,7 +537,7 @@ implements 
 
 valueOffset
-privateint valueOffset
+privateint valueOffset
 
 
 
@@ -547,7 +546,7 @@ implements 
 
 valueLength
-privateint valueLength
+privateint valueLength
 
 
 
@@ -556,7 +555,7 @@ implements 
 
 tagsBuffer
-privatebyte[] tagsBuffer
+privatebyte[] tagsBuffer
 
 
 
@@ -565,7 +564,7 @@ implements 
 
 tagsOffset
-privateint tagsOffset
+privateint tagsOffset
 
 
 
@@ -574,7 +573,7 @@ implements 
 
 tagsLength
-privateint tagsLength
+privateint tagsLength
 
 
 
@@ -583,7 +582,7 @@ implements 
 
 seqId
-privatelong seqId
+privatelong seqId
 
 
 
@@ -600,7 +599,7 @@ implements 
 
 OnheapDecodedCell
-protectedOnheapDecodedCell(byte[]keyBuffer,
+protectedOnheapDecodedCell(byte[]keyBuffer,
 shortrowLength,
 intfamilyOffset,
 bytefamilyLength,
@@ -631,7 +630,7 @@ implements 
 
 getRowArray
-publicbyte[]getRowArray()
+publicbyte[]getRowArray()
 Description copied from 
interface:Cell
 Contiguous raw bytes that may start at any index in the 
containing array. Max length is
  Short.MAX_VALUE which is 32,767 bytes.
@@ -649,7 +648,7 @@ implements 
 
 getFamilyArray
-publicbyte[]getFamilyArray()
+publicbyte[]getFamilyArray()
 Description copied from 
interface:Cell
 Contiguous bytes composed of legal HDFS filename characters 
which may start at any index in the
  containing array. Max length is Byte.MAX_VALUE, which is 127 bytes.
@@ -667,7 +666,7 @@ implements 
 
 getQualifierArray
-publicbyte[]getQualifierArray()
+publicbyte[]getQualifierArray()
 Description copied from 
interface:Cell
 Contiguous raw bytes that may start at any index in the 
containing array.
 
@@ -684,7 +683,7 @@ implements 
 
 getRowOffset
-publicintgetRowOffset()
+publicintgetRowOffset()
 
 Specified by:
 getRowOffsetin
 interfaceCell
@@ -699,7 +698,7 @@ implements 
 
 getRowLength
-publicshortgetRowLength()
+publicshortgetRowLength()
 
 Specified by:
 getRowLengthin
 interfaceCell
@@ -714,7 +713,7 @@ implements 
 
 getFamilyOffset

[01/51] [partial] hbase-site git commit: Published site at .

2017-12-06 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 89207f3b0 -> d171b8965


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-build-configuration/hbase-spark/dependency-info.html
--
diff --git a/hbase-build-configuration/hbase-spark/dependency-info.html 
b/hbase-build-configuration/hbase-spark/dependency-info.html
index 9742b61..ea6e8d0 100644
--- a/hbase-build-configuration/hbase-spark/dependency-info.html
+++ b/hbase-build-configuration/hbase-spark/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Spark  Dependency Information
 
@@ -147,7 +147,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-05
+  Last Published: 
2017-12-06
 
 
 



[01/51] [partial] hbase-site git commit: Published site at .

2017-12-03 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 21e0e1161 -> c54c242b2


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/testdevapidocs/overview-tree.html
--
diff --git a/testdevapidocs/overview-tree.html 
b/testdevapidocs/overview-tree.html
index cfa835f..af906f2 100644
--- a/testdevapidocs/overview-tree.html
+++ b/testdevapidocs/overview-tree.html
@@ -2683,6 +2683,7 @@
 org.apache.hadoop.hbase.coprocessor.TestImportExport
 
 
+org.apache.hadoop.hbase.mapreduce.TestImportExport.TableWALActionListener 
(implements org.apache.hadoop.hbase.regionserver.wal.WALActionsListener)
 org.apache.hadoop.hbase.mapreduce.TestImportTsv (implements 
org.apache.hadoop.conf.Configurable)
 org.apache.hadoop.hbase.mapreduce.TestImportTsvParser
 org.apache.hadoop.hbase.mapreduce.TestImportTSVWithOperationAttributes 
(implements org.apache.hadoop.conf.Configurable)
@@ -2996,6 +2997,7 @@
 org.apache.hadoop.hbase.client.TestRawAsyncTableLimitedScanWithFilter
 org.apache.hadoop.hbase.client.TestRawAsyncTablePartialScan
 org.apache.hadoop.hbase.types.TestRawString
+org.apache.hadoop.hbase.zookeeper.TestReadOnlyZKClient
 org.apache.hadoop.hbase.zookeeper.TestRecoverableZooKeeper
 org.apache.hadoop.hbase.regionserver.TestRecoveredEdits
 org.apache.hadoop.hbase.metrics.impl.TestRefCountingMap
@@ -3480,14 +3482,22 @@
 org.apache.hadoop.hbase.security.visibility.TestVisibilityLablesWithGroups
 org.apache.hadoop.hbase.security.visibility.TestVisibilityWithCheckAuths
 org.apache.hadoop.hbase.regionserver.wal.TestWALActionsListener
+org.apache.hadoop.hbase.regionserver.wal.TestWALActionsListener.DummyWALActionsListener 
(implements org.apache.hadoop.hbase.regionserver.wal.WALActionsListener)
+
+org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad.FindBulkHBaseListener
+
+
 org.apache.hadoop.hbase.regionserver.TestWalAndCompactingMemStoreFlush
 org.apache.hadoop.hbase.regionserver.TestWalAndCompactingMemStoreFlush.ConcurrentPutRunnable
 (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true;
 title="class or interface in java.lang">Runnable)
 org.apache.hadoop.hbase.regionserver.wal.TestWALCellCodecWithCompression
 org.apache.hadoop.hbase.replication.regionserver.TestWALEntryStream
+org.apache.hadoop.hbase.replication.regionserver.TestWALEntryStream.PathWatcher (implements 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener)
 org.apache.hadoop.hbase.wal.TestWALFactory
+org.apache.hadoop.hbase.wal.TestWALFactory.DumbWALActionsListener 
(implements org.apache.hadoop.hbase.regionserver.wal.WALActionsListener)
 org.apache.hadoop.hbase.wal.TestWALFiltering
 org.apache.hadoop.hbase.regionserver.TestWALLockup
 org.apache.hadoop.hbase.regionserver.TestWALLockup.DummyServer (implements 
org.apache.hadoop.hbase.Server)
+org.apache.hadoop.hbase.regionserver.TestWALLockup.DummyWALActionsListener 
(implements org.apache.hadoop.hbase.regionserver.wal.WALActionsListener)
 org.apache.hadoop.hbase.wal.TestWALMethods
 org.apache.hadoop.hbase.regionserver.TestWALMonotonicallyIncreasingSeqId
 org.apache.hadoop.hbase.coprocessor.TestWALObserver
@@ -3678,19 +3688,6 @@
 
 org.apache.hadoop.hbase.security.visibility.VisibilityTestUtil
 org.apache.hadoop.hbase.Waiter
-org.apache.hadoop.hbase.regionserver.wal.WALActionsListener.Base 
(implements org.apache.hadoop.hbase.regionserver.wal.WALActionsListener)
-
-org.apache.hadoop.hbase.mapreduce.TestImportExport.TableWALActionListener
-org.apache.hadoop.hbase.regionserver.wal.TestWALActionsListener.DummyWALActionsListener
-
-org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad.FindBulkHBaseListener
-
-
-org.apache.hadoop.hbase.replication.regionserver.TestWALEntryStream.PathWatcher
-org.apache.hadoop.hbase.wal.TestWALFactory.DumbWALActionsListener
-org.apache.hadoop.hbase.regionserver.TestWALLockup.DummyWALActionsListener
-
-
 org.apache.hadoop.hbase.regionserver.wal.WALCellCodec 
(implements org.apache.hadoop.hbase.codec.Codec)
 
 org.apache.hadoop.hbase.regionserver.wal.TestCustomWALCellCodec.CustomWALCellCodec
@@ -3800,6 +3797,7 @@
 org.apache.hadoop.hbase.Waiter.ExplainingPredicateE
 
 
+org.apache.hadoop.hbase.testclassification.ZKTests
 
 Enum Hierarchy
 



[01/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 51b7ea776 -> 713d773f1


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.html
index 2daacb5..fb5cc60 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.html
@@ -25,413 +25,414 @@
 017 */
 018package org.apache.hadoop.hbase.master;
 019
-020import java.util.Date;
-021
-022import 
org.apache.hadoop.hbase.ServerName;
-023import 
org.apache.hadoop.hbase.client.RegionInfo;
-024import 
org.apache.yetus.audience.InterfaceAudience;
-025import 
org.apache.yetus.audience.InterfaceStability;
-026
-027import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-028import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-029
-030/**
-031 * State of a Region while undergoing 
transitions.
-032 * This class is immutable.
-033 */
-034@InterfaceAudience.Private
-035public class RegionState {
-036
-037  @InterfaceAudience.Private
-038  @InterfaceStability.Evolving
-039  public enum State {
-040OFFLINE,// region is in an 
offline state
-041OPENING,// server has begun 
to open but not yet done
-042OPEN,   // server opened 
region and updated meta
-043CLOSING,// server has begun 
to close but not yet done
-044CLOSED, // server closed 
region and updated meta
-045SPLITTING,  // server started 
split of a region
-046SPLIT,  // server completed 
split of a region
-047FAILED_OPEN,// failed to open, 
and won't retry any more
-048FAILED_CLOSE,   // failed to close, 
and won't retry any more
-049MERGING,// server started 
merge a region
-050MERGED, // server completed 
merge a region
-051SPLITTING_NEW,  // new region to be 
created when RS splits a parent
-052// region but hasn't 
be created yet, or master doesn't
-053// know it's already 
created
-054MERGING_NEW;// new region to be 
created when RS merges two
-055// daughter regions 
but hasn't be created yet, or
-056// master doesn't 
know it's already created
-057
-058/**
-059 * Convert to protobuf 
ClusterStatusProtos.RegionState.State
-060 */
-061public 
ClusterStatusProtos.RegionState.State convert() {
-062  
ClusterStatusProtos.RegionState.State rs;
-063  switch (this) {
-064  case OFFLINE:
-065rs = 
ClusterStatusProtos.RegionState.State.OFFLINE;
-066break;
-067  case OPENING:
-068rs = 
ClusterStatusProtos.RegionState.State.OPENING;
-069break;
-070  case OPEN:
-071rs = 
ClusterStatusProtos.RegionState.State.OPEN;
-072break;
-073  case CLOSING:
-074rs = 
ClusterStatusProtos.RegionState.State.CLOSING;
-075break;
-076  case CLOSED:
-077rs = 
ClusterStatusProtos.RegionState.State.CLOSED;
-078break;
-079  case SPLITTING:
-080rs = 
ClusterStatusProtos.RegionState.State.SPLITTING;
-081break;
-082  case SPLIT:
-083rs = 
ClusterStatusProtos.RegionState.State.SPLIT;
-084break;
-085  case FAILED_OPEN:
-086rs = 
ClusterStatusProtos.RegionState.State.FAILED_OPEN;
-087break;
-088  case FAILED_CLOSE:
-089rs = 
ClusterStatusProtos.RegionState.State.FAILED_CLOSE;
-090break;
-091  case MERGING:
-092rs = 
ClusterStatusProtos.RegionState.State.MERGING;
-093break;
-094  case MERGED:
-095rs = 
ClusterStatusProtos.RegionState.State.MERGED;
-096break;
-097  case SPLITTING_NEW:
-098rs = 
ClusterStatusProtos.RegionState.State.SPLITTING_NEW;
-099break;
-100  case MERGING_NEW:
-101rs = 
ClusterStatusProtos.RegionState.State.MERGING_NEW;
-102break;
-103  default:
-104throw new 
IllegalStateException("");
-105  }
-106  return rs;
-107}
-108
-109/**
-110 * Convert a protobuf 
HBaseProtos.RegionState.State to a RegionState.State
-111 *
-112 * @return the RegionState.State
-113 */
-114public static State 
convert(ClusterStatusProtos.RegionState.State protoState) {
-115  State state;
-116  switch (protoState) {
-117  case OFFLINE:
-118state = OFFLINE;
-119break;
-120  case PENDING_OPEN:
-121  case OPENING:
-122state = OPENING;
-123break;
-124  case OPEN:
-125state = OPEN;
-126break;
-127  case PENDING_CLOSE:
-128  case CLOSING:
-129state = CLOSING;
-130break;
-131  case CLOSED:
-132state = 
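
A toy sketch of the exhaustive enum-to-protobuf mapping pattern convert() uses above, not part of the published diff (toy enums stand in for the ClusterStatusProtos types):

public class RegionStateConvertSketch {
  enum State { OFFLINE, OPENING, OPEN, CLOSING, CLOSED }
  enum ProtoState { OFFLINE, OPENING, OPEN, CLOSING, CLOSED }

  // Exhaustive switch that fails loudly on an unmapped value, as in the source.
  static ProtoState convert(State s) {
    switch (s) {
      case OFFLINE: return ProtoState.OFFLINE;
      case OPENING: return ProtoState.OPENING;
      case OPEN:    return ProtoState.OPEN;
      case CLOSING: return ProtoState.CLOSING;
      case CLOSED:  return ProtoState.CLOSED;
      default: throw new IllegalStateException("unmapped state: " + s);
    }
  }

  public static void main(String[] args) {
    System.out.println(convert(State.OPEN)); // OPEN
  }
}
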

[01/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 83b248d3f -> fd365a2bc


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
index d438f22..7c59e27 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
@@ -1290,8 +1290,8 @@
 1282      CompactType compactType) throws IOException {
 1283    switch (compactType) {
 1284      case MOB:
-1285        compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), major,
-1286          columnFamily);
+1285        compact(this.connection.getAdminForMaster(), RegionInfo.createMobRegionInfo(tableName),
+1286          major, columnFamily);
 1287        break;
 1288      case NORMAL:
 1289        checkTableExists(tableName);
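This hunk swaps the private getMobRegionInfo helper for the public RegionInfo.createMobRegionInfo factory; the client-facing entry point is unchanged. A minimal usage sketch, assuming a standard HBase 2.x client setup and an illustrative table name:

    // Sketch: routes into the MOB branch of the switch shown above.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.compact(TableName.valueOf("t1"), CompactType.MOB);
    }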
@@ -3248,7 +3248,7 @@
 3240      new Callable<AdminProtos.GetRegionInfoResponse.CompactionState>() {
 3241        @Override
 3242        public AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243          RegionInfo info = getMobRegionInfo(tableName);
+3243          RegionInfo info = RegionInfo.createMobRegionInfo(tableName);
 3244          GetRegionInfoRequest request =
 3245            RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246          GetRegionInfoResponse response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
 3304        }
 3305        break;
 3306      default:
-3307        throw new IllegalArgumentException("Unknowne compactType: " + compactType);
+3307        throw new IllegalArgumentException("Unknown compactType: " + compactType);
 3308    }
 3309    if (state != null) {
 3310      return ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
 3839    });
 3840  }
 3841
-3842  private RegionInfo getMobRegionInfo(TableName tableName) {
-3843    return RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844        .build();
-3845  }
-3846
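The deleted helper documents what createMobRegionInfo must reproduce: a synthetic region pinned to the ".mob" start key with region id 0. An equivalent construction, shown only as an aid for reading older code against the new factory:

    // Equivalent to the deleted private helper above; ".mob" is the synthetic
    // start key that identifies the MOB pseudo-region for a table.
    RegionInfo mobRegion = RegionInfoBuilder.newBuilder(tableName)
        .setStartKey(Bytes.toBytes(".mob"))
        .setRegionId(0)
        .build();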
-3847  private RpcControllerFactory getRpcControllerFactory() {
-3848    return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853      throws IOException {
-3854    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3855      @Override
-3856      protected Void rpcCall() throws Exception {
-3857        master.addReplicationPeer(getRpcController(),
-3858          RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, enabled));
-3859        return null;
-3860      }
-3861    });
-3862  }
-3863
-3864  @Override
-3865  public void removeReplicationPeer(String peerId) throws IOException {
-3866    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3867      @Override
-3868      protected Void rpcCall() throws Exception {
-3869        master.removeReplicationPeer(getRpcController(),
-3870          RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871        return null;
-3872      }
-3873    });
-3874  }
-3875
-3876  @Override
-3877  public void enableReplicationPeer(final String peerId) throws IOException {
-3878    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3879      @Override
-3880      protected Void rpcCall() throws Exception {
-3881        master.enableReplicationPeer(getRpcController(),
-3882          RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883        return null;
-3884      }
-3885    });
-3886  }
-3887
-3888  @Override
-3889  public void disableReplicationPeer(final String peerId) throws IOException {
-3890    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3891      @Override
-3892      protected Void rpcCall() throws Exception {
-3893        master.disableReplicationPeer(getRpcController(),
-3894          RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895        return null;
-3896      }
-3897    });
-3898  }
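Each method above wraps exactly one master RPC behind executeCallable. A hedged sketch of the peer lifecycle these calls expose through Admin; the peer id and cluster key below are illustrative, not from this commit:

    // Illustrative peer lifecycle; identifiers are made up.
    ReplicationPeerConfig peerConfig = new ReplicationPeerConfig()
        .setClusterKey("zk1,zk2,zk3:2181:/hbase");
    admin.addReplicationPeer("peer_1", peerConfig, true); // add, enabled
    admin.disableReplicationPeer("peer_1");               // pause shipping edits
    admin.enableReplicationPeer("peer_1");                // resume
    admin.removeReplicationPeer("peer_1");                // drop the peer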
-3899
-3900  @Override
-3901  public ReplicationPeerConfig getReplicationPeerConfig(final String peerId) throws IOException {
-3902    return executeCallable(new MasterCallable<ReplicationPeerConfig>(getConnection(),
-3903        getRpcControllerFactory()) {
-3904      @Override
-3905      protected ReplicationPeerConfig 

[01/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 9f95f7932 -> b9722a17b


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
index c552d8a..a792ab2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
@@ -48,922 +48,928 @@
 040import java.util.concurrent.ThreadPoolExecutor;
 041import java.util.concurrent.TimeUnit;
 042import java.util.concurrent.atomic.AtomicLong;
-043
-044import org.apache.commons.logging.Log;
-045import org.apache.commons.logging.LogFactory;
-046import org.apache.hadoop.conf.Configuration;
-047import org.apache.hadoop.fs.FileSystem;
-048import org.apache.hadoop.fs.Path;
-049import org.apache.hadoop.hbase.HConstants;
-050import org.apache.hadoop.hbase.MetaTableAccessor;
-051import org.apache.hadoop.hbase.Server;
-052import org.apache.hadoop.hbase.TableDescriptors;
-053import org.apache.hadoop.hbase.TableName;
-054import org.apache.yetus.audience.InterfaceAudience;
-055import org.apache.hadoop.hbase.client.Connection;
-056import org.apache.hadoop.hbase.client.ConnectionFactory;
-057import org.apache.hadoop.hbase.regionserver.HRegionServer;
-058import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost;
-059import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
-060import org.apache.hadoop.hbase.replication.ReplicationException;
-061import org.apache.hadoop.hbase.replication.ReplicationListener;
-062import org.apache.hadoop.hbase.replication.ReplicationPeer;
-063import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-064import org.apache.hadoop.hbase.replication.ReplicationPeers;
-065import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
-066import org.apache.hadoop.hbase.replication.ReplicationQueues;
-067import org.apache.hadoop.hbase.replication.ReplicationTracker;
-068import org.apache.hadoop.hbase.util.Bytes;
-069import org.apache.hadoop.hbase.util.Pair;
-070import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-071
-072import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-073import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
-074
-075/**
-076 * This class is responsible for managing all the replication
-077 * sources. There are two classes of sources:
-078 * <ul>
-079 * <li>Normal sources are persistent and one per peer cluster</li>
-080 * <li>Old sources are recovered from a failed region server and our
-081 * only goal is to finish replicating the WAL queue it had up in ZK</li>
-082 * </ul>
-083 *
-084 * When a region server dies, this class uses a watcher to get notified, and it
-085 * tries to grab a lock in order to transfer all the queues into a local
-086 * old source.
-087 *
-088 * This class implements the ReplicationListener interface so that it can track changes in
-089 * replication state.
-090 */
-091@InterfaceAudience.Private
-092public class ReplicationSourceManager implements ReplicationListener {
-093  private static final Log LOG =
-094      LogFactory.getLog(ReplicationSourceManager.class);
-095  // List of all the sources that read this RS's logs
-096  private final List<ReplicationSourceInterface> sources;
-097  // List of all the sources we got from dead RSs
-098  private final List<ReplicationSourceInterface> oldsources;
-099  private final ReplicationQueues replicationQueues;
-100  private final ReplicationTracker replicationTracker;
-101  private final ReplicationPeers replicationPeers;
-102  // UUID for this cluster
-103  private final UUID clusterId;
-104  // All about stopping
-105  private final Server server;
-106  // All logs we are currently tracking
-107  // Index structure of the map is: peer_id->logPrefix/logGroup->logs
-108  private final Map<String, Map<String, SortedSet<String>>> walsById;
-109  // Logs for recovered sources we are currently tracking
-110  private final Map<String, Map<String, SortedSet<String>>> walsByIdRecoveredQueues;
-111  private final Configuration conf;
-112  private final FileSystem fs;
-113  // The paths to the latest log of each wal group, for newly added peers
-114  private Set<Path> latestPaths;
-115  // Path to the wals directories
-116  private final Path logDir;
-117  // Path to the wal archive
-118  private final Path oldLogDir;
-119  private final WALFileLengthProvider walFileLengthProvider;
-120  // The number of ms 
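The walsById comment above describes a two-level index: peer id, then wal group/prefix, then a sorted set of wal names. A hedged sketch of that shape; the identifiers below are illustrative:

    // Illustrative shape of walsById: peer_id -> wal group -> sorted wal names.
    Map<String, Map<String, SortedSet<String>>> walsById = new HashMap<>();
    walsById.computeIfAbsent("peer_1", p -> new HashMap<>())
        .computeIfAbsent("rs1%2C16020", g -> new TreeSet<>())
        .add("rs1%2C16020.1511900000000");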

[01/51] [partial] hbase-site git commit: Published site at .

2017-11-24 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 0c9c1d65d -> b1eb74535


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColCell.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColCell.html b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColCell.html
index 9098105..b05691f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColCell.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColCell.html
@@ -37,1514 +37,1514 @@
 029import java.util.ArrayList;
 030import java.util.Iterator;
 031import java.util.List;
-032
-033import org.apache.hadoop.hbase.KeyValue.Type;
-034import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-035import org.apache.hadoop.hbase.io.HeapSize;
-036import org.apache.hadoop.hbase.io.TagCompressionContext;
-037import org.apache.hadoop.hbase.io.util.Dictionary;
-038import org.apache.hadoop.hbase.io.util.StreamUtils;
-039import org.apache.hadoop.hbase.util.ByteBufferUtils;
-040import org.apache.hadoop.hbase.util.ByteRange;
-041import org.apache.hadoop.hbase.util.Bytes;
-042import org.apache.hadoop.hbase.util.ClassSize;
-043import org.apache.yetus.audience.InterfaceAudience;
-044
-045import com.google.common.annotations.VisibleForTesting;
-046
-047/**
-048 * Utility methods helpful for slinging {@link Cell} instances. It has a more powerful and
-049 * richer set of APIs than those in {@link CellUtil}, for internal usage.
-050 */
-051@InterfaceAudience.Private
-052// TODO : Make Tag IA.LimitedPrivate and move some of the Util methods to CP exposed Util class
-053public class PrivateCellUtil {
+032import java.util.Optional;
+033
+034import org.apache.hadoop.hbase.KeyValue.Type;
+035import org.apache.hadoop.hbase.filter.ByteArrayComparable;
+036import org.apache.hadoop.hbase.io.HeapSize;
+037import org.apache.hadoop.hbase.io.TagCompressionContext;
+038import org.apache.hadoop.hbase.io.util.Dictionary;
+039import org.apache.hadoop.hbase.io.util.StreamUtils;
+040import org.apache.hadoop.hbase.util.ByteBufferUtils;
+041import org.apache.hadoop.hbase.util.ByteRange;
+042import org.apache.hadoop.hbase.util.Bytes;
+043import org.apache.hadoop.hbase.util.ClassSize;
+044import org.apache.yetus.audience.InterfaceAudience;
+045
+046import com.google.common.annotations.VisibleForTesting;
+047
+048/**
+049 * Utility methods helpful for slinging {@link Cell} instances. It has a more powerful and
+050 * richer set of APIs than those in {@link CellUtil}, for internal usage.
+051 */
+052@InterfaceAudience.Private
+053public final class PrivateCellUtil {
 054
 055  /**
 056   * Private constructor to keep this class from being instantiated.
 057   */
 058  private PrivateCellUtil() {
-059
-060  }
-061
-062  /*** ByteRange ***/
-063
-064  public static ByteRange fillRowRange(Cell cell, ByteRange range) {
-065    return range.set(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
-066  }
-067
-068  public static ByteRange fillFamilyRange(Cell cell, ByteRange range) {
-069    return range.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
-070  }
-071
-072  public static ByteRange fillQualifierRange(Cell cell, ByteRange range) {
-073    return range.set(cell.getQualifierArray(), cell.getQualifierOffset(),
-074      cell.getQualifierLength());
-075  }
-076
-077  public static ByteRange fillValueRange(Cell cell, ByteRange range) {
-078    return range.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
-079  }
-080
-081  public static ByteRange fillTagRange(Cell cell, ByteRange range) {
-082    return range.set(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
-083  }
-084
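The fill* helpers above avoid copying by pointing a reusable ByteRange at a slice of the cell's backing array. A sketch of the intended pattern, assuming SimpleMutableByteRange (the stock implementation in org.apache.hadoop.hbase.util) and cells obtained elsewhere:

    // Sketch: one reusable range across many cells, no per-cell byte[] copies.
    ByteRange range = new SimpleMutableByteRange();
    for (Cell cell : cells) {
      PrivateCellUtil.fillRowRange(cell, range); // zero-copy view of the row
      // compare or hash via the range without materializing a byte[]
    }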
-085  /**
-086   * Returns tag value in a new byte array. If server-side, use {@link Tag#getValueArray()} with
-087   * appropriate {@link Tag#getValueOffset()} and {@link Tag#getValueLength()} instead to save on
-088   * allocations.
-089   * @param cell
-090   * @return tag value in a new byte array.
-091   */
-092  public static byte[] getTagsArray(Cell cell) {
-093    byte[] output = new byte[cell.getTagsLength()];
-094    copyTagsTo(cell, output, 0);
-095    return output;
-096  }
-097
-098  public static byte[] cloneTags(Cell cell) {
-099    byte[] output = new byte[cell.getTagsLength()];
-100    copyTagsTo(cell, output, 0);
-101    return output;
-102  }
-103
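getTagsArray and cloneTags are identical here: both allocate a fresh byte[] sized by getTagsLength and delegate to copyTagsTo. The javadoc's warning is about exactly that allocation; a one-line sketch of the cost:

    // Each call copies cell.getTagsLength() bytes into a fresh array, which is
    // why server-side code is steered toward Tag's offset/length accessors.
    byte[] tags = PrivateCellUtil.cloneTags(cell);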
-104  /**
-105   * Copies the tags info into the tag portion of the cell
-106   * @param cell
-107   * @param destination
-108   * @param destinationOffset
-109   * @return position after tags
+059  }
+060
+061  /*** ByteRange ***/
+062
+063  public static ByteRange fillRowRange(Cell cell, 
