[hbase-operator-tools] branch master updated: Add assigns/unassigns commands.

2018-09-17 Thread stack
This is an automated email from the ASF dual-hosted git repository.

stack pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-operator-tools.git


The following commit(s) were added to refs/heads/master by this push:
 new c6af272  Add assigns/unassigns commands.
c6af272 is described below

commit c6af272f82cd70694dcc717a4b7c5c6dcefa0505
Author: Michael Stack 
AuthorDate: Mon Sep 17 14:29:32 2018 -0700

Add assigns/unassigns commands.

Tests are disabled for the moment while we figure out an issue with the shaded client.
---
 hbase-hbck2/pom.xml|  8 +-
 .../src/main/java/org/apache/hbase/HBCK2.java  | 91 
 .../src/test/java/org/apache/hbase/TestHBCK2.java  | 96 ++
 pom.xml|  2 +-
 4 files changed, 181 insertions(+), 16 deletions(-)

diff --git a/hbase-hbck2/pom.xml b/hbase-hbck2/pom.xml
index 4996f97..a550ebe 100644
--- a/hbase-hbck2/pom.xml
+++ b/hbase-hbck2/pom.xml
@@ -114,7 +114,13 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-shaded-client</artifactId>
-      <version>2.1.1-SNAPSHOT</version>
+      <version>${hbase.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-testing-util</artifactId>
+      <version>${hbase.version}</version>
+      <scope>test</scope>
     </dependency>
   </dependencies>
 
diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/HBCK2.java b/hbase-hbck2/src/main/java/org/apache/hbase/HBCK2.java
index 4234637..2d06a78 100644
--- a/hbase-hbck2/src/main/java/org/apache/hbase/HBCK2.java
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/HBCK2.java
@@ -41,6 +41,7 @@ import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.StringWriter;
 import java.util.Arrays;
+import java.util.List;
 import java.util.stream.Collectors;
 
 /**
@@ -49,6 +50,7 @@ import java.util.stream.Collectors;
  */
 // TODO:
 // + Add bulk assign/unassigns. If 60k OPENING regions, doing it via shell takes 10-60 seconds each.
+// + On assign, can we look to see if existing assign and if so fail until cancelled?
 // + Add test of Master version to ensure it supports hbck functionality.
 // + Doc how we just take pointer to zk ensemble... If want to do more exotic config. on client,
 // then add a hbase-site.xml onto CLASSPATH for this tool to pick up.
@@ -56,11 +58,42 @@ public class HBCK2 {
   private static final Logger LOG = LogManager.getLogger(HBCK2.class);
   public static final int EXIT_SUCCESS = 0;
   public static final int EXIT_FAILURE = 1;
+  // Commands
   private static final String SET_TABLE_STATE = "setTableState";
+  private static final String ASSIGN = "assign";
+  private static final String UNASSIGN = "unassign";
+
+  static TableState setTableState(Configuration conf, TableName tableName, TableState.State state)
+  throws IOException {
+    try (ClusterConnection conn = (ClusterConnection)ConnectionFactory.createConnection(conf)) {
+      try (Hbck hbck = conn.getHbck()) {
+        return hbck.setTableStateInMeta(new TableState(tableName, state));
+      }
+    }
+  }
+
+  static List<Long> assigns(Configuration conf, List<String> encodedRegionNames)
+  throws IOException {
+    try (ClusterConnection conn = (ClusterConnection)ConnectionFactory.createConnection(conf)) {
+      try (Hbck hbck = conn.getHbck()) {
+        return hbck.assigns(encodedRegionNames);
+      }
+    }
+  }
+
+  static List<Long> unassigns(Configuration conf, List<String> encodedRegionNames)
+  throws IOException {
+    try (ClusterConnection conn = (ClusterConnection)ConnectionFactory.createConnection(conf)) {
+      try (Hbck hbck = conn.getHbck()) {
+        return hbck.unassigns(encodedRegionNames);
+      }
+    }
+  }
 
   private static final String getCommandUsage() {
     StringWriter sw = new StringWriter();
     PrintWriter writer = new PrintWriter(sw);
+    writer.println();
     writer.println("Commands:");
     writer.println(" " + SET_TABLE_STATE + " <TABLENAME> <STATE>");
     writer.println("   Possible table states: " + Arrays.stream(TableState.State.values()).
@@ -70,6 +103,25 @@ public class HBCK2 {
     writer.println("   A value of \\x08\\x00 == ENABLED, \\x08\\x01 == DISABLED, etc.");
     writer.println("   An example making table name 'user' ENABLED:");
     writer.println(" $ HBCK2 setTableState users ENABLED");
+    writer.println("   Returns whatever the previous table state was.");
+    writer.println();
+    writer.println(" " + ASSIGN + " <ENCODED_REGIONNAME>...");
+    writer.println("   A 'raw' assign that can be used even during Master initialization.");
+    writer.println("   Skirts Coprocessors. Pass one or more encoded RegionNames:");
+    writer.println("   e.g. 1588230740 is hard-coded encoding for hbase:meta region and");
+    writer.println("   de00010733901a05f5a2a3a382e27dd4 is an example of what a random");
+    writer.println("   user-space encoded Region name looks like. For example:");
+    writer.println(" $ HBCK2 assign 1588230740 de00010733901a05f5a2a3a382e27dd4");
+    writer.println("   Returns the pid of the created AssignProcedure or -1 

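Worth noting for anyone scripting against this: the helpers above are thin wrappers over the new Hbck interface, so the same calls work from user code. A minimal sketch, assuming an hbase-shaded-client 2.1.1-or-later on the CLASSPATH and an hbase-site.xml (or default zk ensemble) reachable from it; the class name AssignsExample is illustrative:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Hbck;

// Illustrative only -- mirrors HBCK2.assigns() from the commit above.
public class AssignsExample {
  public static void main(String[] args) throws Exception {
    // Picks up hbase-site.xml from the CLASSPATH, i.e. the zk ensemble
    // pointer the TODO comment in the commit talks about.
    Configuration conf = HBaseConfiguration.create();
    try (ClusterConnection conn =
            (ClusterConnection) ConnectionFactory.createConnection(conf);
         Hbck hbck = conn.getHbck()) {
      // 1588230740 is the hard-coded encoded name of the hbase:meta region.
      List<Long> pids = hbck.assigns(Arrays.asList("1588230740"));
      System.out.println("AssignProcedure pid(s): " + pids);
    }
  }
}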
hbase git commit: HBASE-21102 ServerCrashProcedure should select target server where no other replicas exist for the current region (Ram)

2018-09-17 Thread ramkrishna
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 39e0b8515 -> 27b772ddc


HBASE-21102 ServerCrashProcedure should select target server where no other replicas exist for the current region (Ram)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/27b772dd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/27b772dd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/27b772dd

Branch: refs/heads/branch-2.1
Commit: 27b772ddc63d1ebb6fe1cd6cd12ca2fc6f897286
Parents: 39e0b85
Author: Vasudevan 
Authored: Mon Sep 17 22:36:50 2018 +0530
Committer: Vasudevan 
Committed: Mon Sep 17 22:36:50 2018 +0530

--
 .../hbase/master/assignment/RegionStates.java   | 64 +++
 .../hbase/master/balancer/BaseLoadBalancer.java | 56 ++---
 .../hadoop/hbase/HBaseTestingUtility.java   | 30 +--
 .../procedure/TestServerCrashProcedure.java | 34 +---
 .../TestServerCrashProcedureWithReplicas.java   | 84 
 5 files changed, 224 insertions(+), 44 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/27b772dd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
index 5c349e5..40e82f9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
@@ -41,12 +41,14 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -820,25 +822,39 @@ public class RegionStates {
   public Map<ServerName, List<RegionInfo>> getSnapShotOfAssignment(
       final Collection<RegionInfo> regions) {
     final Map<ServerName, List<RegionInfo>> result = new HashMap<ServerName, List<RegionInfo>>();
-    for (RegionInfo hri: regions) {
-      final RegionStateNode node = getRegionStateNode(hri);
-      if (node == null) continue;
-
-      // TODO: State.OPEN
-      final ServerName serverName = node.getRegionLocation();
-      if (serverName == null) continue;
-
-      List<RegionInfo> serverRegions = result.get(serverName);
-      if (serverRegions == null) {
-        serverRegions = new ArrayList<RegionInfo>();
-        result.put(serverName, serverRegions);
+    if (regions != null) {
+      for (RegionInfo hri : regions) {
+        final RegionStateNode node = getRegionStateNode(hri);
+        if (node == null) {
+          continue;
+        }
+        createSnapshot(node, result);
+      }
+    } else {
+      for (RegionStateNode node : regionsMap.values()) {
+        if (node == null) {
+          continue;
+        }
+        createSnapshot(node, result);
       }
-
-      serverRegions.add(node.getRegionInfo());
     }
     return result;
   }
 
+  private void createSnapshot(RegionStateNode node, Map<ServerName, List<RegionInfo>> result) {
+    final ServerName serverName = node.getRegionLocation();
+    if (serverName == null) {
+      return;
+    }
+
+    List<RegionInfo> serverRegions = result.get(serverName);
+    if (serverRegions == null) {
+      serverRegions = new ArrayList<RegionInfo>();
+      result.put(serverName, serverRegions);
+    }
+    serverRegions.add(node.getRegionInfo());
+  }
+
   public Map<RegionInfo, ServerName> getRegionAssignments() {
     final HashMap<RegionInfo, ServerName> assignments = new HashMap<RegionInfo, ServerName>();
     for (RegionStateNode node: regionsMap.values()) {
@@ -1127,6 +1143,26 @@
     return serverNode;
   }
 
+  public boolean isReplicaAvailableForRegion(final RegionInfo info) {
+    // if the region info itself is a replica return true.
+    if (!RegionReplicaUtil.isDefaultReplica(info)) {
+      return true;
+    }
+    // iterate the regionsMap for the given region name. If there are replicas it should
+    // list them in order.
+    for (RegionStateNode node : regionsMap.tailMap(info.getRegionName()).values()) {
+      if (!node.getTable().equals(info.getTable())
+          || !ServerRegionReplicaUtil.isReplicasForSameRegion(info, node.getRegionInfo())) {
+        break;
+      } else if 

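To make the placement rule concrete: below is a toy, self-contained model of the check (plain Java with a hypothetical Region type, not the HBase API). A candidate server is rejected when it already hosts a different replica of the region being placed, which is the condition the isReplicaAvailableForRegion() check above exists to detect:

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ReplicaPlacementSketch {

  // Stand-in for RegionInfo: replicas of one region share an encoded name
  // and differ only in replicaId (replicaId 0 is the default replica).
  static final class Region {
    final String encodedName;
    final int replicaId;
    Region(String encodedName, int replicaId) {
      this.encodedName = encodedName;
      this.replicaId = replicaId;
    }
  }

  // Rough analogue of the check above: true if the candidate server already
  // carries a different replica of 'toPlace'.
  static boolean hostsOtherReplica(List<Region> regionsOnServer, Region toPlace) {
    for (Region r : regionsOnServer) {
      if (r.encodedName.equals(toPlace.encodedName) && r.replicaId != toPlace.replicaId) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    Region primary = new Region("de00010733901a05f5a2a3a382e27dd4", 0);
    Map<String, List<Region>> snapshot = new HashMap<>();
    snapshot.put("rs1", Arrays.asList(new Region(primary.encodedName, 1))); // conflict
    snapshot.put("rs2", Collections.<Region>emptyList());                   // eligible
    for (Map.Entry<String, List<Region>> e : snapshot.entrySet()) {
      System.out.println(e.getKey() + " eligible: " + !hostsOtherReplica(e.getValue(), primary));
    }
  }
}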
hbase git commit: HBASE-21160 Assertion in TestVisibilityLabelsWithDeletes#testDeleteColumnsWithoutAndWithVisibilityLabels is ignored (liubangchen)

2018-09-17 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 6d7bc0e98 -> 1cf920db4


HBASE-21160 Assertion in TestVisibilityLabelsWithDeletes#testDeleteColumnsWithoutAndWithVisibilityLabels is ignored (liubangchen)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1cf920db
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1cf920db
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1cf920db

Branch: refs/heads/master
Commit: 1cf920db4395926c634ed0946a57dfa52227472f
Parents: 6d7bc0e
Author: tedyu 
Authored: Mon Sep 17 08:25:11 2018 -0700
Committer: tedyu 
Committed: Mon Sep 17 08:25:11 2018 -0700

--
 .../visibility/TestVisibilityLabels.java|  9 
 ...ibilityLabelsWithDefaultVisLabelService.java | 23 ++--
 .../TestVisibilityLabelsWithDeletes.java| 20 -
 3 files changed, 16 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1cf920db/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java
index e106434..770bf3e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java
@@ -423,6 +423,7 @@ public abstract class TestVisibilityLabels {
 try (Connection conn = ConnectionFactory.createConnection(conf)) {
   VisibilityClient.setAuths(conn, auths, user);
 } catch (Throwable e) {
+  throw new IOException(e);
 }
 return null;
   }
@@ -450,7 +451,7 @@ public abstract class TestVisibilityLabels {
 try (Connection conn = ConnectionFactory.createConnection(conf)) {
   authsResponse = VisibilityClient.getAuths(conn, user);
 } catch (Throwable e) {
-  fail("Should not have failed");
+  throw new IOException(e);
 }
     List<String> authsList = new ArrayList<>(authsResponse.getAuthList().size());
 for (ByteString authBS : authsResponse.getAuthList()) {
@@ -475,7 +476,7 @@ public abstract class TestVisibilityLabels {
   try {
 authsResponse = VisibilityClient.getAuths(conn, user);
   } catch (Throwable e) {
-fail("Should not have failed");
+throw new IOException(e);
   }
 } catch (Throwable e) {
 }
@@ -515,7 +516,7 @@ public abstract class TestVisibilityLabels {
 try (Connection conn = ConnectionFactory.createConnection(conf)) {
   VisibilityClient.setAuths(conn, auths, user);
 } catch (Throwable e) {
-  fail("Should not have failed");
+  throw new IOException(e);
 }
 // Removing the auths for SECRET and CONFIDENTIAL for the user.
 // Passing a non existing auth also.
@@ -553,7 +554,7 @@ public abstract class TestVisibilityLabels {
 try (Connection conn = ConnectionFactory.createConnection(conf)) {
   authsResponse = VisibilityClient.getAuths(conn, user);
 } catch (Throwable e) {
-  fail("Should not have failed");
+  throw new IOException(e);
 }
     List<String> authsList = new ArrayList<>(authsResponse.getAuthList().size());
 for (ByteString authBS : authsResponse.getAuthList()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/1cf920db/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java
index 0d7ff68..5f8acfb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java
@@ -22,7 +22,6 @@ import static org.apache.hadoop.hbase.security.visibility.VisibilityUtils.SYSTEM
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 import com.google.protobuf.ByteString;
 import java.io.IOException;
@@ -94,7 +93,7 @@ public class 

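The anti-pattern being removed above is easy to see in isolation. A hedged sketch with illustrative names (not HBase code): swallowing the Throwable hides the real failure so later assertions test nothing, while rethrowing as IOException lets JUnit surface the cause at the point of failure:

import java.io.IOException;

// Illustrative sketch of the anti-pattern this commit removes.
class ExceptionHandlingSketch {
  interface VisibilityOps { void setAuths(String user) throws Throwable; }

  // Before: the error is silently dropped; the test keeps going and any
  // later assertion runs against state that was never actually set up.
  static void swallowed(VisibilityOps ops, String user) {
    try {
      ops.setAuths(user);
    } catch (Throwable e) {
      // ignored -- this is what made the test's assertion meaningless
    }
  }

  // After: the underlying cause propagates and the test fails loudly
  // at the point where the problem occurred.
  static void rethrown(VisibilityOps ops, String user) throws IOException {
    try {
      ops.setAuths(user);
    } catch (Throwable e) {
      throw new IOException(e);
    }
  }
}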
[36/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
index 88659fa..6fc579b 100644
[generated Javadoc HTML diff omitted: regenerated link list of methods inherited from HMaster]

[25/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
index d2da8f4..27235ad 100644
[generated source-listing HTML diff omitted: regenerated HMaster import block and line anchors]

[38/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/org/apache/hadoop/hbase/io/HalfStoreFileReader.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/HalfStoreFileReader.html b/devapidocs/org/apache/hadoop/hbase/io/HalfStoreFileReader.html
index cb3f7f5..2fcd8c9 100644
[generated Javadoc HTML diff omitted: adds the inherited nested interface StoreFileReader.Listener and setListener to the method summary]

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
index c6b5daf..a6e5222 100644
[generated Javadoc HTML diff omitted: enum hierarchy listing reordered]

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html b/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
index 7557e4f..427b9f5 100644
--- 

hbase-site git commit: INFRA-10751 Empty commit

2018-09-17 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 738e976e8 -> bfd2ee27f


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/bfd2ee27
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/bfd2ee27
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/bfd2ee27

Branch: refs/heads/asf-site
Commit: bfd2ee27fe0fa5cb70a10643fe4cd97d6b5ddf61
Parents: 738e976
Author: jenkins 
Authored: Mon Sep 17 14:53:31 2018 +
Committer: jenkins 
Committed: Mon Sep 17 14:53:31 2018 +

--

--




[33/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/org/apache/hadoop/hbase/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html b/devapidocs/org/apache/hadoop/hbase/package-tree.html
index 87c92cc..68f47a5 100644
[generated Javadoc HTML diff omitted: enum hierarchy listing reordered]

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
index 98ed3ea..1bc57ec 100644
[generated Javadoc HTML diff omitted: enum hierarchy listing reordered]

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
index 49f5ce0..f029496 100644
[generated Javadoc HTML diff omitted: enum hierarchy listing reordered]

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html

[26/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index d2da8f4..27235ad 100644
[generated source-listing HTML diff omitted: regenerated HMaster import block and line anchors, duplicating the hunk shown for HMaster.TableDescriptorGetter.html above]

[30/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileReader.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileReader.html b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileReader.html
index b635526..e6791f6 100644
[generated Javadoc HTML diff omitted: adds the nested interface StoreFileReader.Listener, new 'listener' and 'closed' fields, and a setListener(StoreFileReader.Listener) method to the summaries]

[27/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
index d2da8f4..27235ad 100644
[generated source-listing HTML diff omitted: regenerated HMaster import block and line anchors, duplicating the hunk shown for HMaster.TableDescriptorGetter.html above]

[34/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/org/apache/hadoop/hbase/master/MasterWalManager.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterWalManager.html b/devapidocs/org/apache/hadoop/hbase/master/MasterWalManager.html
index 2348fa8..ea3def5 100644
[generated Javadoc HTML diff omitted: adds getSplittingServersFromWALDir() ("Get Servernames which are currently splitting; paths have a '-splitting' suffix."), getLiveServersFromWALDir() ("Get Servernames that COULD BE 'alive'; excludes those that have a '-splitting' suffix as these are already being split -- they cannot be 'alive'."), getServerNamesFromWALDirPath(PathFilter) and getWALDirPaths(PathFilter) to the MasterWalManager method summary]
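A toy illustration of the split those two new methods make over the WAL directory listing (plain strings rather than the Hadoop FileSystem/PathFilter API; the server names are invented): directory names ending in '-splitting' belong to servers whose logs are already being split, everything else could still be alive:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

class WalDirSketch {
  static final String SPLITTING_EXT = "-splitting";

  // Servers whose WAL dirs carry the '-splitting' suffix: already being split.
  static List<String> splittingServers(List<String> walDirNames) {
    return walDirNames.stream()
        .filter(n -> n.endsWith(SPLITTING_EXT))
        .map(n -> n.substring(0, n.length() - SPLITTING_EXT.length()))
        .collect(Collectors.toList());
  }

  // Everything without the suffix COULD BE alive.
  static List<String> possiblyLiveServers(List<String> walDirNames) {
    return walDirNames.stream()
        .filter(n -> !n.endsWith(SPLITTING_EXT))
        .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    List<String> dirs = Arrays.asList(
        "rs1.example.com,16020,1537215000000",
        "rs2.example.com,16020,1537215000001-splitting");
    System.out.println(splittingServers(dirs));    // [rs2.example.com,16020,1537215000001]
    System.out.println(possiblyLiveServers(dirs)); // [rs1.example.com,16020,1537215000000]
  }
}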

[23/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
index 17ea7d8..b56bd67 100644
[generated source-listing HTML diff omitted: the underlying change has MasterRpcServices#reportRSFatalError log the report at WARN instead of ERROR and drop the "Region server " prefix from the message]

[31/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/org/apache/hadoop/hbase/regionserver/HStoreFile.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStoreFile.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HStoreFile.html
index 0ddf199..ec635e7 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStoreFile.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStoreFile.html
@@ -109,14 +109,14 @@

 All Implemented Interfaces:
-StoreFile
+StoreFile, StoreFileReader.Listener

 @InterfaceAudience.Private
 public class HStoreFile
 extends Object
-implements StoreFile
+implements StoreFile, StoreFileReader.Listener

 A Store data file.  Stores usually have one or more of these files.  They
 are produced by flushing the memstore to disk.  To create, instantiate a
 writer using StoreFileWriter.Builder
@@ -295,6 +295,10 @@

 static String STORE_FILE_READER_NO_READAHEAD

+private Set<StoreFileReader>
+streamReaders
+
 static byte[]
 TIMERANGE_KEY
   Key for Timerange information in metadata

@@ -354,100 +358,104 @@

 void
 closeStoreFile(boolean evictOnClose)

+void
+closeStreamReaders(boolean evictOnClose)
+
 private StoreFileReader
 createStreamReader(boolean canUseDropBehind)

 void
 deleteStoreFile()
   Delete this file

 boolean
 excludeFromMinorCompaction()

 OptionalLong
 getBulkLoadTimestamp()
   Return the timestamp at which this bulk load file was generated.

 CacheConfig
 getCacheConf()

 CellComparator
 getComparator()
   Get the comparator for comparing two cells.

 StoreFileInfo
 getFileInfo()

 Optional<Cell>
 getFirstKey()
   Get the first key in this store file.

 HDFSBlocksDistribution
 getHDFSBlockDistribution()

 Optional<Cell>
 getLastKey()
   Get the last key in this store file.

 OptionalLong
 getMaximumTimestamp()
   Get the max timestamp of all the cells in the store file.

 long
 getMaxMemStoreTS()
   Get max of the MemstoreTS in the KV's in this store file.

 long
 getMaxSequenceId()

 byte[]
 getMetadataValue(byte[] key)
   Only used by the Striped Compaction Policy

 OptionalLong
 getMinimumTimestamp()
   Get the min timestamp of all the cells in the store file.

 long
 getModificationTimestamp()
   Get the modification time of this store file.

 long
 getModificationTimeStamp()
   Get the modification time of this store file.

 org.apache.hadoop.fs.Path
 getPath()

@@ -456,19 +464,19 @@

 StoreFileScanner
 getPreadScanner(boolean cacheBlocks, long readPt, ...)
   Get a scanner which uses pread.

 org.apache.hadoop.fs.Path
 getQualifiedPath()

 StoreFileReader
 getReader()

 int
 getRefCount()

 StoreFileScanner
 getStreamScanner(boolean canUseDropBehind, boolean cacheBlocks, ...)
   Get a scanner which uses streaming read.

@@ -479,59 +487,63 @@

 void
 initReader()
   Initialize the reader used for pread.

 boolean
 isBulkLoadResult()
   Check if this storefile was created by bulk load.
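
(Taken together, the summary above sketches a reader lifecycle: initReader()
opens the pread reader, getReader() exposes it, and closeStoreFile(evictOnClose)
tears it down, with the new closeStreamReaders(evictOnClose) handling the
stream-read side. A hedged sketch of that sequence follows; the helper class is
invented, it assumes an already-constructed HStoreFile, and it assumes
getMaxSequenceId() is only meaningful once the reader is initialized.)

import java.io.IOException;
import org.apache.hadoop.hbase.regionserver.HStoreFile;

final class StoreFileInspector {
  /** Opens the store file's reader, reads one piece of metadata, then closes it. */
  static long maxSequenceId(HStoreFile sf) throws IOException {
    sf.initReader();                 // initialize the reader used for pread
    try {
      return sf.getMaxSequenceId(); // assumed valid once the reader is open
    } finally {
      sf.closeStoreFile(true);      // evictOnClose = true also drops cached blocks
    }
  }
}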

[02/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.html
index 8c8cc19..ad0629d 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.html
@@ -54,838 +54,854 @@
 046  import org.apache.hadoop.hbase.ipc.DelegatingRpcScheduler;
 047  import org.apache.hadoop.hbase.ipc.PriorityFunction;
 048  import org.apache.hadoop.hbase.ipc.RpcScheduler;
-049  import org.apache.hadoop.hbase.regionserver.HRegionServer;
-050  import org.apache.hadoop.hbase.regionserver.RSRpcServices;
-051  import org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory;
-052  import org.apache.hadoop.hbase.testclassification.MediumTests;
-053  import org.apache.hadoop.hbase.testclassification.MiscTests;
-054  import org.apache.hadoop.hbase.util.Bytes;
-055  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-056  import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
-057  import org.apache.hadoop.hbase.util.Pair;
-058  import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-059  import org.junit.AfterClass;
-060  import org.junit.Assert;
-061  import org.junit.BeforeClass;
-062  import org.junit.ClassRule;
-063  import org.junit.Rule;
-064  import org.junit.Test;
-065  import org.junit.experimental.categories.Category;
-066  import org.junit.rules.TestName;
-067  import org.slf4j.Logger;
-068  import org.slf4j.LoggerFactory;
-069
-070  import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-071  import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-072
-073  /**
-074   * Test {@link org.apache.hadoop.hbase.MetaTableAccessor}.
-075   */
-076  @Category({MiscTests.class, MediumTests.class})
-077  @SuppressWarnings("deprecation")
-078  public class TestMetaTableAccessor {
-079
-080    @ClassRule
-081    public static final HBaseClassTestRule CLASS_RULE =
-082        HBaseClassTestRule.forClass(TestMetaTableAccessor.class);
-083
-084    private static final Logger LOG = LoggerFactory.getLogger(TestMetaTableAccessor.class);
-085    private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-086    private static Connection connection;
-087    private Random random = new Random();
-088
-089    @Rule
-090    public TestName name = new TestName();
-091
-092    @BeforeClass public static void beforeClass() throws Exception {
-093      UTIL.startMiniCluster(3);
-094
-095      Configuration c = new Configuration(UTIL.getConfiguration());
-096      // Tests to 4 retries every 5 seconds. Make it try every 1 second so more
-097      // responsive.  1 second is default as is ten retries.
-098      c.setLong("hbase.client.pause", 1000);
-099      c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 10);
-100      connection = ConnectionFactory.createConnection(c);
-101    }
-102
-103    @AfterClass public static void afterClass() throws Exception {
-104      connection.close();
-105      UTIL.shutdownMiniCluster();
-106    }
-107
-108    /**
-109     * Does {@link MetaTableAccessor#getRegion(Connection, byte[])} and a write
-110     * against hbase:meta while its hosted server is restarted to prove our retrying
-111     * works.
-112     */
-113    @Test public void testRetrying()
-114        throws IOException, InterruptedException {
-115      final TableName tableName = TableName.valueOf(name.getMethodName());
-116      LOG.info("Started " + tableName);
-117      Table t = UTIL.createMultiRegionTable(tableName, HConstants.CATALOG_FAMILY);
-118      int regionCount = -1;
-119      try (RegionLocator r = UTIL.getConnection().getRegionLocator(tableName)) {
-120        regionCount = r.getStartKeys().length;
-121      }
-122      // Test it works getting a region from just made user table.
-123      final List<RegionInfo> regions =
-124        testGettingTableRegions(connection, tableName, regionCount);
-125      MetaTask reader = new MetaTask(connection, "reader") {
-126        @Override
-127        void metaTask() throws Throwable {
-128          testGetRegion(connection, regions.get(0));
-129          LOG.info("Read " + regions.get(0).getEncodedName());
-130        }
-131      };
-132      MetaTask writer = new MetaTask(connection, "writer") {
-133        @Override
-134        void metaTask() throws Throwable {
-135          MetaTableAccessor.addRegionToMeta(connection, regions.get(0));
-136          LOG.info("Wrote " + regions.get(0).getEncodedName());
-137        }
-138      };
-139      reader.start();
-140      writer.start();
-141
-142      // We're gonna check how it takes. If it takes too long, we will consider
-143      //  it as a fail. We can't put that in the @Test tag as we want to close
-144      //  the threads nicely
-145      final long timeOut = 18;
-146      long startTime =
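
(The beforeClass() shown above tunes the client for fast failure in tests: a
1-second pause between retries and at most ten attempts. A standalone sketch of
the same tuning, using only the two configuration keys that appear in the
source; the surrounding class is boilerplate invented for the example.)

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FastRetryConnection {
  public static Connection create() throws IOException {
    Configuration c = HBaseConfiguration.create();
    // Pause 1s between client retries (value is in milliseconds)...
    c.setLong("hbase.client.pause", 1000);
    // ...and allow at most 10 attempts before an operation fails.
    c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 10);
    return ConnectionFactory.createConnection(c);
  }
}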

[14/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
index 34b8b52..3f9d8d2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -935,7 +935,7 @@
 927      storeEngine.getStoreFileManager().clearCompactedFiles();
 928      // clear the compacted files
 929      if (CollectionUtils.isNotEmpty(compactedfiles)) {
-930        removeCompactedfiles(compactedfiles);
+930        removeCompactedfiles(compactedfiles, true);
 931      }
 932      if (!result.isEmpty()) {
 933        // initialize the thread pool for closing store files in parallel.
@@ -2583,190 +2583,221 @@
 2575   * Closes and archives the compacted files under this store
 2576   */
 2577  public synchronized void closeAndArchiveCompactedFiles() throws IOException {
-2578    // ensure other threads do not attempt to archive the same files on close()
-2579    archiveLock.lock();
-2580    try {
-2581      lock.readLock().lock();
-2582      Collection<HStoreFile> copyCompactedfiles = null;
-2583      try {
-2584        Collection<HStoreFile> compactedfiles =
-2585            this.getStoreEngine().getStoreFileManager().getCompactedfiles();
-2586        if (CollectionUtils.isNotEmpty(compactedfiles)) {
-2587          // Do a copy under read lock
-2588          copyCompactedfiles = new ArrayList<>(compactedfiles);
-2589        } else {
-2590          LOG.trace("No compacted files to archive");
-2591        }
-2592      } finally {
-2593        lock.readLock().unlock();
-2594      }
-2595      if (CollectionUtils.isNotEmpty(copyCompactedfiles)) {
-2596        removeCompactedfiles(copyCompactedfiles);
-2597      }
-2598    } finally {
-2599      archiveLock.unlock();
-2600    }
-2601  }
-2602
-2603  /**
-2604   * Archives and removes the compacted files
-2605   * @param compactedfiles The compacted files in this store that are not active in reads
-2606   */
-2607  private void removeCompactedfiles(Collection<HStoreFile> compactedfiles)
-2608      throws IOException {
-2609    final List<HStoreFile> filesToRemove = new ArrayList<>(compactedfiles.size());
-2610    final List<Long> storeFileSizes = new ArrayList<>(compactedfiles.size());
-2611    for (final HStoreFile file : compactedfiles) {
-2612      synchronized (file) {
-2613        try {
-2614          StoreFileReader r = file.getReader();
-2615          if (r == null) {
-2616            LOG.debug("The file {} was closed but still not archived", file);
-2617            // HACK: Temporarily re-open the reader so we can get the size of the file. Ideally,
-2618            // we should know the size of an HStoreFile without having to ask the HStoreFileReader
-2619            // for that.
-2620            long length = getStoreFileSize(file);
-2621            filesToRemove.add(file);
-2622            storeFileSizes.add(length);
-2623            continue;
-2624          }
-2625          if (file.isCompactedAway() && !file.isReferencedInReads()) {
-2626            // Even if deleting fails we need not bother as any new scanners won't be
-2627            // able to use the compacted file as the status is already compactedAway
-2628            LOG.trace("Closing and archiving the file {}", file);
-2629            // Copy the file size before closing the reader
-2630            final long length = r.length();
-2631            r.close(true);
-2632            // Just close and return
-2633            filesToRemove.add(file);
-2634            // Only add the length if we successfully added the file to `filesToRemove`
-2635            storeFileSizes.add(length);
-2636          } else {
-2637            LOG.info("Can't archive compacted file " + file.getPath()
-2638                + " because of either isCompactedAway = " + file.isCompactedAway()
-2639                + " or file has reference, isReferencedInReads = " + file.isReferencedInReads()
-2640                + ", skipping for now.");
-2641          }
-2642        } catch (Exception e) {
-2643          LOG.error("Exception while trying to close the compacted store file {}",
-2644              file.getPath(), e);
-2645        }
-2646      }
-2647    }
-2648    if (this.isPrimaryReplicaStore()) {
-2649      // Only the primary region is allowed to move the file to archive.
-2650      // The secondary region does not move the files to archive. Any active reads from
-2651      // the secondary region will still work because the file as such has active readers on it.
-2652      if (!filesToRemove.isEmpty()) {
-2653        LOG.debug("Moving the files {} to archive", filesToRemove);
-2654        // Only if this is successful it has
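
(closeAndArchiveCompactedFiles() above follows a common concurrency pattern:
take a quick snapshot of the shared collection while holding the read lock,
then do the slow archiving work outside it, with a separate archiveLock
serializing archivers. A generic, hedged sketch of that pattern; all names are
invented and this is not HBase code.)

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class SnapshotThenArchive<T> {
  private final ReentrantLock archiveLock = new ReentrantLock();
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  void archive(Collection<T> source) {
    archiveLock.lock();            // ensure only one thread archives at a time
    try {
      List<T> copy;
      lock.readLock().lock();      // copy quickly under the read lock
      try {
        copy = new ArrayList<>(source);
      } finally {
        lock.readLock().unlock();
      }
      for (T item : copy) {        // slow I/O happens outside the read lock
        moveToArchive(item);
      }
    } finally {
      archiveLock.unlock();
    }
  }

  private void moveToArchive(T item) {
    // placeholder for the expensive per-item work (close, rename, HDFS move)
  }
}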

[17/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.html
index 460238a..508fc27 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.html
@@ -269,7 +269,7 @@
 261      TableStateManager tsm = env.getMasterServices().getTableStateManager();
 262      TableState ts = tsm.getTableState(tableName);
 263      if (!ts.isEnabled()) {
-264        LOG.info("Not ENABLED skipping {}", this);
+264        LOG.info("Not ENABLED, state={}, skipping disable; {}", ts.getState(), this);
 265        setFailure("master-disable-table", new TableNotEnabledException(ts.toString()));
 266        canTableBeDisabled = false;
 267      }

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.html
index 95b7624..a87bdc6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.html
@@ -339,7 +339,7 @@
 331      TableStateManager tsm = env.getMasterServices().getTableStateManager();
 332      TableState ts = tsm.getTableState(tableName);
 333      if (!ts.isDisabled()) {
-334        LOG.info("Not DISABLED tableState=" + ts + "; skipping enable");
+334        LOG.info("Not DISABLED tableState={}; skipping enable; {}", ts.getState(), this);
 335        setFailure("master-enable-table", new TableNotDisabledException(ts.toString()));
 336        canTableBeEnabled = false;
 337      }
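
(Both hunks above replace string concatenation with SLF4J '{}' placeholders. A
minimal illustration of the difference, with an invented logger and values; the
placeholder form defers argument formatting until the level is known to be
enabled.)

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class LogStyleDemo {
  private static final Logger LOG = LoggerFactory.getLogger(LogStyleDemo.class);

  void demo(Object state, Object proc) {
    // Concatenation: the message String is always built, even if INFO is off.
    LOG.info("Not DISABLED tableState=" + state + "; skipping enable");
    // Placeholders: arguments are only formatted when INFO is enabled.
    LOG.info("Not DISABLED tableState={}; skipping enable; {}", state, proc);
  }
}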

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
index e074a8c..8cc5add 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
@@ -722,172 +722,172 @@
 714        "hbase.regionserver.kerberos.principal", host);
 715    }
 716
-717    protected void waitForMasterActive() {
-718    }
-719
-720    protected String getProcessName() {
-721      return REGIONSERVER;
-722    }
+717
+718    /**
+719     * Wait for an active Master.
+720     * See override in Master superclass for how it is used.
+721     */
+722    protected void waitForMasterActive() {}
 723
-724    protected boolean canCreateBaseZNode() {
-725      return this.masterless;
+724    protected String getProcessName() {
+725      return REGIONSERVER;
 726    }
 727
-728    protected boolean canUpdateTableDescriptor() {
-729      return false;
+728    protected boolean canCreateBaseZNode() {
+729      return this.masterless;
 730    }
 731
-732    protected RSRpcServices createRpcServices() throws IOException {
-733      return new RSRpcServices(this);
+732    protected boolean canUpdateTableDescriptor() {
+733      return false;
 734    }
 735
-736    protected void configureInfoServer() {
-737      infoServer.addServlet("rs-status", "/rs-status", RSStatusServlet.class);
-738      infoServer.setAttribute(REGIONSERVER, this);
-739    }
-740
-741    protected Class<? extends HttpServlet> getDumpServlet() {
-742      return RSDumpServlet.class;
+736    protected RSRpcServices createRpcServices() throws IOException {
+737      return new RSRpcServices(this);
+738    }
+739
+740    protected void configureInfoServer() {
+741      infoServer.addServlet("rs-status", "/rs-status", RSStatusServlet.class);
+742      infoServer.setAttribute(REGIONSERVER, this);
 743    }
 744
-745    @Override
-746    public boolean registerService(com.google.protobuf.Service instance) {
-747      /*
-748       * No stacking of instances is allowed for a single executorService name
-749       */
-750      com.google.protobuf.Descriptors.ServiceDescriptor serviceDesc =
-751          instance.getDescriptorForType();
-752      String serviceName = CoprocessorRpcUtils.getServiceName(serviceDesc);
-753      if (coprocessorServiceHandlers.containsKey(serviceName)) {
-754        LOG.error("Coprocessor

[04/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.SpyingRpcScheduler.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.SpyingRpcScheduler.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.SpyingRpcScheduler.html
index 8c8cc19..ad0629d 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.SpyingRpcScheduler.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.SpyingRpcScheduler.html

[39/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index a4fe324..0d1a56e 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -13020,6 +13020,8 @@

 Closes and archives the compacted files under this store

+closeAndArchiveCompactedFiles(boolean) - Method in class org.apache.hadoop.hbase.regionserver.HStore
+
 closeAndCleanCompleted - Variable in class org.apache.hadoop.hbase.wal.WALSplitter.OutputSink

 closeAndOfflineRegionForSplitOrMerge(List<String>) - Method in class org.apache.hadoop.hbase.regionserver.HRegionServer

@@ -13084,6 +13086,8 @@

 closed - Variable in class org.apache.hadoop.hbase.regionserver.SegmentScanner

+closed - Variable in class org.apache.hadoop.hbase.regionserver.StoreFileReader
+
 closed - Variable in class org.apache.hadoop.hbase.regionserver.StoreFileScanner

 closed - Variable in class org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL

@@ -13303,6 +13307,8 @@

 Closes the readers of store files.

+closeStreamReaders(boolean) - Method in class org.apache.hadoop.hbase.regionserver.HStoreFile
+
 closeStreams() - Method in interface org.apache.hadoop.hbase.io.hfile.HFileBlock.FSReader

 Closes the backing streams

@@ -41687,7 +41693,10 @@

 Deprecated.

 getLiveServersFromWALDir() - Method in class org.apache.hadoop.hbase.master.MasterWalManager
-
+
+Get Servernames that COULD BE 'alive'; excludes those that have a '-splitting'
+ suffix as these are already being split -- they cannot be 'alive'.
+
 getLoad(ServerName) - Method in class org.apache.hadoop.hbase.ClusterStatus

 Deprecated.

@@ -49647,6 +49656,8 @@

 getServerNameOrEmptyString(byte[]) - Static method in class org.apache.hadoop.hbase.zookeeper.ZKUtil

+getServerNamesFromWALDirPath(PathFilter) - Method in class org.apache.hadoop.hbase.master.MasterWalManager
+
 getServerNode(ServerName) - Method in class org.apache.hadoop.hbase.master.assignment.RegionStates

 getServerOperationType() - Method in class org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure

@@ -50456,6 +50467,10 @@

 getSplitThreadNum() - Method in class org.apache.hadoop.hbase.regionserver.CompactSplit

+getSplittingServersFromWALDir() - Method in class org.apache.hadoop.hbase.master.MasterWalManager
+
+Get Servernames which are currently splitting; paths have a '-splitting' suffix.
+
 getSrcChecksum() - Static method in class org.apache.hadoop.hbase.util.VersionInfo

 Get the checksum of the source files from which Hadoop was compiled.

@@ -54223,6 +54238,8 @@

 Construct the directory name for all WALs on a given server.

+getWALDirPaths(PathFilter) - Method in class org.apache.hadoop.hbase.master.MasterWalManager
+
 getWalEdit(int) - Method in class org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress

 getWalEntries() - Method in class org.apache.hadoop.hbase.replication.regionserver.WALEntryBatch

@@ -64508,6 +64525,8 @@

 isRegionOnline(RegionInfo) - Method in class org.apache.hadoop.hbase.master.assignment.RegionStates

+isRegionOnline(RegionInfo) - Method in class org.apache.hadoop.hbase.master.HMaster
+
 isRegionOverThreshold(RegionInfo) - Method in class org.apache.hadoop.hbase.master.assignment.AssignmentManager.RegionInTransitionStat

 isRegionReplicaReplicationEnabled(Configuration) - Static method in class org.apache.hadoop.hbase.util.ServerRegionReplicaUtil

@@ -67330,6 +67349,8 @@

 listener - Variable in class org.apache.hadoop.hbase.regionserver.Leases.Lease

+listener - Variable in class org.apache.hadoop.hbase.regionserver.StoreFileReader
+
 ListenerInfo(boolean, ServerConnector) - Constructor for class org.apache.hadoop.hbase.http.HttpServer.ListenerInfo

 listeners - Variable in class org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher

@@ -92409,7 +92430,7 @@

 removeCompactedFiles(Collection<HStoreFile>) - Method in class org.apache.hadoop.hbase.regionserver.DefaultStoreFileManager

-removeCompactedfiles(Collection<HStoreFile>) - Method in class org.apache.hadoop.hbase.regionserver.HStore
+removeCompactedfiles(Collection<HStoreFile>, boolean) - Method in class org.apache.hadoop.hbase.regionserver.HStore

 Archives and removes the compacted files

@@ -95684,6 +95705,8 @@

 RetryCounterFactory - Class in org.apache.hadoop.hbase.util

+RetryCounterFactory(int) - Constructor for class org.apache.hadoop.hbase.util.RetryCounterFactory
+
 RetryCounterFactory(int, int) - Constructor for class org.apache.hadoop.hbase.util.RetryCounterFactory

 RetryCounterFactory(int, int, int) - Constructor for class org.apache.hadoop.hbase.util.RetryCounterFactory

@@ -102861,6 +102884,8 @@

 setLimiter(Map<K, QuotaLimiter>, K, QuotaProtos.Quotas) - Method in class org.apache.hadoop.hbase.quotas.UserQuotaState
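
(The index above also records a new single-argument RetryCounterFactory(int)
constructor alongside the existing two- and three-argument forms. A hedged
sketch of how such a factory is commonly used follows; the create(),
shouldRetry(), and sleepUntilNextRetry() calls are assumptions about the API,
not taken from this diff.)

import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.RetryCounterFactory;

final class RetryLoopSketch {
  static void runWithRetries(Runnable action) throws InterruptedException {
    // Assumed: max attempts only; the sleep interval falls back to a default.
    RetryCounter retries = new RetryCounterFactory(5).create();
    while (true) {
      try {
        action.run();
        return;
      } catch (RuntimeException e) {
        if (!retries.shouldRetry()) {
          throw e;                      // out of attempts: give up
        }
        retries.sleepUntilNextRetry();  // back off before the next attempt
      }
    }
  }
}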
 

[08/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/testdevapidocs/org/apache/hadoop/hbase/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/package-tree.html
index 4dd2a36..40a78dc 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/package-tree.html
@@ -582,15 +582,15 @@

 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)

-org.apache.hadoop.hbase.ScanPerformanceEvaluation.ScanCounter
-org.apache.hadoop.hbase.IntegrationTestDDLMasterFailover.ACTION
-org.apache.hadoop.hbase.ResourceChecker.Phase
 org.apache.hadoop.hbase.ClusterManager.ServiceType
-org.apache.hadoop.hbase.RESTApiClusterManager.RoleCommand
+org.apache.hadoop.hbase.IntegrationTestDDLMasterFailover.ACTION
 org.apache.hadoop.hbase.IntegrationTestRegionReplicaPerf.Stat
+org.apache.hadoop.hbase.ScanPerformanceEvaluation.ScanCounter
+org.apache.hadoop.hbase.RESTApiClusterManager.RoleCommand
 org.apache.hadoop.hbase.HBaseClusterManager.CommandProvider.Operation
-org.apache.hadoop.hbase.RESTApiClusterManager.Service
 org.apache.hadoop.hbase.PerformanceEvaluation.Counter
+org.apache.hadoop.hbase.RESTApiClusterManager.Service
+org.apache.hadoop.hbase.ResourceChecker.Phase
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/testdevapidocs/org/apache/hadoop/hbase/procedure/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/procedure/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/procedure/package-tree.html
index a1c9b1d..28973e0 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/procedure/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/procedure/package-tree.html
@@ -81,14 +81,14 @@

 java.lang.Object

-org.apache.hadoop.hbase.procedure.Procedure (implements java.util.concurrent.Callable<V>, org.apache.hadoop.hbase.errorhandling.ForeignExceptionListener)
+org.apache.hadoop.hbase.procedure2.Procedure<TEnvironment> (implements java.lang.Comparable<T>)

-org.apache.hadoop.hbase.procedure.TestProcedure.LatchedProcedure
+org.apache.hadoop.hbase.procedure.TestProcedureDescriber.TestProcedure

-org.apache.hadoop.hbase.procedure2.Procedure<TEnvironment> (implements java.lang.Comparable<T>)
+org.apache.hadoop.hbase.procedure.Procedure (implements java.util.concurrent.Callable<V>, org.apache.hadoop.hbase.errorhandling.ForeignExceptionListener)

-org.apache.hadoop.hbase.procedure.TestProcedureDescriber.TestProcedure
+org.apache.hadoop.hbase.procedure.TestProcedure.LatchedProcedure

 org.apache.hadoop.hbase.procedure.ProcedureManager

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/testdevapidocs/org/apache/hadoop/hbase/regionserver/MockHStoreFile.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/MockHStoreFile.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/MockHStoreFile.html
index 677bf96..982c5da 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/MockHStoreFile.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/MockHStoreFile.html
@@ -114,7 +114,7 @@

 All Implemented Interfaces:
-org.apache.hadoop.hbase.regionserver.StoreFile
+org.apache.hadoop.hbase.regionserver.StoreFile, org.apache.hadoop.hbase.regionserver.StoreFileReader.Listener

@@ -335,7 +335,7 @@ extends org.apache.hadoop.hbase.regionserver.HStoreFile

 Methods inherited from class org.apache.hadoop.hbase.regionserver.HStoreFile
-closeStoreFile, deleteStoreFile, excludeFromMinorCompaction, getCacheConf, getComparator, getFileInfo, getFirstKey, getLastKey, getMaxMemStoreTS, getPath,

[40/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/738e976e
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/738e976e
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/738e976e

Branch: refs/heads/asf-site
Commit: 738e976e878fbe2c7fa00054696f60a85ef98101
Parents: bcb8946
Author: jenkins 
Authored: Mon Sep 17 14:53:08 2018 +
Committer: jenkins 
Committed: Mon Sep 17 14:53:08 2018 +

--
 acid-semantics.html |4 +-
 apache_hbase_reference_guide.pdf|4 +-
 book.html   |2 +-
 bulk-loads.html |4 +-
 checkstyle-aggregate.html   |  406 +-
 checkstyle.rss  |   16 +-
 coc.html|4 +-
 dependencies.html   |4 +-
 dependency-convergence.html |4 +-
 dependency-info.html|4 +-
 dependency-management.html  |4 +-
 devapidocs/allclasses-frame.html|1 +
 devapidocs/allclasses-noframe.html  |1 +
 devapidocs/constant-values.html |6 +-
 devapidocs/index-all.html   |   51 +-
 .../hadoop/hbase/backup/package-tree.html   |2 +-
 .../hadoop/hbase/class-use/ServerName.html  |   33 +-
 .../hbase/client/class-use/RegionInfo.html  |   68 +-
 .../hadoop/hbase/client/package-tree.html   |   24 +-
 .../hadoop/hbase/coprocessor/package-tree.html  |2 +-
 .../hadoop/hbase/filter/package-tree.html   |   10 +-
 .../hadoop/hbase/io/HalfStoreFileReader.html|   21 +-
 .../hadoop/hbase/io/hfile/package-tree.html |4 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   |2 +-
 .../hadoop/hbase/mapreduce/package-tree.html|2 +-
 .../master/HMaster.InitializationMonitor.html   |   20 +-
 .../master/HMaster.MasterStoppedException.html  |4 +-
 .../hbase/master/HMaster.RedirectServlet.html   |   12 +-
 .../master/HMaster.TableDescriptorGetter.html   |4 +-
 .../org/apache/hadoop/hbase/master/HMaster.html |  643 +-
 .../master/HMasterCommandLine.LocalHMaster.html |2 +-
 .../hadoop/hbase/master/MasterRpcServices.html  |  182 +-
 .../hadoop/hbase/master/MasterWalManager.html   |  104 +-
 .../hbase/master/RegionServerTracker.html   |   19 +-
 .../master/assignment/AssignmentManager.html|   88 +-
 .../hadoop/hbase/master/package-tree.html   |4 +-
 .../hbase/master/procedure/package-tree.html|4 +-
 .../org/apache/hadoop/hbase/package-tree.html   |   16 +-
 .../hadoop/hbase/procedure2/package-tree.html   |4 +-
 .../hadoop/hbase/quotas/package-tree.html   |8 +-
 .../hadoop/hbase/regionserver/HMobStore.html|2 +-
 .../hbase/regionserver/HRegionServer.html   |   34 +-
 .../hadoop/hbase/regionserver/HStore.html   |  302 +-
 .../hadoop/hbase/regionserver/HStoreFile.html   |  266 +-
 .../regionserver/StoreFileReader.Listener.html  |  230 +
 .../hbase/regionserver/StoreFileReader.html |  184 +-
 .../regionserver/StorefileRefresherChore.html   |4 +-
 .../regionserver/class-use/HStoreFile.html  |   17 +-
 .../class-use/StoreFileReader.Listener.html |  193 +
 .../regionserver/class-use/StoreFileReader.html |   21 +
 .../hbase/regionserver/package-frame.html   |1 +
 .../hbase/regionserver/package-summary.html |4 +
 .../hadoop/hbase/regionserver/package-tree.html |   21 +-
 .../hadoop/hbase/regionserver/package-use.html  |   33 +-
 .../regionserver/querymatcher/package-tree.html |2 +-
 .../hbase/security/access/package-tree.html |2 +-
 .../hadoop/hbase/security/package-tree.html |2 +-
 .../hadoop/hbase/thrift/package-tree.html   |4 +-
 .../hadoop/hbase/util/RetryCounterFactory.html  |   24 +-
 .../apache/hadoop/hbase/util/package-tree.html  |   10 +-
 devapidocs/overview-tree.html   |3 +-
 .../org/apache/hadoop/hbase/Version.html|6 +-
 .../master/HMaster.InitializationMonitor.html   | 7460 +-
 .../master/HMaster.MasterStoppedException.html  | 7460 +-
 .../hbase/master/HMaster.RedirectServlet.html   | 7460 +-
 .../master/HMaster.TableDescriptorGetter.html   | 7460 +-
 .../org/apache/hadoop/hbase/master/HMaster.html | 7460 +-
 .../MasterRpcServices.BalanceSwitchMode.html| 3613 +
 .../hadoop/hbase/master/MasterRpcServices.html  | 3613 +
 .../hadoop/hbase/master/MasterWalManager.html   |  398 +-
 .../hbase/master/RegionServerTracker.html   |  146 +-
 .../hbase/master/TableNamespaceManager.html |   28 +-
 

[37/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
index 4e6d5a3..e39c24f 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
@@ -128,7 +128,7 @@

 @InterfaceAudience.LimitedPrivate(value="Tools")
 public class HMaster
 extends HRegionServer
 implements MasterServices

 HMaster is the "master server" for HBase. An HBase cluster has one active

@@ -1155,25 +1155,29 @@

+private boolean
+isRegionOnline(RegionInfo ri)
+
 boolean
 isSplitOrMergeEnabled(MasterSwitchType switchType)
   Queries the state of the SplitOrMergeTracker.

 List<ServerName>
 listDecommissionedRegionServers()
   List region servers marked as decommissioned (previously called 'draining') to not get regions
   assigned to them.

 List<ReplicationPeerDescription>
 

[19/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
index eecf20f..df4d2d2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
@@ -1218,664 +1218,665 @@
 1210    long startTime = System.nanoTime();
 1211    LOG.debug("Joining cluster...");
 1212
-1213    // Scan hbase:meta to build list of existing regions, servers, and assignment
-1214    // hbase:meta is online when we get to here and TableStateManager has been started.
-1215    loadMeta();
-1216
-1217    while (master.getServerManager().countOfRegionServers() < 1) {
-1218      LOG.info("Waiting for RegionServers to join; current count={}",
-1219          master.getServerManager().countOfRegionServers());
-1220      Threads.sleep(250);
-1221    }
-1222    LOG.info("Number of RegionServers={}", master.getServerManager().countOfRegionServers());
-1223
-1224    processOfflineRegions();
-1225
-1226    // Start the RIT chore
-1227    master.getMasterProcedureExecutor().addChore(this.ritChore);
-1228
-1229    long costMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
-1230    LOG.info("Joined the cluster in {}", StringUtils.humanTimeDiff(costMs));
-1231  }
-1232
-1233  // Create assign procedure for offline regions.
-1234  // Just follow the old processofflineServersWithOnlineRegions method. Since now we do not need to
-1235  // deal with dead server any more, we only deal with the regions in OFFLINE state in this method.
-1236  // And this is a bit strange, that for new regions, we will add it in CLOSED state instead of
-1237  // OFFLINE state, and usually there will be a procedure to track them. The
-1238  // processofflineServersWithOnlineRegions is a legacy from long ago, as things are going really
-1239  // different now, maybe we do not need this method any more. Need to revisit later.
-1240  private void processOfflineRegions() {
-1241    List<RegionInfo> offlineRegions = regionStates.getRegionStates().stream()
-1242        .filter(RegionState::isOffline).filter(s -> isTableEnabled(s.getRegion().getTable()))
-1243        .map(RegionState::getRegion).collect(Collectors.toList());
-1244    if (!offlineRegions.isEmpty()) {
-1245      master.getMasterProcedureExecutor().submitProcedures(
-1246          master.getAssignmentManager().createRoundRobinAssignProcedures(offlineRegions));
-1247    }
-1248  }
-1249
-1250  private void loadMeta() throws IOException {
-1251    // TODO: use a thread pool
-1252    regionStateStore.visitMeta(new RegionStateStore.RegionStateVisitor() {
-1253      @Override
-1254      public void visitRegionState(Result result, final RegionInfo regionInfo, final State state,
-1255          final ServerName regionLocation, final ServerName lastHost, final long openSeqNum) {
-1256        if (state == null && regionLocation == null && lastHost == null &&
-1257            openSeqNum == SequenceId.NO_SEQUENCE_ID) {
-1258          // This is a row with nothing in it.
-1259          LOG.warn("Skipping empty row={}", result);
-1260          return;
-1261        }
-1262        State localState = state;
-1263        if (localState == null) {
-1264          // No region state column data in hbase:meta table! Are I doing a rolling upgrade from
-1265          // hbase1 to hbase2? Am I restoring a SNAPSHOT or otherwise adding a region to hbase:meta?
-1266          // In any of these cases, state is empty. For now, presume OFFLINE but there are probably
-1267          // cases where we need to probe more to be sure this correct; TODO informed by experience.
-1268          LOG.info(regionInfo.getEncodedName() + " regionState=null; presuming " + State.OFFLINE);
-1269
-1270          localState = State.OFFLINE;
-1271        }
-1272        RegionStateNode regionNode = regionStates.getOrCreateRegionStateNode(regionInfo);
-1273        // Do not need to lock on regionNode, as we can make sure that before we finish loading
-1274        // meta, all the related procedures can not be executed. The only exception is for meta
-1275        // region related operations, but here we do not load the informations for meta region.
-1276        regionNode.setState(localState);
-1277        regionNode.setLastHost(lastHost);
-1278        regionNode.setRegionLocation(regionLocation);
-1279        regionNode.setOpenSeqNum(openSeqNum);
-1280
-1281        if (localState == State.OPEN) {
-1282          assert regionLocation != null :
[09/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/testdevapidocs/org/apache/hadoop/hbase/TestMetaTableAccessor.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/TestMetaTableAccessor.html b/testdevapidocs/org/apache/hadoop/hbase/TestMetaTableAccessor.html
index d58d129..5abd915 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/TestMetaTableAccessor.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/TestMetaTableAccessor.html
@@ -109,7 +109,7 @@

 public class TestMetaTableAccessor
 extends Object

 Test MetaTableAccessor.

@@ -263,51 +263,59 @@

 void
+testIsMetaWhenAllHealthy()
+
+void
+testIsMetaWhenMetaGoesOffline()
+
+void
 testMastersSystemTimeIsUsedInMergeRegions()

 void
 testMastersSystemTimeIsUsedInUpdateLocations()
   Tests whether maximum of masters system time versus RSs local system time is used

 void
 testMetaLocationForRegionReplicasIsAddedAtRegionMerge()

 void
 testMetaLocationForRegionReplicasIsAddedAtRegionSplit()

 void
 testMetaLocationForRegionReplicasIsAddedAtTableCreation()

 void
 testMetaLocationForRegionReplicasIsRemovedAtTableDeletion()

 void
 testMetaLocationsForRegionReplicas()

 void
 testMetaReaderGetColumnMethods()

 void
 testMetaScanner()

 void
 testMetaUpdatesGoToPriorityQueue()

 void
 testParseReplicaIdFromServerColumn()

 void
 testRetrying()
   Does MetaTableAccessor.getRegion(Connection, byte[]) and a write
   against hbase:meta while its hosted server is restarted to prove our retrying works.

 void
 testScanMetaForTable()

 void
 testTableExists()

@@ -351,7 +359,7 @@

 CLASS_RULE
 public static final HBaseClassTestRule CLASS_RULE

 LOG
 private static final org.slf4j.Logger LOG

 UTIL
 private static final HBaseTestingUtility UTIL

 connection
 private static org.apache.hadoop.hbase.client.Connection connection

 random
 private Random random

 name
 public org.junit.rules.TestName name

 TestMetaTableAccessor
 public TestMetaTableAccessor()

 beforeClass
 public static void beforeClass() throws Exception

 Throws:
 Exception

 afterClass
 public static void afterClass() throws Exception
[22/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
index 17ea7d8..b56bd67 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
@@ -542,1814 +542,1813 @@
 534  RpcController controller, 
ReportRSFatalErrorRequest request) throws ServiceException {
 535String errorText = 
request.getErrorMessage();
 536ServerName sn = 
ProtobufUtil.toServerName(request.getServer());
-537String msg = "Region server " + sn
-538  + " reported a fatal error:\n" + 
errorText;
-539LOG.error(msg);
-540master.rsFatals.add(msg);
-541return 
ReportRSFatalErrorResponse.newBuilder().build();
-542  }
-543
-544  @Override
-545  public AddColumnResponse 
addColumn(RpcController controller,
-546  AddColumnRequest req) throws 
ServiceException {
-547try {
-548  long procId = master.addColumn(
-549  
ProtobufUtil.toTableName(req.getTableName()),
-550  
ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()),
-551  req.getNonceGroup(),
-552  req.getNonce());
-553  if (procId == -1) {
-554// This mean operation was not 
performed in server, so do not set any procId
-555return 
AddColumnResponse.newBuilder().build();
-556  } else {
-557return 
AddColumnResponse.newBuilder().setProcId(procId).build();
-558  }
-559} catch (IOException ioe) {
-560  throw new ServiceException(ioe);
-561}
-562  }
-563
-564  @Override
-565  public AssignRegionResponse 
assignRegion(RpcController controller,
-566  AssignRegionRequest req) throws 
ServiceException {
-567try {
-568  master.checkInitialized();
-569
-570  final RegionSpecifierType type = 
req.getRegion().getType();
-571  if (type != 
RegionSpecifierType.REGION_NAME) {
-572LOG.warn("assignRegion specifier 
type: expected: " + RegionSpecifierType.REGION_NAME
-573  + " actual: " + type);
-574  }
-575
-576  final byte[] regionName = 
req.getRegion().getValue().toByteArray();
-577  final RegionInfo regionInfo = 
master.getAssignmentManager().getRegionInfo(regionName);
-578  if (regionInfo == null) throw new 
UnknownRegionException(Bytes.toStringBinary(regionName));
-579
-580  final AssignRegionResponse arr = 
AssignRegionResponse.newBuilder().build();
-581  if (master.cpHost != null) {
-582
master.cpHost.preAssign(regionInfo);
-583  }
-584  
LOG.info(master.getClientIdAuditPrefix() + " assign " + 
regionInfo.getRegionNameAsString());
-585  
master.getAssignmentManager().assign(regionInfo);
-586  if (master.cpHost != null) {
-587
master.cpHost.postAssign(regionInfo);
-588  }
-589  return arr;
-590} catch (IOException ioe) {
-591  throw new ServiceException(ioe);
-592}
-593  }
+537String msg = sn + " reported a fatal 
error:\n" + errorText;
+538LOG.warn(msg);
+539master.rsFatals.add(msg);
+540return 
ReportRSFatalErrorResponse.newBuilder().build();
+541  }
+542
+543  @Override
+544  public AddColumnResponse 
addColumn(RpcController controller,
+545  AddColumnRequest req) throws 
ServiceException {
+546try {
+547  long procId = master.addColumn(
+548  
ProtobufUtil.toTableName(req.getTableName()),
+549  
ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()),
+550  req.getNonceGroup(),
+551  req.getNonce());
+552  if (procId == -1) {
+553// This means the operation was not 
performed on the server, so do not set any procId
+554return 
AddColumnResponse.newBuilder().build();
+555  } else {
+556return 
AddColumnResponse.newBuilder().setProcId(procId).build();
+557  }
+558} catch (IOException ioe) {
+559  throw new ServiceException(ioe);
+560}
+561  }
+562
+563  @Override
+564  public AssignRegionResponse 
assignRegion(RpcController controller,
+565  AssignRegionRequest req) throws 
ServiceException {
+566try {
+567  master.checkInitialized();
+568
+569  final RegionSpecifierType type = 
req.getRegion().getType();
+570  if (type != 
RegionSpecifierType.REGION_NAME) {
+571LOG.warn("assignRegion specifier 
type: expected: " + RegionSpecifierType.REGION_NAME
+572  + " actual: " + type);
+573  }
+574
+575  final byte[] regionName = 
req.getRegion().getValue().toByteArray();
+576  final RegionInfo regionInfo = 
master.getAssignmentManager().getRegionInfo(regionName);
+577  if (regionInfo == null) throw new 
UnknownRegionException(Bytes.toStringBinary(regionName));
+578
+579  final 
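The behavioral change buried in this hunk is small: reportRSFatalError now builds its message straight from the ServerName's own toString() and logs at WARN rather than ERROR before recording the report. Re-joined from the wrapped lines above, a condensed sketch, assuming the surrounding MasterRpcServices fields:

    public ReportRSFatalErrorResponse reportRSFatalError(
        RpcController controller, ReportRSFatalErrorRequest request) throws ServiceException {
      String errorText = request.getErrorMessage();
      ServerName sn = ProtobufUtil.toServerName(request.getServer());
      String msg = sn + " reported a fatal error:\n" + errorText; // no more "Region server " prefix
      LOG.warn(msg);                                              // was LOG.error before this publish
      master.rsFatals.add(msg);                                   // kept for the master's fatal-error buffer
      return ReportRSFatalErrorResponse.newBuilder().build();
    }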

[06/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/testdevapidocs/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.MockHMaster.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.MockHMaster.html
 
b/testdevapidocs/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.MockHMaster.html
index e4180ea..320a1f0 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.MockHMaster.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.MockHMaster.html
@@ -270,7 +270,7 @@ extends org.apache.hadoop.hbase.master.HMaster
 
 
 Methods inherited from 
class org.apache.hadoop.hbase.master.HMaster
-abort, abortProcedure, addColumn, addReplicationPeer, balance, balance, 
balanceSwitch, canCreateBaseZNode, canUpdateTableDescriptor, 
checkIfShouldMoveSystemRegionAsync, checkTableModifiable, configureInfoServer, 
constructMaster, createMetaBootstrap, createRpcServices, createServerManager, 
createSystemTable, createTable, decommissionRegionServers, 
decorateMasterConfiguration, deleteColumn, deleteTable, disableReplicationPeer, 
disableTable, enableReplicationPeer, enableTable, getAssignmentManager, 
getAverageLoad, getCatalogJanitor, getClientIdAuditPrefix, getClusterMetrics, 
getClusterMetrics, getClusterMetricsWithoutCoprocessor, 
getClusterMetricsWithoutCoprocessor, getClusterSchema, getDumpServlet, 
getFavoredNodesManager, getHFileCleaner, getInitializedEvent, 
getLastMajorCompactionTimestamp, getLastMajorCompactionTimestampForRegion, 
getLoadBalancer, getLoadBalancerClassName, getLoadedCoprocessors, 
getLockManager, getLocks, getLogCleaner, getMasterActiveTime, 
getMasterCoprocessorHost, getMasterCoprocessors, getMasterFileSystem, 
getMasterFinishedInitializationTime, getMasterMetrics, 
getMasterProcedureExecutor, getMasterProcedureManagerHost, 
getMasterQuotaManager, getMasterRpcServices, getMasterStartTime, 
getMasterWalManager, getMergePlanCount, getMetaTableObserver, 
getMobCompactionState, getNumWALFiles, getProcedures, getProcessName, 
getQuotaObserverChore, getRegionNormalizer, getRegionNormalizerTracker, 
getRegionServerFatalLogBuffer, getRegionServerInfoPort, getRegionServerVersion, 
getReplicationLoad, getReplicationPeerConfig, getServerManager, getServerName, 
getSnapshotManager, getSnapshotQuotaObserverChore, 
getSpaceQuotaSnapshotNotifier, getSplitOrMergeTracker, getSplitPlanCount, 
getSyncReplicationReplayWALManager, getTableDescriptors, getTableStateManager, 
getUseThisHostnameInstead, getWalProcedureStore, getZooKeeper, 
initClusterSchemaService, initializeZKBasedSystemTrackers, isActiveMaster, 
isBalancerOn, isInitialized, isInMaintenanceMode, isNormalizerOn, 
isSplitOrMergeEnabled, listDecommissionedRegionServers, 
listReplicationPeers, listTableDescriptors, listTableDescriptorsByNamespace, 
listTableNames, listTableNamesByNamespace, login, main, mergeRegions, 
modifyColumn, modifyTable, move, normalizeRegions, recommissionRegionServer, 
registerService, remoteProcedureCompleted, remoteProcedureFailed, 
removeReplicationPeer, reportMobCompactionEnd, reportMobCompactionStart, 
requestMobCompaction, restoreSnapshot, run, setCatalogJanitorEnabled, 
setInitialized, shutdown, splitRegion, stop, stopMaster, stopServiceThreads, 
transitReplicationPeerSyncReplicationState, truncateTable, 
updateConfigurationForQuotasObserver, updateReplicationPeerConfig, 
waitForMasterActive
+abort, abortProcedure, addColumn, addReplicationPeer, balance, balance, 
balanceSwitch, canCreateBaseZNode, canUpdateTableDescriptor, 
checkIfShouldMoveSystemRegionAsync, checkTableModifiable, configureInfoServer, 
constructMaster, createMetaBootstrap, createRpcServices, createServerManager, 
createSystemTable, createTable, decommissionRegionServers, 
decorateMasterConfiguration, deleteColumn, deleteTable, disableReplicationPeer, 
disableTable, enableReplicationPeer, enableTable, getAssignmentManager, 
getAverageLoad, getCatalogJanitor, getClientIdAuditPrefix, getClusterMetrics, 
getClusterMetrics, getClusterMetricsWithoutCoprocessor, 
getClusterMetricsWithoutCoprocessor, getClusterSchema, getDumpServlet, 
getFavoredNodesManager, getHFileCleaner, getInitializedEvent, 
getLastMajorCompactionTimestamp, getLastMajorCompactionTimestampForRegion, 
getLoadBalancer, getLoadBalancerClassName, getLoadedCoprocessors, 
getLockManager, getLocks, getLogCleaner, getMasterActiveTime, 
getMasterCoprocessorHost, getMasterCoprocessors, getMasterFileSystem, 
getMasterFinishedInitializationTime, getMasterMetrics, 
getMasterProcedureExecutor, getMasterProcedureManagerHost, 
getMasterQuotaManager, getMasterRpcServices, getMasterStartTime, 
getMasterWalManager, getMergePlanCount, getMetaTableObserver, 
getMobCompactionState, getNumWALFiles, getProcedures, getProcessName, 
getQuotaObserverChore, getRegionNormalizer, 
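MockHMaster itself adds almost nothing; the page is dominated by methods inherited from HMaster. The pattern it represents is simply a test double built by subclassing the master. A hedged sketch of that pattern -- the constructor signature follows 2.x HMaster, and the override comment is illustrative only, not taken from this page:

    // Illustrative only: a minimal HMaster subclass in the MockHMaster style.
    public static class MockMasterSketch extends org.apache.hadoop.hbase.master.HMaster {
      public MockMasterSketch(org.apache.hadoop.conf.Configuration conf) throws Exception {
        super(conf);
      }
      // A test double would override one of the inherited hooks listed above
      // to inject failures or stubs for the behavior under test.
    }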

[35/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html 
b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
index 79af07a..cca66ed 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
@@ -1071,7 +1071,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 addColumn
-public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse addColumn(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
+public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse addColumn(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
                   org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest req)
            throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException
 
@@ -1088,7 +1088,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 assignRegion
-public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse assignRegion(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
+public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse assignRegion(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
                   org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest req)
            throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException
 
@@ -1105,7 +1105,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 balance
-public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse balance(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
+public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse balance(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
                   org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest request)
            throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException
 
@@ -1122,7 +1122,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 createNamespace
-public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse createNamespace(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
+public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse createNamespace(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
                   org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest request)
            throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException
 
@@ -1139,7 +1139,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 createTable
-public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse createTable(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
+public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse createTable(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
                   org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest req)
            throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException
 
@@ -1156,7 +1156,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 deleteColumn
-public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse deleteColumn(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
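Every endpoint re-anchored above (addColumn, assignRegion, balance, createNamespace, createTable, deleteColumn) shares one convention, visible in the MasterRpcServices source earlier in this thread: do the work against the master, and wrap any IOException in the thirdparty ServiceException. A generic sketch of that shape; the MasterWork interface is a hypothetical stand-in, not an HBase type:

    import java.io.IOException;
    import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;

    interface MasterWork<R> {
      R doWork() throws IOException;  // hypothetical stand-in for the per-endpoint body
    }

    final class EndpointSketch {
      static <R> R call(MasterWork<R> work) throws ServiceException {
        try {
          return work.doWork();
        } catch (IOException ioe) {
          throw new ServiceException(ioe);  // checked IO failures surface to the RPC layer
        }
      }
    }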

[11/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.html
index 6df7d6a..b570921 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.html
@@ -93,566 +93,587 @@
 085  @VisibleForTesting
 086  final boolean shared;
 087
-088  private StoreFileReader(HFile.Reader 
reader, AtomicInteger refCount, boolean shared) {
-089this.reader = reader;
-090bloomFilterType = BloomType.NONE;
-091this.refCount = refCount;
-092this.shared = shared;
-093  }
-094
-095  public StoreFileReader(FileSystem fs, 
Path path, CacheConfig cacheConf,
-096  boolean primaryReplicaStoreFile, 
AtomicInteger refCount, boolean shared, Configuration conf)
-097  throws IOException {
-098this(HFile.createReader(fs, path, 
cacheConf, primaryReplicaStoreFile, conf), refCount, shared);
-099  }
-100
-101  public StoreFileReader(FileSystem fs, 
Path path, FSDataInputStreamWrapper in, long size,
-102  CacheConfig cacheConf, boolean 
primaryReplicaStoreFile, AtomicInteger refCount,
-103  boolean shared, Configuration conf) 
throws IOException {
-104this(HFile.createReader(fs, path, in, 
size, cacheConf, primaryReplicaStoreFile, conf), refCount,
-105shared);
-106  }
-107
-108  void copyFields(StoreFileReader reader) 
{
-109this.generalBloomFilter = 
reader.generalBloomFilter;
-110this.deleteFamilyBloomFilter = 
reader.deleteFamilyBloomFilter;
-111this.bloomFilterType = 
reader.bloomFilterType;
-112this.sequenceID = 
reader.sequenceID;
-113this.timeRange = reader.timeRange;
-114this.lastBloomKey = 
reader.lastBloomKey;
-115this.bulkLoadResult = 
reader.bulkLoadResult;
-116this.lastBloomKeyOnlyKV = 
reader.lastBloomKeyOnlyKV;
-117this.skipResetSeqId = 
reader.skipResetSeqId;
-118  }
-119
-120  public boolean isPrimaryReplicaReader() 
{
-121return 
reader.isPrimaryReplicaReader();
+088  private volatile Listener listener;
+089
+090  private boolean closed = false;
+091
+092  private StoreFileReader(HFile.Reader 
reader, AtomicInteger refCount, boolean shared) {
+093this.reader = reader;
+094bloomFilterType = BloomType.NONE;
+095this.refCount = refCount;
+096this.shared = shared;
+097  }
+098
+099  public StoreFileReader(FileSystem fs, 
Path path, CacheConfig cacheConf,
+100  boolean primaryReplicaStoreFile, 
AtomicInteger refCount, boolean shared, Configuration conf)
+101  throws IOException {
+102this(HFile.createReader(fs, path, 
cacheConf, primaryReplicaStoreFile, conf), refCount, shared);
+103  }
+104
+105  public StoreFileReader(FileSystem fs, 
Path path, FSDataInputStreamWrapper in, long size,
+106  CacheConfig cacheConf, boolean 
primaryReplicaStoreFile, AtomicInteger refCount,
+107  boolean shared, Configuration conf) 
throws IOException {
+108this(HFile.createReader(fs, path, in, 
size, cacheConf, primaryReplicaStoreFile, conf), refCount,
+109shared);
+110  }
+111
+112  void copyFields(StoreFileReader reader) 
{
+113this.generalBloomFilter = 
reader.generalBloomFilter;
+114this.deleteFamilyBloomFilter = 
reader.deleteFamilyBloomFilter;
+115this.bloomFilterType = 
reader.bloomFilterType;
+116this.sequenceID = 
reader.sequenceID;
+117this.timeRange = reader.timeRange;
+118this.lastBloomKey = 
reader.lastBloomKey;
+119this.bulkLoadResult = 
reader.bulkLoadResult;
+120this.lastBloomKeyOnlyKV = 
reader.lastBloomKeyOnlyKV;
+121this.skipResetSeqId = 
reader.skipResetSeqId;
 122  }
 123
-124  /**
-125   * ONLY USE DEFAULT CONSTRUCTOR FOR 
UNIT TESTS
-126   */
-127  @VisibleForTesting
-128  StoreFileReader() {
-129this.refCount = new 
AtomicInteger(0);
-130this.reader = null;
-131this.shared = false;
-132  }
-133
-134  public CellComparator getComparator() 
{
-135return reader.getComparator();
+124  public boolean isPrimaryReplicaReader() 
{
+125return 
reader.isPrimaryReplicaReader();
+126  }
+127
+128  /**
+129   * ONLY USE DEFAULT CONSTRUCTOR FOR 
UNIT TESTS
+130   */
+131  @VisibleForTesting
+132  StoreFileReader() {
+133this.refCount = new 
AtomicInteger(0);
+134this.reader = null;
+135this.shared = false;
 136  }
 137
-138  /**
-139   * Get a scanner to scan over this 
StoreFile.
-140   * @param cacheBlocks should this 
scanner cache blocks?
-141   * @param pread use pread (for highly 
concurrent small readers)
-142   * @param isCompaction is scanner being 
used for compaction?
-143   * @param scannerOrder Order of this 
scanner relative to other scanners. See
-144   *  {@link 
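The two fields this hunk threads into StoreFileReader -- a volatile Listener and a closed flag -- give the reader a way to announce, exactly once, that it has been closed. The Listener interface itself lands as a new file later in this thread; its method name is not visible there, so the one below is an assumption:

    // Sketch of the close-notification pattern the new fields enable.
    // readerClosed is an assumed name; only the two fields are taken from the hunk.
    public class CloseNotifyingReaderSketch {
      public interface Listener {
        void readerClosed(CloseNotifyingReaderSketch reader);
      }

      private volatile Listener listener;  // may be swapped by the owning store file
      private boolean closed = false;

      public void setListener(Listener listener) {
        this.listener = listener;
      }

      public synchronized void close() {
        if (closed) {
          return;                          // idempotent: notify at most once
        }
        closed = true;
        Listener l = listener;             // volatile read
        if (l != null) {
          l.readerClosed(this);
        }
      }
    }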

[18/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index eecf20f..df4d2d2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -1218,664 +1218,665 @@
 1210long startTime = 
System.nanoTime();
 1211LOG.debug("Joining cluster...");
 1212
-1213// Scan hbase:meta to build list of 
existing regions, servers, and assignment
-1214// hbase:meta is online when we get 
to here and TableStateManager has been started.
-1215loadMeta();
-1216
-1217while 
(master.getServerManager().countOfRegionServers() < 1) {
-1218  LOG.info("Waiting for 
RegionServers to join; current count={}",
-1219
master.getServerManager().countOfRegionServers());
-1220  Threads.sleep(250);
-1221}
-1222LOG.info("Number of 
RegionServers={}", master.getServerManager().countOfRegionServers());
-1223
-1224processOfflineRegions();
-1225
-1226// Start the RIT chore
-1227
master.getMasterProcedureExecutor().addChore(this.ritChore);
-1228
-1229long costMs = 
TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
-1230LOG.info("Joined the cluster in {}", 
StringUtils.humanTimeDiff(costMs));
-1231  }
-1232
-1233  // Create assign procedure for offline 
regions.
-1234  // Just follow the old 
processofflineServersWithOnlineRegions method. Since now we do not need to
-1235  // deal with dead server any more, we 
only deal with the regions in OFFLINE state in this method.
-1236  // And this is a bit strange, that for 
new regions, we will add it in CLOSED state instead of
-1237  // OFFLINE state, and usually there 
will be a procedure to track them. The
-1238  // 
processofflineServersWithOnlineRegions is a legacy from long ago, as things are 
going really
-1239  // different now, maybe we do not need 
this method any more. Need to revisit later.
-1240  private void processOfflineRegions() 
{
-1241List<RegionInfo> 
offlineRegions = regionStates.getRegionStates().stream()
-1242  
.filter(RegionState::isOffline).filter(s -> 
isTableEnabled(s.getRegion().getTable()))
-1243  
.map(RegionState::getRegion).collect(Collectors.toList());
-1244if (!offlineRegions.isEmpty()) {
-1245  
master.getMasterProcedureExecutor().submitProcedures(
-1246
master.getAssignmentManager().createRoundRobinAssignProcedures(offlineRegions));
-1247}
-1248  }
-1249
-1250  private void loadMeta() throws 
IOException {
-1251// TODO: use a thread pool
-1252regionStateStore.visitMeta(new 
RegionStateStore.RegionStateVisitor() {
-1253  @Override
-1254  public void 
visitRegionState(Result result, final RegionInfo regionInfo, final State 
state,
-1255  final ServerName 
regionLocation, final ServerName lastHost, final long openSeqNum) {
-1256if (state == null && 
regionLocation == null && lastHost == null && 
-1257openSeqNum == 
SequenceId.NO_SEQUENCE_ID) {
-1258  // This is a row with nothing 
in it.
-1259  LOG.warn("Skipping empty 
row={}", result);
-1260  return;
-1261}
-1262State localState = state;
-1263if (localState == null) {
-1264  // No region state column data 
in hbase:meta table! Am I doing a rolling upgrade from
-1265  // hbase1 to hbase2? Am I 
restoring a SNAPSHOT or otherwise adding a region to hbase:meta?
-1266  // In any of these cases, 
state is empty. For now, presume OFFLINE but there are probably
-1267  // cases where we need to 
probe more to be sure this is correct; TODO informed by experience.
-1268  
LOG.info(regionInfo.getEncodedName() + " regionState=null; presuming " + 
State.OFFLINE);
-1269
-1270  localState = State.OFFLINE;
-1271}
-1272RegionStateNode regionNode = 
regionStates.getOrCreateRegionStateNode(regionInfo);
-1273// Do not need to lock on 
regionNode, as we can make sure that before we finish loading
-1274// meta, all the related 
procedures can not be executed. The only exception is for meta
-1275// region related operations, 
but here we do not load the information for the meta region.
-1276
regionNode.setState(localState);
-1277
regionNode.setLastHost(lastHost);
-1278
regionNode.setRegionLocation(regionLocation);
-1279
regionNode.setOpenSeqNum(openSeqNum);
-1280
-1281if (localState == State.OPEN) 
{
-1282  assert regionLocation != null 
: "found null region location for " + regionNode;
-1283  
regionStates.addRegionToServer(regionNode);
-1284  
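Because the archived HTML strips angle brackets, arrows and ampersands, the new startup path is easier to read re-typed. The processOfflineRegions portion of this hunk, reconstructed with those characters restored -- logic unchanged, and it still assumes the surrounding AssignmentManager fields:

    private void processOfflineRegions() {
      List<RegionInfo> offlineRegions = regionStates.getRegionStates().stream()
          .filter(RegionState::isOffline)                         // only OFFLINE regions
          .filter(s -> isTableEnabled(s.getRegion().getTable()))  // skip disabled tables
          .map(RegionState::getRegion)
          .collect(Collectors.toList());
      if (!offlineRegions.isEmpty()) {
        // Round-robin assign procedures are submitted in bulk for all of them.
        master.getMasterProcedureExecutor().submitProcedures(
            master.getAssignmentManager().createRoundRobinAssignProcedures(offlineRegions));
      }
    }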

[13/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStoreFile.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStoreFile.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStoreFile.html
index d5face4..db404ab 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStoreFile.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStoreFile.html
@@ -31,573 +31,606 @@
 023import java.util.Map;
 024import java.util.Optional;
 025import java.util.OptionalLong;
-026import 
java.util.concurrent.atomic.AtomicBoolean;
-027import 
java.util.concurrent.atomic.AtomicInteger;
-028
-029import 
org.apache.hadoop.conf.Configuration;
-030import org.apache.hadoop.fs.FileSystem;
-031import org.apache.hadoop.fs.Path;
-032import org.apache.hadoop.hbase.Cell;
-033import 
org.apache.hadoop.hbase.CellComparator;
-034import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-035import 
org.apache.hadoop.hbase.io.TimeRange;
-036import 
org.apache.hadoop.hbase.io.hfile.BlockType;
-037import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-038import 
org.apache.hadoop.hbase.io.hfile.HFile;
-039import 
org.apache.hadoop.hbase.util.BloomFilterFactory;
-040import 
org.apache.hadoop.hbase.util.Bytes;
-041import 
org.apache.yetus.audience.InterfaceAudience;
-042import org.slf4j.Logger;
-043import org.slf4j.LoggerFactory;
-044import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+026import java.util.Set;
+027import 
java.util.concurrent.ConcurrentHashMap;
+028import 
java.util.concurrent.atomic.AtomicBoolean;
+029import 
java.util.concurrent.atomic.AtomicInteger;
+030
+031import 
org.apache.hadoop.conf.Configuration;
+032import org.apache.hadoop.fs.FileSystem;
+033import org.apache.hadoop.fs.Path;
+034import org.apache.hadoop.hbase.Cell;
+035import 
org.apache.hadoop.hbase.CellComparator;
+036import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
+037import 
org.apache.hadoop.hbase.io.TimeRange;
+038import 
org.apache.hadoop.hbase.io.hfile.BlockType;
+039import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
+040import 
org.apache.hadoop.hbase.io.hfile.HFile;
+041import 
org.apache.hadoop.hbase.util.BloomFilterFactory;
+042import 
org.apache.hadoop.hbase.util.Bytes;
+043
+044import 
org.apache.yetus.audience.InterfaceAudience;
 045
-046/**
-047 * A Store data file.  Stores usually 
have one or more of these files.  They
-048 * are produced by flushing the memstore 
to disk.  To
-049 * create, instantiate a writer using 
{@link StoreFileWriter.Builder}
-050 * and append data. Be sure to add any 
metadata before calling close on the
-051 * Writer (Use the appendMetadata 
convenience methods). On close, a StoreFile
-052 * is sitting in the Filesystem.  To 
refer to it, create a StoreFile instance
-053 * passing filesystem and path.  To read, 
call {@link #initReader()}
-054 * <p>StoreFiles may also reference 
store files in another Store.
-055 *
-056 * The reason for this weird pattern 
where you use a different instance for the
-057 * writer and a reader is that we write 
once but read a lot more.
-058 */
-059@InterfaceAudience.Private
-060public class HStoreFile implements 
StoreFile {
-061
-062  private static final Logger LOG = 
LoggerFactory.getLogger(HStoreFile.class.getName());
-063
-064  public static final String 
STORE_FILE_READER_NO_READAHEAD = "hbase.store.reader.no-readahead";
-065
-066  private static final boolean 
DEFAULT_STORE_FILE_READER_NO_READAHEAD = false;
+046import org.slf4j.Logger;
+047import org.slf4j.LoggerFactory;
+048import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+049import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+050
+051
+052/**
+053 * A Store data file.  Stores usually 
have one or more of these files.  They
+054 * are produced by flushing the memstore 
to disk.  To
+055 * create, instantiate a writer using 
{@link StoreFileWriter.Builder}
+056 * and append data. Be sure to add any 
metadata before calling close on the
+057 * Writer (Use the appendMetadata 
convenience methods). On close, a StoreFile
+058 * is sitting in the Filesystem.  To 
refer to it, create a StoreFile instance
+059 * passing filesystem and path.  To read, 
call {@link #initReader()}
+060 * <p>StoreFiles may also reference 
store files in another Store.
+061 *
+062 * The reason for this weird pattern 
where you use a different instance for the
+063 * writer and a reader is that we write 
once but read a lot more.
+064 */
+065@InterfaceAudience.Private
+066public class HStoreFile implements 
StoreFile, StoreFileReader.Listener {
 067
-068  // Keys for fileinfo values in HFile
+068  private static final Logger LOG = 
LoggerFactory.getLogger(HStoreFile.class.getName());
 069
-070  /** Max Sequence ID in FileInfo */
-071  public static final byte[] 
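The class comment carried across this hunk describes HStoreFile's write-once/read-many life cycle: build a writer, append, add metadata, close, then open a reader separately. A hedged sketch of that flow -- the builder and constructor signatures reflect 2.x of this era but should be treated as assumptions, and the path is illustrative:

    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/hbase/example/storefile");  // hypothetical location
    CacheConfig cacheConf = new CacheConfig(conf);

    // Write once: append cells, add metadata BEFORE close, then close.
    StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
        .withFilePath(path)
        .build();
    writer.appendMetadata(1L /* maxSequenceId */, false /* majorCompaction */);
    writer.close();

    // Read many: a separate instance refers to the finished file.
    HStoreFile storeFile = new HStoreFile(fs, path, conf, cacheConf,
        BloomType.NONE, true /* primaryReplica */);
    storeFile.initReader();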

[21/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html
index a7df46f..f0b26f3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html
@@ -152,195 +152,219 @@
 144  }
 145
 146  /**
-147   * @return listing of ServerNames found 
in the filesystem under the WAL directory
-148   *   that COULD BE 'alive'; excludes 
those that have a '-splitting' suffix as these are already
-149   *   being split -- they cannot be 
'alive'.
+147   * Get ServerNames which are currently 
splitting; paths have a '-splitting' suffix.
+148   * @return ServerName
+149   * @throws IOException IOException
 150   */
-151  public Set<ServerName> 
getLiveServersFromWALDir() throws IOException {
-152Path walDirPath = new Path(rootDir, 
HConstants.HREGION_LOGDIR_NAME);
-153FileStatus[] walDirForLiveServers = 
FSUtils.listStatus(fs, walDirPath,
-154  p -> 
!p.getName().endsWith(AbstractFSWALProvider.SPLITTING_EXT));
-155if (walDirForLiveServers == null) {
-156  return Collections.emptySet();
-157}
-158return 
Stream.of(walDirForLiveServers).map(s -> {
-159  ServerName serverName = 
AbstractFSWALProvider.getServerNameFromWALDirectoryName(s.getPath());
-160  if (serverName == null) {
-161LOG.warn("Log folder {} doesn't 
look like its name includes a " +
-162  "region server name; leaving in 
place. If you see later errors about missing " +
-163  "write ahead logs they may be 
saved in this location.", s.getPath());
-164return null;
-165  }
-166  return serverName;
-167}).filter(s -> s != 
null).collect(Collectors.toSet());
-168  }
-169
-170  /**
-171   * Inspect the log directory to find 
dead servers which need recovery work
-172   * @return A set of ServerNames which 
aren't running but still have WAL files left in file system
-173   * @deprecated With proc-v2, we can 
record the crash server with procedure store, so do not need
-174   * to scan the wal 
directory to find out the splitting wal directory any more. Leave
-175   * it here only because 
{@code RecoverMetaProcedure}(which is also deprecated) uses
-176   * it.
-177   */
-178  @Deprecated
-179  public Set<ServerName> 
getFailedServersFromLogFolders() {
-180boolean retrySplitting = 
!conf.getBoolean("hbase.hlog.split.skip.errors",
-181
WALSplitter.SPLIT_SKIP_ERRORS_DEFAULT);
-182
-183Set<ServerName> serverNames = 
new HashSet<>();
-184Path logsDirPath = new 
Path(this.rootDir, HConstants.HREGION_LOGDIR_NAME);
-185
-186do {
-187  if (services.isStopped()) {
-188LOG.warn("Master stopped while 
trying to get failed servers.");
-189break;
-190  }
-191  try {
-192if (!this.fs.exists(logsDirPath)) 
return serverNames;
-193FileStatus[] logFolders = 
FSUtils.listStatus(this.fs, logsDirPath, null);
-194// Get online servers after 
getting log folders to avoid log folder deletion of newly
-195// checked in region servers . 
see HBASE-5916
-196Set<ServerName> 
onlineServers = services.getServerManager().getOnlineServers().keySet();
-197
-198if (logFolders == null || 
logFolders.length == 0) {
-199  LOG.debug("No log files to 
split, proceeding...");
-200  return serverNames;
-201}
-202for (FileStatus status : 
logFolders) {
-203  FileStatus[] curLogFiles = 
FSUtils.listStatus(this.fs, status.getPath(), null);
-204  if (curLogFiles == null || 
curLogFiles.length == 0) {
-205// Empty log folder. No 
recovery needed
-206continue;
-207  }
-208  final ServerName serverName = 
AbstractFSWALProvider.getServerNameFromWALDirectoryName(
-209  status.getPath());
-210  if (null == serverName) {
-211LOG.warn("Log folder " + 
status.getPath() + " doesn't look like its name includes a " +
-212"region server name; 
leaving in place. If you see later errors about missing " +
-213"write ahead logs they 
may be saved in this location.");
-214  } else if 
(!onlineServers.contains(serverName)) {
-215LOG.info("Log folder " + 
status.getPath() + " doesn't belong "
-216+ "to a known region 
server, splitting");
-217
serverNames.add(serverName);
-218  } else {
-219LOG.info("Log folder " + 
status.getPath() + " belongs to an existing region server");
-220  }
-221}
-222retrySplitting = false;
-223  } catch (IOException ioe) {
-224LOG.warn("Failed 
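As elsewhere in this publish, the generics and lambdas above lost their brackets in rendering. The new getLiveServersFromWALDir, reconstructed with the stripped <, > and -> restored and the wrapped lines re-joined (content exactly as in the hunk):

    public Set<ServerName> getLiveServersFromWALDir() throws IOException {
      Path walDirPath = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
      FileStatus[] walDirForLiveServers = FSUtils.listStatus(fs, walDirPath,
          p -> !p.getName().endsWith(AbstractFSWALProvider.SPLITTING_EXT)); // '-splitting' dirs excluded
      if (walDirForLiveServers == null) {
        return Collections.emptySet();
      }
      return Stream.of(walDirForLiveServers).map(s -> {
        ServerName serverName = AbstractFSWALProvider.getServerNameFromWALDirectoryName(s.getPath());
        if (serverName == null) {
          LOG.warn("Log folder {} doesn't look like its name includes a "
              + "region server name; leaving in place. If you see later errors about missing "
              + "write ahead logs they may be saved in this location.", s.getPath());
          return null;
        }
        return serverName;
      }).filter(s -> s != null).collect(Collectors.toSet());
    }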

[32/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
index 4f72101..a1e50c1 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":9,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":9,"i35":10,"i36":9,"i37":9,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":41,"i95":41,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
 
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":9,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":9,"i36":10,"i37":9,"i38":9,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":41,"i96":41,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
 
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -499,12 +499,16 @@ implements 
+void
+closeAndArchiveCompactedFiles(boolean storeClosing)
+
+
 private HStoreFile
 commitFile(org.apache.hadoop.fs.Path path,
   long logCacheFlushId,
   MonitoredTask status)
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHStoreFile
 compact(CompactionContext compaction,
ThroughputController throughputController,
@@ -512,44 +516,44 @@ implements Compact the StoreFiles.
 
 
-
+
 void
 compactRecentForTestingAssumingDefaultPolicy(int N)
 This method tries to compact N recent files for 
testing.
 
 
-
+
 protected void
 completeCompaction(https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHStoreFilecompactedFiles)
 It works by processing a compaction that's been written to 
disk.
 
 
-
+
 protected void
 createCacheConf(ColumnFamilyDescriptor family)
 Creates the cache config.
 
 
-
+
 private HFileContext
 createFileContext(Compression.Algorithm compression,
  boolean includeMVCCReadpoint,
  boolean includesTag,
  Encryption.Context cryptoContext)
 
-
+
 StoreFlushContext
 createFlushContext(long cacheFlushId,
   FlushLifeCycleTracker tracker)
 
-
+
 protected KeyValueScanner
 createScanner(Scan scan,
  ScanInfo scanInfo,
  

[20/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
index eecf20f..df4d2d2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html

[28/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index d2da8f4..27235ad 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -63,3784 +63,3870 @@
 055import javax.servlet.http.HttpServlet;
 056import 
javax.servlet.http.HttpServletRequest;
 057import 
javax.servlet.http.HttpServletResponse;
-058import 
org.apache.commons.lang3.StringUtils;
-059import 
org.apache.hadoop.conf.Configuration;
-060import org.apache.hadoop.fs.Path;
-061import 
org.apache.hadoop.hbase.ChoreService;
-062import 
org.apache.hadoop.hbase.ClusterId;
-063import 
org.apache.hadoop.hbase.ClusterMetrics;
-064import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-065import 
org.apache.hadoop.hbase.ClusterMetricsBuilder;
-066import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-067import 
org.apache.hadoop.hbase.HBaseIOException;
-068import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-069import 
org.apache.hadoop.hbase.HConstants;
-070import 
org.apache.hadoop.hbase.InvalidFamilyOperationException;
-071import 
org.apache.hadoop.hbase.MasterNotRunningException;
-072import 
org.apache.hadoop.hbase.MetaTableAccessor;
-073import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-074import 
org.apache.hadoop.hbase.PleaseHoldException;
-075import 
org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
-076import 
org.apache.hadoop.hbase.ServerName;
-077import 
org.apache.hadoop.hbase.TableDescriptors;
-078import 
org.apache.hadoop.hbase.TableName;
-079import 
org.apache.hadoop.hbase.TableNotDisabledException;
-080import 
org.apache.hadoop.hbase.TableNotFoundException;
-081import 
org.apache.hadoop.hbase.UnknownRegionException;
-082import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-083import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-084import 
org.apache.hadoop.hbase.client.MasterSwitchType;
-085import 
org.apache.hadoop.hbase.client.RegionInfo;
-086import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-087import 
org.apache.hadoop.hbase.client.Result;
-088import 
org.apache.hadoop.hbase.client.TableDescriptor;
-089import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-090import 
org.apache.hadoop.hbase.client.TableState;
-091import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-092import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-093import 
org.apache.hadoop.hbase.exceptions.MergeRegionException;
-094import 
org.apache.hadoop.hbase.executor.ExecutorType;
-095import 
org.apache.hadoop.hbase.favored.FavoredNodesManager;
-096import 
org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-097import 
org.apache.hadoop.hbase.http.InfoServer;
-098import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-099import 
org.apache.hadoop.hbase.ipc.RpcServer;
-100import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-101import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-102import 
org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
-103import 
org.apache.hadoop.hbase.master.assignment.AssignProcedure;
-104import 
org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-105import 
org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
-106import 
org.apache.hadoop.hbase.master.assignment.MoveRegionProcedure;
-107import 
org.apache.hadoop.hbase.master.assignment.RegionStateNode;
-108import 
org.apache.hadoop.hbase.master.assignment.RegionStates;
-109import 
org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
-110import 
org.apache.hadoop.hbase.master.assignment.UnassignProcedure;
-111import 
org.apache.hadoop.hbase.master.balancer.BalancerChore;
-112import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
-113import 
org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
-114import 
org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
-115import 
org.apache.hadoop.hbase.master.cleaner.CleanerChore;
-116import 
org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-117import 
org.apache.hadoop.hbase.master.cleaner.LogCleaner;
-118import 
org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
-119import 
org.apache.hadoop.hbase.master.locking.LockManager;
-120import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-121import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
-122import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-123import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
-124import 

[29/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
index d4b78a3..e9cc3ea 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
@@ -1826,80 +1826,83 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 
+StoreFileReader.Listener
+
+
 StorefileRefresherChore
 A chore for refreshing the store files for secondary 
regions hosted in the region server.
 
 
-
+
 StoreFileScanner
 KeyValueScanner adaptor over the Reader.
 
 
-
+
 StoreFileWriter
 A StoreFile writer.
 
 
-
+
 StoreFileWriter.Builder
 
-
+
 StoreFlushContext
 A package protected interface for a store flushing.
 
 
-
+
 StoreFlusher
 Store flusher interface.
 
 
-
+
 StoreScanner
 Scanner scans both the memstore and the Store.
 
 
-
+
 StripeMultiFileWriter
 Base class for cell sink that separates the provided cells 
into multiple files for stripe
  compaction.
 
 
-
+
 StripeStoreConfig
 Configuration class for stripe store and compactions.
 
 
-
+
 StripeStoreFileManager
 Stripe implementation of StoreFileManager.
 
 
-
+
 StripeStoreFileManager.State
 The state class.
 
 
-
+
 StripeStoreFlusher.StripeFlushRequest
 Stripe flush request wrapper that writes a non-striped 
file.
 
 
-
+
 TimeRangeTracker
 Stores minimum and maximum timestamp values, it is 
[minimumTimestamp, maximumTimestamp] in
  interval notation.
 
 
-
+
 TimeRangeTracker.Type
 
-
+
 VersionedSegmentsList
 A list of segment managers coupled with the version of the 
memstore (version at the time it was
  created).
 
 
-
+
 WrongRegionException
 Thrown when a request contains a key which is not part of 
this region
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
index 858ccf6..23060c2 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
@@ -130,8 +130,8 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult
 org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
+org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult
 org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
index 69f2dc6..f594c43 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
@@ -143,8 +143,8 @@
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
 org.apache.hadoop.hbase.security.access.Permission.Action
-org.apache.hadoop.hbase.security.access.AccessController.OpType
 org.apache.hadoop.hbase.security.access.AccessControlFilter.Strategy
+org.apache.hadoop.hbase.security.access.AccessController.OpType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/security/package-tree.html 

[05/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.MetaTask.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.MetaTask.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.MetaTask.html
index 8c8cc19..ad0629d 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.MetaTask.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.MetaTask.html
@@ -54,838 +54,854 @@
 046import 
org.apache.hadoop.hbase.ipc.DelegatingRpcScheduler;
 047import 
org.apache.hadoop.hbase.ipc.PriorityFunction;
 048import 
org.apache.hadoop.hbase.ipc.RpcScheduler;
-049import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-050import 
org.apache.hadoop.hbase.regionserver.RSRpcServices;
-051import 
org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory;
-052import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-053import 
org.apache.hadoop.hbase.testclassification.MiscTests;
-054import 
org.apache.hadoop.hbase.util.Bytes;
-055import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-056import 
org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
-057import 
org.apache.hadoop.hbase.util.Pair;
-058import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-059import org.junit.AfterClass;
-060import org.junit.Assert;
-061import org.junit.BeforeClass;
-062import org.junit.ClassRule;
-063import org.junit.Rule;
-064import org.junit.Test;
-065import 
org.junit.experimental.categories.Category;
-066import org.junit.rules.TestName;
-067import org.slf4j.Logger;
-068import org.slf4j.LoggerFactory;
-069
-070import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-071import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-072
-073/**
-074 * Test {@link 
org.apache.hadoop.hbase.MetaTableAccessor}.
-075 */
-076@Category({MiscTests.class, 
MediumTests.class})
-077@SuppressWarnings("deprecation")
-078public class TestMetaTableAccessor {
-079
-080  @ClassRule
-081  public static final HBaseClassTestRule 
CLASS_RULE =
-082  
HBaseClassTestRule.forClass(TestMetaTableAccessor.class);
-083
-084  private static final Logger LOG = 
LoggerFactory.getLogger(TestMetaTableAccessor.class);
-085  private static final  
HBaseTestingUtility UTIL = new HBaseTestingUtility();
-086  private static Connection connection;
-087  private Random random = new Random();
-088
-089  @Rule
-090  public TestName name = new 
TestName();
-091
-092  @BeforeClass public static void 
beforeClass() throws Exception {
-093UTIL.startMiniCluster(3);
-094
-095Configuration c = new 
Configuration(UTIL.getConfiguration());
-096// Tests to 4 retries every 5 
seconds. Make it try every 1 second so more
-097// responsive.  1 second is default 
as is ten retries.
-098c.setLong("hbase.client.pause", 
1000);
-099
c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 10);
-100connection = 
ConnectionFactory.createConnection(c);
-101  }
-102
-103  @AfterClass public static void 
afterClass() throws Exception {
-104connection.close();
-105UTIL.shutdownMiniCluster();
-106  }
-107
-108  /**
-109   * Does {@link 
MetaTableAccessor#getRegion(Connection, byte[])} and a write
-110   * against hbase:meta while its hosted 
server is restarted to prove our retrying
-111   * works.
-112   */
-113  @Test public void testRetrying()
-114  throws IOException, 
InterruptedException {
-115final TableName tableName = 
TableName.valueOf(name.getMethodName());
-116LOG.info("Started " + tableName);
-117Table t = 
UTIL.createMultiRegionTable(tableName, HConstants.CATALOG_FAMILY);
-118int regionCount = -1;
-119try (RegionLocator r = 
UTIL.getConnection().getRegionLocator(tableName)) {
-120  regionCount = 
r.getStartKeys().length;
-121}
-122// Test it works getting a region 
from just made user table.
-123final List<RegionInfo> regions 
=
-124  testGettingTableRegions(connection, 
tableName, regionCount);
-125MetaTask reader = new 
MetaTask(connection, "reader") {
-126  @Override
-127  void metaTask() throws Throwable 
{
-128testGetRegion(connection, 
regions.get(0));
-129LOG.info("Read " + 
regions.get(0).getEncodedName());
-130  }
-131};
-132MetaTask writer = new 
MetaTask(connection, "writer") {
-133  @Override
-134  void metaTask() throws Throwable 
{
-135
MetaTableAccessor.addRegionToMeta(connection, regions.get(0));
-136LOG.info("Wrote " + 
regions.get(0).getEncodedName());
-137  }
-138};
-139reader.start();
-140writer.start();
-141
-142// We're gonna check how long it takes. If 
it takes too long, we will consider
-143//  it as a fail. We can't put that 
in the @Test tag as we want to close
-144//  the threads nicely
-145final long 
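The point of beforeClass in this test is to make the hbase:meta retry test responsive: the client pause drops to one second with ten retries. Those two knobs, isolated from the hunk with values copied as-is:

    Configuration c = new Configuration(UTIL.getConfiguration());
    c.setLong("hbase.client.pause", 1000);                 // retry every second instead of every five
    c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 10);  // ten retries, per the source comment
    Connection connection = ConnectionFactory.createConnection(c);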

[12/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.Listener.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.Listener.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.Listener.html
new file mode 100644
index 000..b570921
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.Listener.html
@@ -0,0 +1,740 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software Foundation (ASF) under one
+003 * or more contributor license agreements.  See the NOTICE file
+004 * distributed with this work for additional information
+005 * regarding copyright ownership.  The ASF licenses this file
+006 * to you under the Apache License, Version 2.0 (the
+007 * "License"); you may not use this file except in compliance
+008 * with the License.  You may obtain a copy of the License at
+009 *
+010 * http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or agreed to in writing, software
+013 * distributed under the License is distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+015 * See the License for the specific language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase.regionserver;
+019
+020import static org.apache.hadoop.hbase.regionserver.HStoreFile.BLOOM_FILTER_TYPE_KEY;
+021import static org.apache.hadoop.hbase.regionserver.HStoreFile.DELETE_FAMILY_COUNT;
+022import static org.apache.hadoop.hbase.regionserver.HStoreFile.LAST_BLOOM_KEY;
+023
+024import java.io.DataInput;
+025import java.io.IOException;
+026import java.util.Map;
+027import java.util.Optional;
+028import java.util.SortedSet;
+029import java.util.concurrent.atomic.AtomicInteger;
+030
+031import org.apache.hadoop.conf.Configuration;
+032import org.apache.hadoop.fs.FileSystem;
+033import org.apache.hadoop.fs.Path;
+034import org.apache.hadoop.hbase.Cell;
+035import org.apache.hadoop.hbase.CellComparator;
+036import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+037import org.apache.hadoop.hbase.HConstants;
+038import org.apache.hadoop.hbase.PrivateCellUtil;
+039import org.apache.hadoop.hbase.KeyValue;
+040import org.apache.hadoop.hbase.client.Scan;
+041import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
+042import org.apache.hadoop.hbase.io.TimeRange;
+043import org.apache.hadoop.hbase.io.hfile.BlockType;
+044import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+045import org.apache.hadoop.hbase.io.hfile.HFile;
+046import org.apache.hadoop.hbase.io.hfile.HFileBlock;
+047import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+048import org.apache.hadoop.hbase.nio.ByteBuff;
+049import org.apache.hadoop.hbase.util.BloomFilter;
+050import org.apache.hadoop.hbase.util.BloomFilterFactory;
+051import org.apache.hadoop.hbase.util.Bytes;
+052import org.apache.yetus.audience.InterfaceAudience;
+053import org.apache.yetus.audience.InterfaceStability;
+054import org.slf4j.Logger;
+055import org.slf4j.LoggerFactory;
+056import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+057
+058/**
+059 * Reader for a StoreFile.
+060 */
+061@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.PHOENIX)
+062@InterfaceStability.Evolving
+063public class StoreFileReader {
+064  private static final Logger LOG = LoggerFactory.getLogger(StoreFileReader.class.getName());
+065
+066  protected BloomFilter generalBloomFilter = null;
+067  protected BloomFilter deleteFamilyBloomFilter = null;
+068  protected BloomType bloomFilterType;
+069  private final HFile.Reader reader;
+070  protected long sequenceID = -1;
+071  protected TimeRange timeRange = null;
+072  private byte[] lastBloomKey;
+073  private long deleteFamilyCnt = -1;
+074  private boolean bulkLoadResult = false;
+075  private KeyValue.KeyOnlyKeyValue lastBloomKeyOnlyKV = null;
+076  private boolean skipResetSeqId = true;
+077
+078  // Counter that is incremented every time a scanner is created on the
+079  // store file. It is decremented when the scan on the store file is
+080  // done. All StoreFileReader for the same StoreFile will share this counter.
+081  private final AtomicInteger refCount;
+082
+083  // Indicates whether this StoreFileReader is shared, i.e., used for pread. If not, we will
+084  // close the internal reader when readCompleted is called.
+085  @VisibleForTesting
+086  final boolean shared;
+087
+088  private volatile Listener listener;
+089
+090  private boolean closed = false;
+091
+092  private StoreFileReader(HFile.Reader reader, AtomicInteger refCount, boolean shared) {
+093    this.reader = reader;
+094    bloomFilterType =

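The refCount/shared fields above carry the class's lifecycle contract; a minimal sketch of that contract follows, under stated assumptions: the names refCount, shared, and readCompleted come from the diff, everything else is illustrative and not the HBase API.

import java.util.concurrent.atomic.AtomicInteger;

// Sketch of the ref-counting described in the comments above: every scanner
// bumps the counter shared by all readers of one store file and drops it when
// its scan completes; a non-shared (pread) reader also closes its underlying
// reader at that point.
class RefCountedReader {
  private final AtomicInteger refCount; // one counter per store file
  private final boolean shared;

  RefCountedReader(AtomicInteger refCount, boolean shared) {
    this.refCount = refCount;
    this.shared = shared;
  }

  void scannerOpened() {
    refCount.incrementAndGet();
  }

  void readCompleted() {
    refCount.decrementAndGet();
    if (!shared) {
      close(); // private readers are released eagerly
    }
  }

  void close() {
    // close the underlying HFile reader here
  }
}
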
[24/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index d2da8f4..27235ad 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -63,3784 +63,3870 @@
 055import javax.servlet.http.HttpServlet;
 056import javax.servlet.http.HttpServletRequest;
 057import javax.servlet.http.HttpServletResponse;
-058import org.apache.commons.lang3.StringUtils;
-059import org.apache.hadoop.conf.Configuration;
-060import org.apache.hadoop.fs.Path;
-061import org.apache.hadoop.hbase.ChoreService;
-062import org.apache.hadoop.hbase.ClusterId;
-063import org.apache.hadoop.hbase.ClusterMetrics;
-064import org.apache.hadoop.hbase.ClusterMetrics.Option;
-065import org.apache.hadoop.hbase.ClusterMetricsBuilder;
-066import org.apache.hadoop.hbase.DoNotRetryIOException;
-067import org.apache.hadoop.hbase.HBaseIOException;
-068import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-069import org.apache.hadoop.hbase.HConstants;
-070import org.apache.hadoop.hbase.InvalidFamilyOperationException;
-071import org.apache.hadoop.hbase.MasterNotRunningException;
-072import org.apache.hadoop.hbase.MetaTableAccessor;
-073import org.apache.hadoop.hbase.NamespaceDescriptor;
-074import org.apache.hadoop.hbase.PleaseHoldException;
-075import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
-076import org.apache.hadoop.hbase.ServerName;
-077import org.apache.hadoop.hbase.TableDescriptors;
-078import org.apache.hadoop.hbase.TableName;
-079import org.apache.hadoop.hbase.TableNotDisabledException;
-080import org.apache.hadoop.hbase.TableNotFoundException;
-081import org.apache.hadoop.hbase.UnknownRegionException;
-082import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-083import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-084import org.apache.hadoop.hbase.client.MasterSwitchType;
-085import org.apache.hadoop.hbase.client.RegionInfo;
-086import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-087import org.apache.hadoop.hbase.client.Result;
-088import org.apache.hadoop.hbase.client.TableDescriptor;
-089import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-090import org.apache.hadoop.hbase.client.TableState;
-091import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-092import org.apache.hadoop.hbase.exceptions.DeserializationException;
-093import org.apache.hadoop.hbase.exceptions.MergeRegionException;
-094import org.apache.hadoop.hbase.executor.ExecutorType;
-095import org.apache.hadoop.hbase.favored.FavoredNodesManager;
-096import org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-097import org.apache.hadoop.hbase.http.InfoServer;
-098import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-099import org.apache.hadoop.hbase.ipc.RpcServer;
-100import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-101import org.apache.hadoop.hbase.log.HBaseMarkers;
-102import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
-103import org.apache.hadoop.hbase.master.assignment.AssignProcedure;
-104import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-105import org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
-106import org.apache.hadoop.hbase.master.assignment.MoveRegionProcedure;
-107import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
-108import org.apache.hadoop.hbase.master.assignment.RegionStates;
-109import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
-110import org.apache.hadoop.hbase.master.assignment.UnassignProcedure;
-111import org.apache.hadoop.hbase.master.balancer.BalancerChore;
-112import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
-113import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
-114import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
-115import org.apache.hadoop.hbase.master.cleaner.CleanerChore;
-116import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-117import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
-118import org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
-119import org.apache.hadoop.hbase.master.locking.LockManager;
-120import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-121import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
-122import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-123import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
-124import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
-125import

[07/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
index 0281dfc..fb26734 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
@@ -246,304 +246,308 @@
 
 
 
+TestCleanupCompactedFileOnRegionClose
 TestClearRegionBlockCache
 TestClusterId
     Test metrics incremented on region server operations.
 TestColumnSeeking
 TestCompactingMemStore
     compacted memstore test case
 TestCompactingMemStore.MyCompactingMemStore
 TestCompactingToCellFlatMapMemStore
     compacted memstore test case
 TestCompaction
     Test compaction framework and common functions
 TestCompaction.DummyCompactor
 TestCompaction.Tracker
     Simple CompactionLifeCycleTracker on which you can wait until the requested compaction finishes.
 TestCompaction.WaitThroughPutController
     Simple CompactionLifeCycleTracker on which you can wait until the requested compaction finishes.
 TestCompactionArchiveConcurrentClose
     Tests a race condition between archiving of compacted files in CompactedHFilesDischarger chore and HRegion.close();
 TestCompactionArchiveIOException
     Tests that archiving compacted files behaves correctly when encountering exceptions.
 TestCompactionFileNotFound
     This class tests the scenario where a store refresh happens due to a file not found during scan, after a compaction but before the compacted files are archived.
 TestCompactionInDeadRegionServer
     This testcase is used to ensure that the compaction marker will fail a compaction if the RS is already dead.
 TestCompactionInDeadRegionServer.IgnoreYouAreDeadRS
 TestCompactionLifeCycleTracker
     Confirm that the function of CompactionLifeCycleTracker is OK as we do not use it in our own code.
 TestCompactionLifeCycleTracker.CompactionObserver
 TestCompactionLifeCycleTracker.Tracker
 TestCompactionPolicy
 TestCompactionState
     Unit tests to test retrieving table/region compaction state
 TestCompactionWithCoprocessor
     Make sure compaction tests still pass with the preFlush and preCompact overridden to implement the default behavior
 TestCompactSplitThread
 TestCompoundBloomFilter
     Tests writing Bloom filter blocks in the same part of the file as data blocks.
 TestDataBlockEncodingTool
     Test DataBlockEncodingTool.
 TestDateTieredCompactionPolicy
 TestDateTieredCompactionPolicyOverflow
 TestDefaultCompactSelection
 TestDefaultMemStore
     memstore test case
 TestDefaultMemStore.ReadOwnWritesTester
 TestDefaultStoreEngine
 TestDefaultStoreEngine.DummyCompactionPolicy
 TestDefaultStoreEngine.DummyCompactor
 TestDefaultStoreEngine.DummyStoreFlusher
 TestDeleteMobTable
 TestEncryptionKeyRotation
 TestEncryptionRandomKeying
 TestEndToEndSplitTransaction
 TestEndToEndSplitTransaction.RegionChecker
     Checks regions using MetaTableAccessor and HTable methods
 TestEndToEndSplitTransaction.RegionSplitter
 TestFailedAppendAndSync
     Testing sync/append failures.
 TestFlushLifeCycleTracker
     Confirm that the function of FlushLifeCycleTracker is OK as we do not use it in our own code.
 TestFlushLifeCycleTracker.FlushObserver
 TestFlushLifeCycleTracker.Tracker
 TestFlushRegionEntry
 TestFSErrorsExposed
     Test cases that ensure that file system level errors are bubbled up appropriately to clients, rather than swallowed.
 TestFSErrorsExposed.FaultyFileSystem
 TestFSErrorsExposed.FaultyInputStream
 TestGetClosestAtOrBefore
     TestGet is a medley of tests of get all done up as a single test.
 TestHdfsSnapshotHRegion
 TestHeapMemoryManager
 TestHeapMemoryManager.BlockCacheStub
 TestHeapMemoryManager.CustomHeapMemoryTuner
 TestHeapMemoryManager.MemstoreFlusherStub
 TestHeapMemoryManager.RegionServerAccountingStub
 TestHeapMemoryManager.RegionServerStub
 TestHMobStore
 TestHRegion
     Basic stand-alone testing of HRegion.
 TestHRegion.Appender
     TestCase for append
 TestHRegion.HRegionForTesting
     The same as HRegion class, the only difference is that instantiateHStore will create a different HStore - HStoreForTesting.
 TestHRegion.HRegionWithSeqId
 TestHRegion.HStoreForTesting
     HStoreForTesting is merely the same as HStore, the
[15/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index e074a8c..8cc5add 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -722,172 +722,172 @@
 714      "hbase.regionserver.kerberos.principal", host);
 715  }
 716
-717  protected void waitForMasterActive() {
-718  }
-719
-720  protected String getProcessName() {
-721    return REGIONSERVER;
-722  }
+717
+718  /**
+719   * Wait for an active Master.
+720   * See override in Master superclass for how it is used.
+721   */
+722  protected void waitForMasterActive() {}
 723
-724  protected boolean canCreateBaseZNode() {
-725    return this.masterless;
+724  protected String getProcessName() {
+725    return REGIONSERVER;
 726  }
 727
-728  protected boolean canUpdateTableDescriptor() {
-729    return false;
+728  protected boolean canCreateBaseZNode() {
+729    return this.masterless;
 730  }
 731
-732  protected RSRpcServices createRpcServices() throws IOException {
-733    return new RSRpcServices(this);
+732  protected boolean canUpdateTableDescriptor() {
+733    return false;
 734  }
 735
-736  protected void configureInfoServer() {
-737    infoServer.addServlet("rs-status", "/rs-status", RSStatusServlet.class);
-738    infoServer.setAttribute(REGIONSERVER, this);
-739  }
-740
-741  protected Class<? extends HttpServlet> getDumpServlet() {
-742    return RSDumpServlet.class;
+736  protected RSRpcServices createRpcServices() throws IOException {
+737    return new RSRpcServices(this);
+738  }
+739
+740  protected void configureInfoServer() {
+741    infoServer.addServlet("rs-status", "/rs-status", RSStatusServlet.class);
+742    infoServer.setAttribute(REGIONSERVER, this);
 743  }
 744
-745  @Override
-746  public boolean registerService(com.google.protobuf.Service instance) {
-747    /*
-748     * No stacking of instances is allowed for a single executorService name
-749     */
-750    com.google.protobuf.Descriptors.ServiceDescriptor serviceDesc =
-751        instance.getDescriptorForType();
-752    String serviceName = CoprocessorRpcUtils.getServiceName(serviceDesc);
-753    if (coprocessorServiceHandlers.containsKey(serviceName)) {
-754      LOG.error("Coprocessor executorService " + serviceName
-755          + " already registered, rejecting request from " + instance);
-756      return false;
-757    }
-758
-759    coprocessorServiceHandlers.put(serviceName, instance);
-760    if (LOG.isDebugEnabled()) {
-761      LOG.debug("Registered regionserver coprocessor executorService: executorService=" + serviceName);
-762    }
-763    return true;
-764  }
-765
-766  /**
-767   * Create a 'smarter' Connection, one that is capable of by-passing RPC if the request is to
-768   * the local server; i.e. a short-circuit Connection. Safe to use going to local or remote
-769   * server. Create this instance in a method so it can be intercepted and mocked in tests.
-770   * @throws IOException
-771   */
-772  @VisibleForTesting
-773  protected ClusterConnection createClusterConnection() throws IOException {
-774    Configuration conf = this.conf;
-775    if (conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM) != null) {
-776      // Use server ZK cluster for server-issued connections, so we clone
-777      // the conf and unset the client ZK related properties
-778      conf = new Configuration(this.conf);
-779      conf.unset(HConstants.CLIENT_ZOOKEEPER_QUORUM);
-780    }
-781    // Create a cluster connection that when appropriate, can short-circuit and go directly to the
-782    // local server if the request is to the local server bypassing RPC. Can be used for both local
-783    // and remote invocations.
-784    return ConnectionUtils.createShortCircuitConnection(conf, null, userProvider.getCurrent(),
-785      serverName, rpcServices, rpcServices);
-786  }
-787
-788  /**
-789   * Run test on configured codecs to make sure supporting libs are in place.
-790   * @param c
-791   * @throws IOException
-792   */
-793  private static void checkCodecs(final Configuration c) throws IOException {
-794    // check to see if the codec list is available:
-795    String[] codecs = c.getStrings("hbase.regionserver.codecs", (String[]) null);
-796    if (codecs == null) return;
-797    for (String codec : codecs) {
-798      if (!CompressionTest.testCompression(codec)) {
-799        throw new IOException("Compression codec " + codec +
-800          " not supported, aborting RS construction");
-801      }
-802    }
-803  }
-804
-805  public String getClusterId() {
-806    return this.clusterId;

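The checkCodecs hunk above gates region server startup on codec support; a hedged sketch of the operator-side configuration it reads follows. Only the property name and the CompressionTest behavior come from the diff, the wrapper class is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: list the codecs the region server must verify at construction.
// checkCodecs() runs CompressionTest.testCompression on each entry and throws
// an IOException, aborting startup, if any codec is unsupported.
public class CodecGate {
  public static Configuration withRequiredCodecs() {
    Configuration conf = HBaseConfiguration.create();
    conf.setStrings("hbase.regionserver.codecs", "snappy", "lz4");
    return conf;
  }
}
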
[16/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
index e074a8c..8cc5add 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
@@ -722,172 +722,172 @@

[10/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/devapidocs/src-html/org/apache/hadoop/hbase/util/RetryCounterFactory.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/RetryCounterFactory.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/RetryCounterFactory.html
index f33d7e3..5c0a9e2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/RetryCounterFactory.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/RetryCounterFactory.html
@@ -36,27 +36,31 @@
 028public class RetryCounterFactory {
 029  private final RetryConfig retryConfig;
 030
-031  public RetryCounterFactory(int maxAttempts, int sleepIntervalMillis) {
-032    this(maxAttempts, sleepIntervalMillis, -1);
+031  public RetryCounterFactory(int sleepIntervalMillis) {
+032    this(Integer.MAX_VALUE, sleepIntervalMillis);
 033  }
 034
-035  public RetryCounterFactory(int maxAttempts, int sleepIntervalMillis, int maxSleepTime) {
-036    this(new RetryConfig(
-037      maxAttempts,
-038      sleepIntervalMillis,
-039      maxSleepTime,
-040      TimeUnit.MILLISECONDS,
-041      new ExponentialBackoffPolicyWithLimit()));
-042  }
-043
-044  public RetryCounterFactory(RetryConfig retryConfig) {
-045    this.retryConfig = retryConfig;
+035  public RetryCounterFactory(int maxAttempts, int sleepIntervalMillis) {
+036    this(maxAttempts, sleepIntervalMillis, -1);
+037  }
+038
+039  public RetryCounterFactory(int maxAttempts, int sleepIntervalMillis, int maxSleepTime) {
+040    this(new RetryConfig(
+041      maxAttempts,
+042      sleepIntervalMillis,
+043      maxSleepTime,
+044      TimeUnit.MILLISECONDS,
+045      new ExponentialBackoffPolicyWithLimit()));
 046  }
 047
-048  public RetryCounter create() {
-049    return new RetryCounter(retryConfig);
+048  public RetryCounterFactory(RetryConfig retryConfig) {
+049    this.retryConfig = retryConfig;
 050  }
-051}
+051
+052  public RetryCounter create() {
+053    return new RetryCounter(retryConfig);
+054  }
+055}
 
 
 
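The hunk above adds a single-argument constructor that defaults maxAttempts to Integer.MAX_VALUE; a hedged usage sketch follows. It assumes RetryCounter's shouldRetry and sleepUntilNextRetry methods, which are not shown in this hunk, and the wrapper class is illustrative.

import java.io.IOException;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.RetryCounterFactory;

// Sketch only: retry an operation with exponential backoff, effectively
// forever, via the new RetryCounterFactory(int sleepIntervalMillis) ctor.
public class RetryLoop {
  static void runWithRetries(Runnable op) throws IOException, InterruptedException {
    RetryCounter counter = new RetryCounterFactory(1000).create(); // 1s base sleep
    while (true) {
      try {
        op.run();
        return;
      } catch (RuntimeException e) {
        if (!counter.shouldRetry()) {
          throw new IOException("retries exhausted", e);
        }
        counter.sleepUntilNextRetry(); // backoff grows per ExponentialBackoffPolicyWithLimit
      }
    }
  }
}
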

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/downloads.html
--
diff --git a/downloads.html b/downloads.html
index 09ef17a..e9b8cdc 100644
--- a/downloads.html
+++ b/downloads.html
@@ -7,7 +7,7 @@
 Apache HBase – Apache HBase Downloads
@@ -423,7 +423,7 @@ under the License. -->
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
-  Last Published: 2018-09-16
+  Last Published: 2018-09-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/export_control.html
--
diff --git a/export_control.html b/export_control.html
index 0543853..f96c4ac 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
 Apache HBase – Export Control
@@ -331,7 +331,7 @@ for more details.
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
-  Last Published: 2018-09-16
+  Last Published: 2018-09-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/index.html
--
diff --git a/index.html b/index.html
index 90b78b8..c7e50fb 100644
--- a/index.html
+++ b/index.html
@@ -7,7 +7,7 @@
 Apache HBase – Apache HBase™ Home
@@ -411,7 +411,7 @@ Apache HBase is an open-source, distributed, versioned, non-relational database
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
-  Last Published: 2018-09-16
+  Last Published: 2018-09-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/integration.html
--
diff --git a/integration.html b/integration.html
index f9e7775..c2687da 100644
--- a/integration.html
+++ b/integration.html
@@ -7,7 +7,7 @@
 Apache HBase – CI Management
@@ -291,7 +291,7 @@
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
-  Last Published: 2018-09-16
+  Last Published: 2018-09-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/issue-tracking.html

[03/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.SpyingRpcSchedulerFactory.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.SpyingRpcSchedulerFactory.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.SpyingRpcSchedulerFactory.html
index 8c8cc19..ad0629d 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.SpyingRpcSchedulerFactory.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestMetaTableAccessor.SpyingRpcSchedulerFactory.html
@@ -54,838 +54,854 @@
 046import org.apache.hadoop.hbase.ipc.DelegatingRpcScheduler;
 047import org.apache.hadoop.hbase.ipc.PriorityFunction;
 048import org.apache.hadoop.hbase.ipc.RpcScheduler;
-049import org.apache.hadoop.hbase.regionserver.HRegionServer;
-050import org.apache.hadoop.hbase.regionserver.RSRpcServices;
-051import org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory;
-052import org.apache.hadoop.hbase.testclassification.MediumTests;
-053import org.apache.hadoop.hbase.testclassification.MiscTests;
-054import org.apache.hadoop.hbase.util.Bytes;
-055import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-056import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
-057import org.apache.hadoop.hbase.util.Pair;
-058import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-059import org.junit.AfterClass;
-060import org.junit.Assert;
-061import org.junit.BeforeClass;
-062import org.junit.ClassRule;
-063import org.junit.Rule;
-064import org.junit.Test;
-065import org.junit.experimental.categories.Category;
-066import org.junit.rules.TestName;
-067import org.slf4j.Logger;
-068import org.slf4j.LoggerFactory;
-069
-070import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-071import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-072
-073/**
-074 * Test {@link org.apache.hadoop.hbase.MetaTableAccessor}.
-075 */
-076@Category({MiscTests.class, MediumTests.class})
-077@SuppressWarnings("deprecation")
-078public class TestMetaTableAccessor {
-079
-080  @ClassRule
-081  public static final HBaseClassTestRule CLASS_RULE =
-082      HBaseClassTestRule.forClass(TestMetaTableAccessor.class);
-083
-084  private static final Logger LOG = LoggerFactory.getLogger(TestMetaTableAccessor.class);
-085  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-086  private static Connection connection;
-087  private Random random = new Random();
-088
-089  @Rule
-090  public TestName name = new TestName();
-091
-092  @BeforeClass public static void beforeClass() throws Exception {
-093    UTIL.startMiniCluster(3);
-094
-095    Configuration c = new Configuration(UTIL.getConfiguration());
-096    // Tests to 4 retries every 5 seconds. Make it try every 1 second so more
-097    // responsive.  1 second is default as is ten retries.
-098    c.setLong("hbase.client.pause", 1000);

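The beforeClass hunk above stands up a three-server mini cluster before tuning client retries; a minimal standalone sketch of that scaffolding follows. The HBaseTestingUtility calls are those shown in the diff, while the wrapper class is illustrative.

import org.apache.hadoop.hbase.HBaseTestingUtility;

// Sketch only: start and stop a mini cluster the way beforeClass/afterClass
// above do; a real test would run its assertions between the two calls.
public class MiniClusterHarness {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(3); // three region servers, as in the test
    try {
      // exercise util.getConnection() / hbase:meta here
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
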
[01/40] hbase-site git commit: Published site at 6d7bc0e98b25215e79f67f107fd0d3306dfcf352.

2018-09-17 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site bcb894667 -> 738e976e8


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/738e976e/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCleanupCompactedFileOnRegionClose.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCleanupCompactedFileOnRegionClose.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCleanupCompactedFileOnRegionClose.html
new file mode 100644
index 000..1169764
--- /dev/null
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestCleanupCompactedFileOnRegionClose.html
@@ -0,0 +1,282 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html lang="en">
+<head>
+<title>Source code</title>
+</head>
+<body>
+<div class="sourceContainer">
+<pre>
+001/*
+002 *
+003 * Licensed to the Apache Software Foundation (ASF) under one
+004 * or more contributor license agreements.  See the NOTICE file
+005 * distributed with this work for additional information
+006 * regarding copyright ownership.  The ASF licenses this file
+007 * to you under the Apache License, Version 2.0 (the
+008 * "License"); you may not use this file except in compliance
+009 * with the License.  You may obtain a copy of the License at
+010 *
+011 * http://www.apache.org/licenses/LICENSE-2.0
+012 *
+013 * Unless required by applicable law or agreed to in writing, software
+014 * distributed under the License is distributed on an "AS IS" BASIS,
+015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+016 * See the License for the specific language governing permissions and
+017 * limitations under the License.
+018 */
+019package org.apache.hadoop.hbase.regionserver;
+020
+021import static org.junit.Assert.assertEquals;
+022import static org.junit.Assert.assertFalse;
+023import static org.junit.Assert.assertNotNull;
+024import static org.junit.Assert.assertTrue;
+025import static org.junit.Assert.fail;
+026
+027import java.io.IOException;
+028import java.util.Collection;
+029
+030import org.apache.hadoop.hbase.HBaseClassTestRule;
+031import org.apache.hadoop.hbase.HBaseTestingUtility;
+032import org.apache.hadoop.hbase.KeyValue;
+033import org.apache.hadoop.hbase.TableName;
+034import org.apache.hadoop.hbase.client.Delete;
+035import org.apache.hadoop.hbase.client.Get;
+036import org.apache.hadoop.hbase.client.HBaseAdmin;
+037import org.apache.hadoop.hbase.client.IsolationLevel;
+038import org.apache.hadoop.hbase.client.Put;
+039import org.apache.hadoop.hbase.client.Result;
+040import org.apache.hadoop.hbase.client.ResultScanner;
+041import org.apache.hadoop.hbase.client.Scan;
+042import org.apache.hadoop.hbase.client.Table;
+043import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
+044import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
+045import org.apache.hadoop.hbase.testclassification.MediumTests;
+046import org.apache.hadoop.hbase.util.Bytes;
+047
+048import org.junit.AfterClass;
+049import org.junit.BeforeClass;
+050import org.junit.ClassRule;
+051import org.junit.Test;
+052import org.junit.experimental.categories.Category;
+053
+054@Category({MediumTests.class})
+055public class TestCleanupCompactedFileOnRegionClose {
+056
+057  @ClassRule
+058  public static final HBaseClassTestRule CLASS_RULE =
+059      HBaseClassTestRule.forClass(TestCleanupCompactedFileOnRegionClose.class);
+060
+061  private static HBaseTestingUtility util;
+062
+063  @BeforeClass
+064  public static void beforeClass() throws Exception {
+065    util = new HBaseTestingUtility();
+066    util.getConfiguration().setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
+067    util.getConfiguration().set("dfs.blocksize", "64000");
+068    util.getConfiguration().set("dfs.namenode.fs-limits.min-block-size", "1024");
+069    util.getConfiguration().set(TimeToLiveHFileCleaner.TTL_CONF_KEY, "0");
+070    util.startMiniCluster(2);
+071  }
+072
+073  @AfterClass
+074  public static void afterclass() throws Exception {
+075    util.shutdownMiniCluster();
+076  }
+077
+078  @Test
+079  public void testCleanupOnClose() throws Exception {
+080    TableName tableName = TableName.valueOf("testCleanupOnClose");
+081    String familyName = "f";
+082    byte[] familyNameBytes = Bytes.toBytes(familyName);
+083    util.createTable(tableName, familyName);
+084
+085    HBaseAdmin hBaseAdmin = util.getHBaseAdmin();
+086    Table table = util.getConnection().getTable(tableName);
+087
+088    HRegionServer rs = util.getRSForFirstRegionInTable(tableName);
+089    Region region = rs.getRegions(tableName).get(0);
+090
+091    int refSFCount = 4;
+092    for (int i = 0; i < refSFCount; i++) {
+093      for (int j = 0; j < refSFCount; j++) {
+094        Put put = new Put(Bytes.toBytes(j));
+095        put.addColumn(familyNameBytes, Bytes.toBytes(i), Bytes.toBytes(j));
+096        table.put(put);
+097