hbase git commit: HBASE-21515 Also initialize an AsyncClusterConnection in HRegionServer

2018-11-30 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/HBASE-21512 [created] ccb8b0bdf


HBASE-21515 Also initialize an AsyncClusterConnection in HRegionServer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ccb8b0bd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ccb8b0bd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ccb8b0bd

Branch: refs/heads/HBASE-21512
Commit: ccb8b0bdfd53b044f936f79e8aed37737dbdf346
Parents: 766aa1b
Author: zhangduo 
Authored: Fri Nov 30 08:23:47 2018 +0800
Committer: zhangduo 
Committed: Sat Dec 1 12:54:59 2018 +0800

----------------------------------------------------------------------
 .../hbase/client/AsyncClusterConnection.java| 38 
 .../hbase/client/AsyncConnectionImpl.java   | 39 ++--
 .../hbase/client/ClusterConnectionFactory.java  | 63 
 .../hadoop/hbase/client/ConnectionFactory.java  |  5 +-
 .../hadoop/hbase/util/ReflectionUtils.java  | 22 ---
 .../java/org/apache/hadoop/hbase/Server.java| 23 ++-
 .../org/apache/hadoop/hbase/master/HMaster.java |  3 +
 .../hbase/regionserver/HRegionServer.java   | 62 +--
 .../regionserver/ReplicationSyncUp.java |  6 ++
 .../hadoop/hbase/MockRegionServerServices.java  |  6 ++
 .../client/TestAsyncNonMetaRegionLocator.java   |  2 +-
 ...syncNonMetaRegionLocatorConcurrenyLimit.java |  2 +-
 .../client/TestAsyncRegionLocatorTimeout.java   |  2 +-
 ...TestAsyncSingleRequestRpcRetryingCaller.java |  4 +-
 .../hbase/client/TestAsyncTableNoncedRetry.java |  2 +-
 .../hbase/master/MockNoopMasterServices.java|  6 ++
 .../hadoop/hbase/master/MockRegionServer.java   |  6 ++
 .../hbase/master/TestActiveMasterManager.java   |  6 ++
 .../hbase/master/cleaner/TestHFileCleaner.java  |  6 ++
 .../master/cleaner/TestHFileLinkCleaner.java|  6 ++
 .../hbase/master/cleaner/TestLogsCleaner.java   |  6 ++
 .../cleaner/TestReplicationHFileCleaner.java|  6 ++
 .../regionserver/TestHeapMemoryManager.java |  6 ++
 .../hbase/regionserver/TestSplitLogWorker.java  |  6 ++
 .../hbase/regionserver/TestWALLockup.java   |  6 ++
 .../TestReplicationTrackerZKImpl.java   |  6 ++
 .../TestReplicationSourceManager.java   |  6 ++
 .../security/token/TestTokenAuthentication.java |  6 ++
 .../apache/hadoop/hbase/util/MockServer.java|  7 ++-
 29 files changed, 309 insertions(+), 55 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/ccb8b0bd/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
new file mode 100644
index 0000000..c7dea25
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * The asynchronous connection for internal usage.
+ */
+@InterfaceAudience.Private
+public interface AsyncClusterConnection extends AsyncConnection {
+
+  /**
+   * Get the nonce generator for this connection.
+   */
+  NonceGenerator getNonceGenerator();
+
+  /**
+   * Get the rpc client we used to communicate with other servers.
+   */
+  RpcClient getRpcClient();
+}
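
For context, a minimal sketch of how server-internal code might consume this new
interface. The patch adds a ClusterConnectionFactory (see the diffstat above), but
this digest truncates before its contents, so the creation call below is an
assumption for illustration, not the committed API:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.AsyncClusterConnection;
  import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
  import org.apache.hadoop.hbase.client.NonceGenerator;
  import org.apache.hadoop.hbase.ipc.RpcClient;

  public class AsyncClusterConnectionSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Hypothetical creation call via the new ClusterConnectionFactory;
      // the exact signature is not shown in this digest.
      AsyncClusterConnection conn =
          ClusterConnectionFactory.createAsyncClusterConnection(conf);
      // The two accessors this interface adds on top of AsyncConnection:
      NonceGenerator ng = conn.getNonceGenerator(); // nonces for idempotent retries
      RpcClient rpc = conn.getRpcClient();          // reuse the low-level RPC client
      System.out.println(ng + " / " + rpc);
      conn.close();
    }
  }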

http://git-wip-us.apache.org/repos/asf/hbase/blob/ccb8b0bd/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index a05764e..00c8f55 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
hbase git commit: HBASE-21486 The current replication implementation for peer in STANDBY state breaks serial replication

2018-11-30 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master dfeab9f5c -> 766aa1bfc


HBASE-21486 The current replication implementation for peer in STANDBY state breaks serial replication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/766aa1bf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/766aa1bf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/766aa1bf

Branch: refs/heads/master
Commit: 766aa1bfccb48b4d228dd86c100fb48e9c9d61fa
Parents: dfeab9f
Author: Duo Zhang 
Authored: Wed Nov 28 18:00:18 2018 +0800
Committer: zhangduo 
Committed: Sat Dec 1 12:15:18 2018 +0800

----------------------------------------------------------------------
 .../src/main/protobuf/MasterProcedure.proto |  19 ++--
 .../replication/AbstractPeerProcedure.java  |  97 -
 .../master/replication/ModifyPeerProcedure.java |  81 --
 ...ransitPeerSyncReplicationStateProcedure.java |  73 +
 .../replication/SyncReplicationTestBase.java|  30 --
 .../replication/TestSerialSyncReplication.java  | 106 +++
 .../TestSyncReplicationRemoveRemoteWAL.java |  21 +---
 7 files changed, 291 insertions(+), 136 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/766aa1bf/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 44ac952..cc0c6ba 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -398,15 +398,16 @@ enum PeerSyncReplicationStateTransitionState {
   PRE_PEER_SYNC_REPLICATION_STATE_TRANSITION = 1;
   SET_PEER_NEW_SYNC_REPLICATION_STATE = 2;
   REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_BEGIN = 3;
-  REPLAY_REMOTE_WAL_IN_PEER = 4;
-  REMOVE_ALL_REPLICATION_QUEUES_IN_PEER = 5;
-  REOPEN_ALL_REGIONS_IN_PEER = 6;
-  TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE = 7;
-  REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END = 8;
-  SYNC_REPLICATION_SET_PEER_ENABLED = 9;
-  SYNC_REPLICATION_ENABLE_PEER_REFRESH_PEER_ON_RS = 10;
-  CREATE_DIR_FOR_REMOTE_WAL = 11;
-  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 12;
+  REOPEN_ALL_REGIONS_IN_PEER = 4;
+  SYNC_REPLICATION_UPDATE_LAST_PUSHED_SEQ_ID_FOR_SERIAL_PEER = 5;
+  REPLAY_REMOTE_WAL_IN_PEER = 6;
+  REMOVE_ALL_REPLICATION_QUEUES_IN_PEER = 7;
+  TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE = 8;
+  REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END = 9;
+  SYNC_REPLICATION_SET_PEER_ENABLED = 10;
+  SYNC_REPLICATION_ENABLE_PEER_REFRESH_PEER_ON_RS = 11;
+  CREATE_DIR_FOR_REMOTE_WAL = 12;
+  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 13;
 }
 
 message PeerModificationStateData {
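
For orientation: the renumbering above moves REOPEN_ALL_REGIONS_IN_PEER ahead of the
remote-WAL replay and inserts the new SYNC_REPLICATION_UPDATE_LAST_PUSHED_SEQ_ID_FOR_SERIAL_PEER
step between them, evidently so a serial peer's last-pushed sequence ids are brought
up to date before replication resumes. A minimal sketch of listing the renumbered
values from the generated protobuf enum (the printing loop is illustrative only; the
class name follows HBase's shaded protobuf generation for MasterProcedure.proto):

  import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerSyncReplicationStateTransitionState;

  public class TransitionStateSketch {
    public static void main(String[] args) {
      // Generated protobuf enums expose values() and getNumber(); the numbers
      // printed here correspond to the renumbered .proto entries above.
      for (PeerSyncReplicationStateTransitionState state
          : PeerSyncReplicationStateTransitionState.values()) {
        System.out.println(state.getNumber() + " -> " + state);
      }
    }
  }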

http://git-wip-us.apache.org/repos/asf/hbase/blob/766aa1bf/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
index 882a050..755e0a3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
@@ -17,11 +17,27 @@
  */
 package org.apache.hadoop.hbase.master.replication;
 
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.master.TableStateManager;
+import org.apache.hadoop.hbase.master.TableStateManager.TableStateNotFoundException;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
 import org.apache.hadoop.hbase.replication.ReplicationException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
+import org.apache.hadoop.hbase.replication.ReplicationUtils;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 
@@ -29,8 +45,15 @@ import 

[1/2] hbase git commit: Revert "HBASE-21464 Splitting blocked with meta NSRE during split transaction"

2018-11-30 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 fd175a4d4 -> 547f3fc2d
  refs/heads/branch-1.4 503fbe8a3 -> 35125bc7b


Revert "HBASE-21464 Splitting blocked with meta NSRE during split transaction"

This reverts commit 70d9934e40785d13c9220d4b3ab0c3b31ca59f8c.

Committed change is insufficient. Was able to reproduce the problem again.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/35125bc7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/35125bc7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/35125bc7

Branch: refs/heads/branch-1.4
Commit: 35125bc7b114263f780d37d6cbe9883619adc2de
Parents: 503fbe8
Author: Andrew Purtell 
Authored: Fri Nov 30 14:40:23 2018 -0800
Committer: Andrew Purtell 
Committed: Fri Nov 30 14:41:27 2018 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/MetaTableAccessor.java  | 95 +++-
 .../hadoop/hbase/client/ConnectionManager.java  | 10 +--
 2 files changed, 56 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/35125bc7/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index fd7a97b..440f8c6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -175,7 +175,8 @@ public class MetaTableAccessor {
* @return An {@link Table} for hbase:meta
* @throws IOException
*/
-  static Table getMetaHTable(final Connection connection) throws IOException {
+  static Table getMetaHTable(final Connection connection)
+  throws IOException {
 // We used to pass whole CatalogTracker in here, now we just pass in Connection
 if (connection == null) {
   throw new NullPointerException("No connection");
@@ -247,13 +248,11 @@ public class MetaTableAccessor {
 }
 Get get = new Get(row);
 get.addFamily(HConstants.CATALOG_FAMILY);
-try (Table metaTable = getMetaHTable(connection)) {
-  Result r = get(metaTable, get);
-  RegionLocations locations = getRegionLocations(r);
-  return locations == null
-  ? null
-  : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId());
-}
+Result r = get(getMetaHTable(connection), get);
+RegionLocations locations = getRegionLocations(r);
+return locations == null
+  ? null
+  : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId());
   }
 
   /**
@@ -268,10 +267,8 @@ public class MetaTableAccessor {
 byte[] row = getMetaKeyForRegion(regionInfo);
 Get get = new Get(row);
 get.addFamily(HConstants.CATALOG_FAMILY);
-try (Table metaTable = getMetaHTable(connection)) {
-  Result r = get(metaTable, get);
-  return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
-}
+Result r = get(getMetaHTable(connection), get);
+return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
   }
 
   /** Returns the row key to use for this regionInfo */
@@ -303,9 +300,7 @@ public class MetaTableAccessor {
   byte[] regionName) throws IOException {
 Get get = new Get(regionName);
 get.addFamily(HConstants.CATALOG_FAMILY);
-try (Table metaTable = getMetaHTable(connection)) {
-  return get(metaTable, get);
-}
+return get(getMetaHTable(connection), get);
   }
 
   /**
@@ -636,19 +631,19 @@ public class MetaTableAccessor {
   scan.setCaching(caching);
 }
 scan.addFamily(HConstants.CATALOG_FAMILY);
-try (Table metaTable = getMetaHTable(connection)) {
-  try (ResultScanner scanner = metaTable.getScanner(scan)) {
-Result data;
-while ((data = scanner.next()) != null) {
-  if (data.isEmpty()) {
-continue;
-  }
-  // Break if visit returns false.
-  if (!visitor.visit(data)) {
-break;
-  }
-}
+Table metaTable = getMetaHTable(connection);
+ResultScanner scanner = null;
+try {
+  scanner = metaTable.getScanner(scan);
+  Result data;
+  while((data = scanner.next()) != null) {
+if (data.isEmpty()) continue;
+// Break if visit returns false.
+if (!visitor.visit(data)) break;
   }
+} finally {
+  if (scanner != null) scanner.close();
+  metaTable.close();
 }
   }
 
@@ -1025,9 +1020,7 @@ public class MetaTableAccessor {
*/
   static void putToMetaTable(final Connection connection, final Put p)
 throws IOException {
-try (Table metaTable = getMetaHTable(connection)) {
-  

[2/2] hbase git commit: Revert "HBASE-21464 Splitting blocked with meta NSRE during split transaction"

2018-11-30 Thread apurtell
Revert "HBASE-21464 Splitting blocked with meta NSRE during split transaction"

This reverts commit bd87f4ebcd49e9a3d100bba81fd6ab8868027c06

Committed change is insufficient. Was able to reproduce the problem again.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/547f3fc2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/547f3fc2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/547f3fc2

Branch: refs/heads/branch-1
Commit: 547f3fc2dcb00db007e931091cdbb436bed4451a
Parents: fd175a4
Author: Andrew Purtell 
Authored: Fri Nov 30 14:40:23 2018 -0800
Committer: Andrew Purtell 
Committed: Fri Nov 30 14:41:57 2018 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/MetaTableAccessor.java  | 95 +++-
 .../hadoop/hbase/client/ConnectionManager.java  | 10 +--
 2 files changed, 56 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/547f3fc2/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index fd7a97b..440f8c6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -175,7 +175,8 @@ public class MetaTableAccessor {
* @return An {@link Table} for hbase:meta
* @throws IOException
*/
-  static Table getMetaHTable(final Connection connection) throws IOException {
+  static Table getMetaHTable(final Connection connection)
+  throws IOException {
 // We used to pass whole CatalogTracker in here, now we just pass in Connection
 if (connection == null) {
   throw new NullPointerException("No connection");
@@ -247,13 +248,11 @@ public class MetaTableAccessor {
 }
 Get get = new Get(row);
 get.addFamily(HConstants.CATALOG_FAMILY);
-try (Table metaTable = getMetaHTable(connection)) {
-  Result r = get(metaTable, get);
-  RegionLocations locations = getRegionLocations(r);
-  return locations == null
-  ? null
-  : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId());
-}
+Result r = get(getMetaHTable(connection), get);
+RegionLocations locations = getRegionLocations(r);
+return locations == null
+  ? null
+  : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId());
   }
 
   /**
@@ -268,10 +267,8 @@ public class MetaTableAccessor {
 byte[] row = getMetaKeyForRegion(regionInfo);
 Get get = new Get(row);
 get.addFamily(HConstants.CATALOG_FAMILY);
-try (Table metaTable = getMetaHTable(connection)) {
-  Result r = get(metaTable, get);
-  return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
-}
+Result r = get(getMetaHTable(connection), get);
+return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
   }
 
   /** Returns the row key to use for this regionInfo */
@@ -303,9 +300,7 @@ public class MetaTableAccessor {
   byte[] regionName) throws IOException {
 Get get = new Get(regionName);
 get.addFamily(HConstants.CATALOG_FAMILY);
-try (Table metaTable = getMetaHTable(connection)) {
-  return get(metaTable, get);
-}
+return get(getMetaHTable(connection), get);
   }
 
   /**
@@ -636,19 +631,19 @@ public class MetaTableAccessor {
   scan.setCaching(caching);
 }
 scan.addFamily(HConstants.CATALOG_FAMILY);
-try (Table metaTable = getMetaHTable(connection)) {
-  try (ResultScanner scanner = metaTable.getScanner(scan)) {
-Result data;
-while ((data = scanner.next()) != null) {
-  if (data.isEmpty()) {
-continue;
-  }
-  // Break if visit returns false.
-  if (!visitor.visit(data)) {
-break;
-  }
-}
+Table metaTable = getMetaHTable(connection);
+ResultScanner scanner = null;
+try {
+  scanner = metaTable.getScanner(scan);
+  Result data;
+  while((data = scanner.next()) != null) {
+if (data.isEmpty()) continue;
+// Break if visit returns false.
+if (!visitor.visit(data)) break;
   }
+} finally {
+  if (scanner != null) scanner.close();
+  metaTable.close();
 }
   }
 
@@ -1025,9 +1020,7 @@ public class MetaTableAccessor {
*/
   static void putToMetaTable(final Connection connection, final Put p)
 throws IOException {
-try (Table metaTable = getMetaHTable(connection)) {
-  put(metaTable, p);
-}
+put(getMetaHTable(connection), p);
   }
 
   /**
@@ -1051,8 +1044,11 @@ public class MetaTableAccessor {
  

hbase git commit: Update POMs and CHANGES.txt for 1.4.9rc0

2018-11-30 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 9e90b0680 -> 503fbe8a3
Updated Tags:  refs/tags/1.4.9RC0 [created] 660bd81bf


Update POMs and CHANGES.txt for 1.4.9rc0


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/503fbe8a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/503fbe8a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/503fbe8a

Branch: refs/heads/branch-1.4
Commit: 503fbe8a3fc69c00a62266a4dcb436d1218b3146
Parents: 9e90b06
Author: Andrew Purtell 
Authored: Fri Nov 30 12:05:02 2018 -0800
Committer: Andrew Purtell 
Committed: Fri Nov 30 12:07:55 2018 -0800

----------------------------------------------------------------------
 CHANGES.txt | 32 
 hbase-annotations/pom.xml   |  2 +-
 .../hbase-archetype-builder/pom.xml |  2 +-
 hbase-archetypes/hbase-client-project/pom.xml   |  2 +-
 .../hbase-shaded-client-project/pom.xml |  2 +-
 hbase-archetypes/pom.xml|  2 +-
 hbase-assembly/pom.xml  |  2 +-
 hbase-checkstyle/pom.xml|  4 +--
 hbase-client/pom.xml|  2 +-
 hbase-common/pom.xml|  2 +-
 hbase-error-prone/pom.xml   |  4 +--
 hbase-examples/pom.xml  |  2 +-
 hbase-external-blockcache/pom.xml   |  2 +-
 hbase-hadoop-compat/pom.xml |  2 +-
 hbase-hadoop2-compat/pom.xml|  2 +-
 hbase-it/pom.xml|  2 +-
 hbase-metrics-api/pom.xml   |  2 +-
 hbase-metrics/pom.xml   |  2 +-
 hbase-prefix-tree/pom.xml   |  2 +-
 hbase-procedure/pom.xml |  2 +-
 hbase-protocol/pom.xml  |  2 +-
 hbase-resource-bundle/pom.xml   |  2 +-
 hbase-rest/pom.xml  |  2 +-
 hbase-rsgroup/pom.xml   |  2 +-
 hbase-server/pom.xml|  2 +-
 .../hbase-shaded-check-invariants/pom.xml   |  2 +-
 hbase-shaded/hbase-shaded-client/pom.xml|  2 +-
 hbase-shaded/hbase-shaded-server/pom.xml|  2 +-
 hbase-shaded/pom.xml|  2 +-
 hbase-shell/pom.xml |  2 +-
 hbase-testing-util/pom.xml  |  2 +-
 hbase-thrift/pom.xml|  2 +-
 pom.xml |  2 +-
 33 files changed, 66 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/503fbe8a/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index b9d6764..ed5e3ca 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,5 +1,37 @@
 HBase Change Log
 
+Release Notes - HBase - Version 1.4.9 12/7/2018
+
+** Sub-task
+* [HBASE-21347] - Backport HBASE-21200 "Memstore flush doesn't finish because of seekToPreviousRow() in memstore scanner." to branch-1
+* [HBASE-21473] - RowIndexSeekerV1 may return cell with extra two \x00\x00 bytes which has no tags
+
+** Bug
+* [HBASE-20604] - ProtobufLogReader#readNext can incorrectly loop to the same position in the stream until the the WAL is rolled
+* [HBASE-21266] - Not running balancer because processing dead regionservers, but empty dead rs list
+* [HBASE-21275] - Thrift Server (branch 1 fix) -> Disable TRACE HTTP method for thrift http server (branch 1 only)
+* [HBASE-21355] - HStore's storeSize is calculated repeatedly which causing the confusing region split
+* [HBASE-21357] - RS should abort if OOM in Reader thread
+* [HBASE-21359] - Fix build problem against Hadoop 2.8.5
+* [HBASE-21387] - Race condition surrounding in progress snapshot handling in snapshot cache leads to loss of snapshot files
+* [HBASE-21417] - Pre commit build is broken due to surefire plugin crashes
+* [HBASE-21424] - Change flakies and nightlies so scheduled less often
+* [HBASE-21439] - StochasticLoadBalancer RegionLoads aren’t being used in RegionLoad cost functions
+* [HBASE-21445] - CopyTable by bulkload will write hfile into yarn's HDFS
+* [HBASE-21464] - Splitting blocked with meta NSRE during split transaction
+* [HBASE-21504] - If enable FIFOCompactionPolicy, a compaction may write a "empty" hfile whose maxTimeStamp is long max. This kind of hfile will never be archived.
+
+** Improvement
+* [HBASE-21103] - nightly test cache of yetus install needs to be more thorough in verification
+* [HBASE-21185] - WALPrettyPrinter: Additional useful info to be printed by wal printer tool, for debugability purposes
+* [HBASE-21263] - Mention compression algorithm

hbase git commit: HBASE-21359 Fix build problem against Hadoop 2.8.5

2018-11-30 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 70d9934e4 -> 9e90b0680


HBASE-21359 Fix build problem against Hadoop 2.8.5


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9e90b068
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9e90b068
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9e90b068

Branch: refs/heads/branch-1.4
Commit: 9e90b06801bddbcf42da75b46be3be41a5a038c9
Parents: 70d9934
Author: Andrew Purtell 
Authored: Fri Nov 30 12:03:36 2018 -0800
Committer: Andrew Purtell 
Committed: Fri Nov 30 12:04:56 2018 -0800

----------------------------------------------------------------------
 .../src/main/resources/supplemental-models.xml| 18 ++
 1 file changed, 18 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/9e90b068/hbase-resource-bundle/src/main/resources/supplemental-models.xml
----------------------------------------------------------------------
diff --git a/hbase-resource-bundle/src/main/resources/supplemental-models.xml b/hbase-resource-bundle/src/main/resources/supplemental-models.xml
index bb24b0c..bc51fc4 100644
--- a/hbase-resource-bundle/src/main/resources/supplemental-models.xml
+++ b/hbase-resource-bundle/src/main/resources/supplemental-models.xml
@@ -1798,6 +1798,24 @@ Copyright (c) 2000-2005 INRIA, France Telecom
   </supplement>
   <supplement>
     <project>
+      <groupId>org.ow2.asm</groupId>
+      <artifactId>asm</artifactId>
+      <name>ASM: a very small and fast Java bytecode manipulation framework</name>
+
+      <licenses>
+        <license>
+          <name>BSD 3-Clause License</name>
+          <url>http://cvs.forge.objectweb.org/cgi-bin/viewcvs.cgi/*checkout*/asm/asm/LICENSE.txt?rev=1.3&amp;only_with_tag=ASM_3_1_MVN</url>
+          <distribution>repo</distribution>
+          <comments>
+Copyright (c) 2000-2005 INRIA, France Telecom
+          </comments>
+        </license>
+      </licenses>
+    </project>
+  </supplement>
+  <supplement>
+    <project>
       <groupId>org.fusesource.leveldbjni</groupId>
       <artifactId>leveldbjni-all</artifactId>
 



hbase git commit: HBASE-21359 Fix build problem against Hadoop 2.8.5

2018-11-30 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 bd87f4ebc -> fd175a4d4


HBASE-21359 Fix build problem against Hadoop 2.8.5


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fd175a4d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fd175a4d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fd175a4d

Branch: refs/heads/branch-1
Commit: fd175a4d488feac3eea5e5f08093f1b3c7392171
Parents: bd87f4e
Author: Andrew Purtell 
Authored: Fri Nov 30 12:03:36 2018 -0800
Committer: Andrew Purtell 
Committed: Fri Nov 30 12:03:56 2018 -0800

----------------------------------------------------------------------
 .../src/main/resources/supplemental-models.xml| 18 ++
 1 file changed, 18 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/fd175a4d/hbase-resource-bundle/src/main/resources/supplemental-models.xml
----------------------------------------------------------------------
diff --git a/hbase-resource-bundle/src/main/resources/supplemental-models.xml b/hbase-resource-bundle/src/main/resources/supplemental-models.xml
index 79c3a93..165e758 100644
--- a/hbase-resource-bundle/src/main/resources/supplemental-models.xml
+++ b/hbase-resource-bundle/src/main/resources/supplemental-models.xml
@@ -1798,6 +1798,24 @@ Copyright (c) 2000-2005 INRIA, France Telecom
   </supplement>
   <supplement>
     <project>
+      <groupId>org.ow2.asm</groupId>
+      <artifactId>asm</artifactId>
+      <name>ASM: a very small and fast Java bytecode manipulation framework</name>
+
+      <licenses>
+        <license>
+          <name>BSD 3-Clause License</name>
+          <url>http://cvs.forge.objectweb.org/cgi-bin/viewcvs.cgi/*checkout*/asm/asm/LICENSE.txt?rev=1.3&amp;only_with_tag=ASM_3_1_MVN</url>
+          <distribution>repo</distribution>
+          <comments>
+Copyright (c) 2000-2005 INRIA, France Telecom
+          </comments>
+        </license>
+      </licenses>
+    </project>
+  </supplement>
+  <supplement>
+    <project>
       <groupId>org.fusesource.leveldbjni</groupId>
       <artifactId>leveldbjni-all</artifactId>
 



[1/2] hbase git commit: HBASE-21464 Splitting blocked with meta NSRE during split transaction

2018-11-30 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 f1e2f077f -> bd87f4ebc
  refs/heads/branch-1.4 302edf447 -> 70d9934e4


HBASE-21464 Splitting blocked with meta NSRE during split transaction

When looking up the locations of hbase:meta with useCache false, clear all previous
cache entries for it first

Fix Table reference leaks in MetaTableAccessor with try-with-resources

Signed-off-by: Allan Yang 
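
The reference-leak half of this fix is the standard Java try-with-resources idiom.
A minimal, self-contained sketch of the pattern applied to a meta read (an
illustration of the idiom, not the committed MetaTableAccessor code; the helper
name readRow is hypothetical):

  import java.io.IOException;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.Get;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.Table;

  public class MetaReadSketch {
    // The Table is closed even if get() throws, which is exactly the leak
    // the commit message describes for the non-try-with-resources version.
    static Result readRow(Connection connection, byte[] row) throws IOException {
      try (Table meta = connection.getTable(TableName.META_TABLE_NAME)) {
        return meta.get(new Get(row));
      }
    }
  }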


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bd87f4eb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bd87f4eb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bd87f4eb

Branch: refs/heads/branch-1
Commit: bd87f4ebcd49e9a3d100bba81fd6ab8868027c06
Parents: f1e2f07
Author: Andrew Purtell 
Authored: Thu Nov 29 15:46:21 2018 -0800
Committer: Andrew Purtell 
Committed: Fri Nov 30 09:57:54 2018 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/MetaTableAccessor.java  | 95 +---
 .../hadoop/hbase/client/ConnectionManager.java  | 10 ++-
 2 files changed, 49 insertions(+), 56 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/bd87f4eb/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 440f8c6..fd7a97b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -175,8 +175,7 @@ public class MetaTableAccessor {
* @return An {@link Table} for hbase:meta
* @throws IOException
*/
-  static Table getMetaHTable(final Connection connection)
-  throws IOException {
+  static Table getMetaHTable(final Connection connection) throws IOException {
 // We used to pass whole CatalogTracker in here, now we just pass in Connection
 if (connection == null) {
   throw new NullPointerException("No connection");
@@ -248,11 +247,13 @@ public class MetaTableAccessor {
 }
 Get get = new Get(row);
 get.addFamily(HConstants.CATALOG_FAMILY);
-Result r = get(getMetaHTable(connection), get);
-RegionLocations locations = getRegionLocations(r);
-return locations == null
-  ? null
-  : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId());
+try (Table metaTable = getMetaHTable(connection)) {
+  Result r = get(metaTable, get);
+  RegionLocations locations = getRegionLocations(r);
+  return locations == null
+  ? null
+  : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId());
+}
   }
 
   /**
@@ -267,8 +268,10 @@ public class MetaTableAccessor {
 byte[] row = getMetaKeyForRegion(regionInfo);
 Get get = new Get(row);
 get.addFamily(HConstants.CATALOG_FAMILY);
-Result r = get(getMetaHTable(connection), get);
-return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
+try (Table metaTable = getMetaHTable(connection)) {
+  Result r = get(metaTable, get);
+  return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
+}
   }
 
   /** Returns the row key to use for this regionInfo */
@@ -300,7 +303,9 @@ public class MetaTableAccessor {
   byte[] regionName) throws IOException {
 Get get = new Get(regionName);
 get.addFamily(HConstants.CATALOG_FAMILY);
-return get(getMetaHTable(connection), get);
+try (Table metaTable = getMetaHTable(connection)) {
+  return get(metaTable, get);
+}
   }
 
   /**
@@ -631,19 +636,19 @@ public class MetaTableAccessor {
   scan.setCaching(caching);
 }
 scan.addFamily(HConstants.CATALOG_FAMILY);
-Table metaTable = getMetaHTable(connection);
-ResultScanner scanner = null;
-try {
-  scanner = metaTable.getScanner(scan);
-  Result data;
-  while((data = scanner.next()) != null) {
-if (data.isEmpty()) continue;
-// Break if visit returns false.
-if (!visitor.visit(data)) break;
+try (Table metaTable = getMetaHTable(connection)) {
+  try (ResultScanner scanner = metaTable.getScanner(scan)) {
+Result data;
+while ((data = scanner.next()) != null) {
+  if (data.isEmpty()) {
+continue;
+  }
+  // Break if visit returns false.
+  if (!visitor.visit(data)) {
+break;
+  }
+}
   }
-} finally {
-  if (scanner != null) scanner.close();
-  metaTable.close();
 }
   }
 
@@ -1020,7 +1025,9 @@ public class MetaTableAccessor {
*/
   static void putToMetaTable(final Connection connection, final Put p)
 throws IOException {
-

[2/2] hbase git commit: HBASE-21464 Splitting blocked with meta NSRE during split transaction

2018-11-30 Thread apurtell
HBASE-21464 Splitting blocked with meta NSRE during split transaction

When looking up the locations of hbase:meta with useCache false, clear all previous
cache entries for it first

Fix Table reference leaks in MetaTableAccessor with try-with-resources

Signed-off-by: Allan Yang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/70d9934e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/70d9934e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/70d9934e

Branch: refs/heads/branch-1.4
Commit: 70d9934e40785d13c9220d4b3ab0c3b31ca59f8c
Parents: 302edf4
Author: Andrew Purtell 
Authored: Thu Nov 29 15:46:21 2018 -0800
Committer: Andrew Purtell 
Committed: Fri Nov 30 09:58:08 2018 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/MetaTableAccessor.java  | 95 +---
 .../hadoop/hbase/client/ConnectionManager.java  | 10 ++-
 2 files changed, 49 insertions(+), 56 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/70d9934e/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 440f8c6..fd7a97b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -175,8 +175,7 @@ public class MetaTableAccessor {
* @return An {@link Table} for hbase:meta
* @throws IOException
*/
-  static Table getMetaHTable(final Connection connection)
-  throws IOException {
+  static Table getMetaHTable(final Connection connection) throws IOException {
 // We used to pass whole CatalogTracker in here, now we just pass in Connection
 if (connection == null) {
   throw new NullPointerException("No connection");
@@ -248,11 +247,13 @@ public class MetaTableAccessor {
 }
 Get get = new Get(row);
 get.addFamily(HConstants.CATALOG_FAMILY);
-Result r = get(getMetaHTable(connection), get);
-RegionLocations locations = getRegionLocations(r);
-return locations == null
-  ? null
-  : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId());
+try (Table metaTable = getMetaHTable(connection)) {
+  Result r = get(metaTable, get);
+  RegionLocations locations = getRegionLocations(r);
+  return locations == null
+  ? null
+  : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId());
+}
   }
 
   /**
@@ -267,8 +268,10 @@ public class MetaTableAccessor {
 byte[] row = getMetaKeyForRegion(regionInfo);
 Get get = new Get(row);
 get.addFamily(HConstants.CATALOG_FAMILY);
-Result r = get(getMetaHTable(connection), get);
-return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
+try (Table metaTable = getMetaHTable(connection)) {
+  Result r = get(metaTable, get);
+  return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
+}
   }
 
   /** Returns the row key to use for this regionInfo */
@@ -300,7 +303,9 @@ public class MetaTableAccessor {
   byte[] regionName) throws IOException {
 Get get = new Get(regionName);
 get.addFamily(HConstants.CATALOG_FAMILY);
-return get(getMetaHTable(connection), get);
+try (Table metaTable = getMetaHTable(connection)) {
+  return get(metaTable, get);
+}
   }
 
   /**
@@ -631,19 +636,19 @@ public class MetaTableAccessor {
   scan.setCaching(caching);
 }
 scan.addFamily(HConstants.CATALOG_FAMILY);
-Table metaTable = getMetaHTable(connection);
-ResultScanner scanner = null;
-try {
-  scanner = metaTable.getScanner(scan);
-  Result data;
-  while((data = scanner.next()) != null) {
-if (data.isEmpty()) continue;
-// Break if visit returns false.
-if (!visitor.visit(data)) break;
+try (Table metaTable = getMetaHTable(connection)) {
+  try (ResultScanner scanner = metaTable.getScanner(scan)) {
+Result data;
+while ((data = scanner.next()) != null) {
+  if (data.isEmpty()) {
+continue;
+  }
+  // Break if visit returns false.
+  if (!visitor.visit(data)) {
+break;
+  }
+}
   }
-} finally {
-  if (scanner != null) scanner.close();
-  metaTable.close();
 }
   }
 
@@ -1020,7 +1025,9 @@ public class MetaTableAccessor {
*/
   static void putToMetaTable(final Connection connection, final Put p)
 throws IOException {
-put(getMetaHTable(connection), p);
+try (Table metaTable = getMetaHTable(connection)) {
+  put(metaTable, p);
+}
   

[23/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
index 8e0b84b..3110163 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
@@ -555,24 +555,24 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.client.ScannerCallable.MoreResults
-org.apache.hadoop.hbase.client.MasterSwitchType
-org.apache.hadoop.hbase.client.TableState.State
-org.apache.hadoop.hbase.client.Scan.ReadType
-org.apache.hadoop.hbase.client.MobCompactPartitionPolicy
-org.apache.hadoop.hbase.client.CompactType
-org.apache.hadoop.hbase.client.AbstractResponse.ResponseType
-org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState
-org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState
-org.apache.hadoop.hbase.client.SnapshotType
-org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows
-org.apache.hadoop.hbase.client.AsyncRequestFutureImpl.Retry
 org.apache.hadoop.hbase.client.CompactionState
-org.apache.hadoop.hbase.client.RegionLocateType
+org.apache.hadoop.hbase.client.AsyncRequestFutureImpl.Retry
+org.apache.hadoop.hbase.client.CompactType
 org.apache.hadoop.hbase.client.RequestController.ReturnCode
+org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState
+org.apache.hadoop.hbase.client.MasterSwitchType
+org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState
+org.apache.hadoop.hbase.client.AbstractResponse.ResponseType
+org.apache.hadoop.hbase.client.IsolationLevel
 org.apache.hadoop.hbase.client.Consistency
+org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows
 org.apache.hadoop.hbase.client.Durability
-org.apache.hadoop.hbase.client.IsolationLevel
+org.apache.hadoop.hbase.client.SnapshotType
+org.apache.hadoop.hbase.client.TableState.State
+org.apache.hadoop.hbase.client.MobCompactPartitionPolicy
+org.apache.hadoop.hbase.client.Scan.ReadType
+org.apache.hadoop.hbase.client.RegionLocateType
+org.apache.hadoop.hbase.client.ScannerCallable.MoreResults
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html
index e67b778..329f62f 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html
@@ -201,8 +201,8 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.coprocessor.MetaTableMetrics.MetaTableOps
 org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType
+org.apache.hadoop.hbase.coprocessor.MetaTableMetrics.MetaTableOps
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html b/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html
index ce950b0..ae0124e 100644
--- a/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html
@@ -104,8 +104,8 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 

[24/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
index f7aa217..f11e74d 100644
--- a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
@@ -168,9 +168,9 @@
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
 org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand
-org.apache.hadoop.hbase.backup.BackupType
 org.apache.hadoop.hbase.backup.BackupInfo.BackupState
 org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase
+org.apache.hadoop.hbase.backup.BackupType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
index 3164f8f..990213e 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class ConnectionImplementation.MasterServiceState
+static class ConnectionImplementation.MasterServiceState
 extends java.lang.Object
 State of the MasterService connection/setup.
 
@@ -222,7 +222,7 @@ extends java.lang.Object
 
 
 connection
-Connection connection
+Connection connection
 
 
 
@@ -231,7 +231,7 @@ extends java.lang.Object
 
 
 stub
-org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterface stub
+org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterface stub
 
 
 
@@ -240,7 +240,7 @@ extends java.lang.Object
 
 
 userCount
-int userCount
+int userCount
 
 
 
@@ -257,7 +257,7 @@ extends java.lang.Object
 
 
 MasterServiceState
-MasterServiceState(Connection connection)
+MasterServiceState(Connection connection)
 
 
 
@@ -274,7 +274,7 @@ extends java.lang.Object
 
 
 toString
-public java.lang.String toString()
+public java.lang.String toString()
 
 Overrides:
 toString in class java.lang.Object
@@ -287,7 +287,7 @@ extends java.lang.Object
 
 
 getStub
-java.lang.Object getStub()
+java.lang.Object getStub()
 
 
 
@@ -296,7 +296,7 @@ extends java.lang.Object
 
 
 clearStub
-void clearStub()
+void clearStub()
 
 
 
@@ -305,7 +305,7 @@ extends java.lang.Object
 
 
 isMasterRunning
-boolean isMasterRunning()
+boolean isMasterRunning()
                  throws java.io.IOException
 
 Throws:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html

[07/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/testdevapidocs/org/apache/hadoop/hbase/TestHBaseTestingUtility.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/TestHBaseTestingUtility.html b/testdevapidocs/org/apache/hadoop/hbase/TestHBaseTestingUtility.html
index da74c1c..8a4c5fe 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/TestHBaseTestingUtility.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/TestHBaseTestingUtility.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestHBaseTestingUtility
+public class TestHBaseTestingUtility
 extends java.lang.Object
 Test our testing utility class
 
@@ -142,6 +142,18 @@ extends java.lang.Object
 org.junit.rules.TestName
 name
 
+
+private static int
+NUMREGIONS
+
+
+private static int
+NUMROWS
+
+
+private static int
+NUMTABLES
+
 
 
 
@@ -176,59 +188,63 @@ extends java.lang.Object
 
 
 void
-testMiniCluster()
+testKillMiniHBaseCluster()
 
 
 void
-testMiniClusterBindToWildcard()
+testMiniCluster()
 
 
 void
-testMiniClusterWithSSLOn()
+testMiniClusterBindToWildcard()
 
 
 void
-testMiniDFSCluster()
+testMiniClusterWithSSLOn()
 
 
 void
-testMiniZooKeeperWithMultipleClientPorts()
+testMiniDFSCluster()
 
 
 void
-testMiniZooKeeperWithMultipleServers()
+testMiniZooKeeperWithMultipleClientPorts()
 
 
 void
-testMiniZooKeeperWithOneServer()
+testMiniZooKeeperWithMultipleServers()
 
 
 void
+testMiniZooKeeperWithOneServer()
+
+
+void
 testMultiClusters()
 Basic sanity test that spins up multiple HDFS and HBase 
clusters that share
  the same ZK ensemble.
 
 
-
+
 void
 testMultipleStartStop()
 Test that we can start and stop multiple time a cluster
with the same HBaseTestingUtility.
 
 
-
+
 void
 testOverridingOfDefaultPorts()
 
-
+
 void
 testResolvePortConflict()
 
-
+
 void
 testSetupClusterTestBuildDir()
 
-
+
 void
 testTestDir()
 
@@ -254,13 +270,52 @@ extends java.lang.Object
 
 
 Field Detail
+
+
+NUMTABLES
+private static final int NUMTABLES
+
+See Also:
+Constant Field Values
+
+
+NUMROWS
+private static final int NUMROWS
+
+See Also:
+Constant Field Values
+
+
+NUMREGIONS
+private static final int NUMREGIONS
+
+See Also:
+Constant Field Values
+
 
 
 
 
 CLASS_RULE
-public static final HBaseClassTestRule CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
 
 
@@ -269,7 +324,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -278,7 +333,7 @@ extends java.lang.Object
 
 
 
 name
-public org.junit.rules.TestName name
+public org.junit.rules.TestName name
 
 
@@ -295,7 +350,7 @@ extends java.lang.Object
 
 
 
 TestHBaseTestingUtility
-public TestHBaseTestingUtility()
+public TestHBaseTestingUtility()
 
 
@@ -312,7 +367,7 @@ extends java.lang.Object
 
 
 testMultiClusters
-public void testMultiClusters()
+public void testMultiClusters()
                        throws java.lang.Exception
 Basic sanity test that spins up multiple HDFS and HBase clusters that share
  the same ZK ensemble. We then create the same table in both and make sure
@@ -329,7 +384,7 @@ extends java.lang.Object
 
 
 testMiniCluster
-public void testMiniCluster()
+public void testMiniCluster()
                      throws java.lang.Exception
 
 Throws:
@@ -343,7 +398,7 @@ extends java.lang.Object
 
 
 testMiniClusterBindToWildcard
-public void testMiniClusterBindToWildcard()
+public void testMiniClusterBindToWildcard()
                                    throws java.lang.Exception
 
 Throws:
@@ -357,7 +412,7 @@ extends java.lang.Object

[13/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/source-repository.html
----------------------------------------------------------------------
diff --git a/source-repository.html b/source-repository.html
index 86842b3..468cf15 100644
--- a/source-repository.html
+++ b/source-repository.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Source Code Management
 
@@ -309,7 +309,7 @@
 <a href="https://www.apache.org/">The Apache Software Foundation</a>.
 All rights reserved.  
 
-      Last Published: 2018-11-29
+      Last Published: 2018-11-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/sponsors.html
----------------------------------------------------------------------
diff --git a/sponsors.html b/sponsors.html
index cff1c62..4fe15c6 100644
--- a/sponsors.html
+++ b/sponsors.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Apache HBase™ Sponsors
 
@@ -343,7 +343,7 @@ under the License. -->
 <a href="https://www.apache.org/">The Apache Software Foundation</a>.
 All rights reserved.  
 
-      Last Published: 2018-11-29
+      Last Published: 2018-11-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/supportingprojects.html
----------------------------------------------------------------------
diff --git a/supportingprojects.html b/supportingprojects.html
index 799075a..7890a7b 100644
--- a/supportingprojects.html
+++ b/supportingprojects.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Supporting Projects
 
@@ -530,7 +530,7 @@ under the License. -->
 <a href="https://www.apache.org/">The Apache Software Foundation</a>.
 All rights reserved.  
 
-      Last Published: 2018-11-29
+      Last Published: 2018-11-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/team-list.html
----------------------------------------------------------------------
diff --git a/team-list.html b/team-list.html
index 05bac57..c831ffc 100644
--- a/team-list.html
+++ b/team-list.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Team
 
@@ -776,7 +776,7 @@
 <a href="https://www.apache.org/">The Apache Software Foundation</a>.
 All rights reserved.  
 
-      Last Published: 2018-11-29
+      Last Published: 2018-11-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/testapidocs/index-all.html
----------------------------------------------------------------------
diff --git a/testapidocs/index-all.html b/testapidocs/index-all.html
index 2b408c9..d299bb0 100644
--- a/testapidocs/index-all.html
+++ b/testapidocs/index-all.html
@@ -849,6 +849,10 @@
 
 killMaster(ServerName) - Method in class org.apache.hadoop.hbase.MiniHBaseCluster
 
+killMiniHBaseCluster() - Method in class org.apache.hadoop.hbase.HBaseTestingUtility
+
+Abruptly Shutdown HBase mini cluster.
+
 killNameNode(ServerName) - Method in class org.apache.hadoop.hbase.MiniHBaseCluster
 
 killRegionServer(ServerName) - Method in class org.apache.hadoop.hbase.MiniHBaseCluster
@@ -1177,7 +1181,7 @@
 
 shutdownMiniHBaseCluster() - Method in class org.apache.hadoop.hbase.HBaseTestingUtility
 
-Shutdown HBase mini cluster.
+Shutdown HBase mini cluster.Does not shutdown zk or dfs if running.
 
 shutdownMiniMapReduceCluster() - Method in class org.apache.hadoop.hbase.HBaseTestingUtility
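
The new entry above pairs killMiniHBaseCluster() (abrupt stop) with the existing
shutdownMiniHBaseCluster() (graceful stop). A minimal sketch of how a test harness
might use the two (the try/finally scaffolding is illustrative, not from this commit):

  import org.apache.hadoop.hbase.HBaseTestingUtility;

  public class MiniClusterShutdownSketch {
    public static void main(String[] args) throws Exception {
      HBaseTestingUtility util = new HBaseTestingUtility();
      util.startMiniCluster();
      try {
        // ... exercise the cluster here ...
      } finally {
        // Graceful variant: waits for HBase to stop; zk/dfs keep running.
        util.shutdownMiniHBaseCluster();
        // The abrupt variant for crash-recovery tests would instead be:
        // util.killMiniHBaseCluster();
        util.shutdownMiniCluster(); // stops the zk/dfs started above as well
      }
    }
  }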
 



[19/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
index c62e029..36ceec7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
@@ -634,7 +634,7 @@
 626    checkClosed();
 627    try {
 628      if (!isTableEnabled(tableName)) {
-629        LOG.debug("Table " + tableName + " not enabled");
+629        LOG.debug("Table {} not enabled", tableName);
 630        return false;
 631      }
 632      List<Pair<RegionInfo, ServerName>> locations =
@@ -645,1411 +645,1407 @@
 637      for (Pair<RegionInfo, ServerName> pair : locations) {
 638        RegionInfo info = pair.getFirst();
 639        if (pair.getSecond() == null) {
-640          if (LOG.isDebugEnabled()) {
-641            LOG.debug("Table " + tableName + " has not deployed region " + pair.getFirst()
-642                .getEncodedName());
-643          }
-644          notDeployed++;
-645        } else if (splitKeys != null
-646            && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
-647          for (byte[] splitKey : splitKeys) {
-648            // Just check if the splitkey is available
-649            if (Bytes.equals(info.getStartKey(), splitKey)) {
-650              regionCount++;
-651              break;
-652            }
-653          }
-654        } else {
-655          // Always empty start row should be counted
-656          regionCount++;
-657        }
-658      }
-659      if (notDeployed > 0) {
-660        if (LOG.isDebugEnabled()) {
-661          LOG.debug("Table " + tableName + " has " + notDeployed + " regions");
-662        }
-663        return false;
-664      } else if (splitKeys != null && regionCount != splitKeys.length + 1) {
-665        if (LOG.isDebugEnabled()) {
-666          LOG.debug("Table " + tableName + " expected to have " + (splitKeys.length + 1)
-667              + " regions, but only " + regionCount + " available");
-668        }
-669        return false;
-670      } else {
-671        if (LOG.isDebugEnabled()) {
-672          LOG.debug("Table " + tableName + " should be available");
-673        }
-674        return true;
-675      }
-676    } catch (TableNotFoundException tnfe) {
-677      LOG.warn("Table " + tableName + " not enabled, it is not exists");
-678      return false;
-679    }
-680  }
-681
-682  @Override
-683  public HRegionLocation locateRegion(final byte[] regionName) throws IOException {
-684    RegionLocations locations = locateRegion(RegionInfo.getTable(regionName),
-685      RegionInfo.getStartKey(regionName), false, true);
-686    return locations == null ? null : locations.getRegionLocation();
-687  }
-688
-689  private boolean isDeadServer(ServerName sn) {
-690    if (clusterStatusListener == null) {
-691      return false;
-692    } else {
-693      return clusterStatusListener.isDeadServer(sn);
-694    }
-695  }
-696
-697  @Override
-698  public List<HRegionLocation> locateRegions(TableName tableName) throws IOException {
-699    return locateRegions(tableName, false, true);
-700  }
-701
-702  @Override
-703  public List<HRegionLocation> locateRegions(TableName tableName, boolean useCache,
-704      boolean offlined) throws IOException {
-705    List<RegionInfo> regions;
-706    if (TableName.isMetaTableName(tableName)) {
-707      regions = Collections.singletonList(RegionInfoBuilder.FIRST_META_REGIONINFO);
-708    } else {
-709      regions = MetaTableAccessor.getTableRegions(this, tableName, !offlined);
-710    }
-711    List<HRegionLocation> locations = new ArrayList<>();
-712    for (RegionInfo regionInfo : regions) {
-713      if (!RegionReplicaUtil.isDefaultReplica(regionInfo)) {
-714        continue;
-715      }
-716      RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
-717      if (list != null) {
-718        for (HRegionLocation loc : list.getRegionLocations()) {
-719          if (loc != null) {
-720            locations.add(loc);
-721          }
-722        }
-723      }
-724    }
-725    return locations;
-726  }
-727
-728  @Override
-729  public HRegionLocation locateRegion(final TableName tableName, final byte[] row)
-730      throws IOException {
-731    RegionLocations locations = locateRegion(tableName, row, true, true);
-732    return locations == null ? null : locations.getRegionLocation();
-733  }
-734
-735  @Override
-736  public HRegionLocation relocateRegion(final TableName tableName, final byte[] row)
-737      throws

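The only substantive change in this hunk is the logging style: string concatenation under an isDebugEnabled() guard becomes SLF4J parameterized logging, which defers message formatting until the level is known to be enabled. A minimal, self-contained sketch of the pattern (the AvailabilityLogger class is illustrative, not HBase API):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class AvailabilityLogger {
  private static final Logger LOG = LoggerFactory.getLogger(AvailabilityLogger.class);

  void report(String tableName, int notDeployed) {
    // Old style: the message string is built even when DEBUG is off, hence the guard.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Table " + tableName + " has " + notDeployed + " regions");
    }
    // New style: {} placeholders are only rendered when DEBUG is enabled,
    // so the explicit guard becomes unnecessary.
    LOG.debug("Table {} has {} regions", tableName, notDeployed);
  }
}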
[11/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git a/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html b/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
index bc4887a..b12b756 100644
--- a/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -1245,3081 +1245,3103 @@
 1237  }
 1238
 1239  /**
-1240   * Shutdown HBase mini cluster.  Does not shutdown zk or dfs if running.
-1241   */
-1242  public void shutdownMiniHBaseCluster() throws IOException {
-1243    if (hbaseAdmin != null) {
-1244      hbaseAdmin.close();
-1245      hbaseAdmin = null;
-1246    }
-1247    if (this.connection != null) {
-1248      this.connection.close();
-1249      this.connection = null;
+1240   * Shutdown HBase mini cluster.Does not shutdown zk or dfs if running.
+1241   * @throws java.io.IOException in case command is unsuccessful
+1242   */
+1243  public void shutdownMiniHBaseCluster() throws IOException {
+1244    cleanup();
+1245    if (this.hbaseCluster != null) {
+1246      this.hbaseCluster.shutdown();
+1247      // Wait till hbase is down before going on to shutdown zk.
+1248      this.hbaseCluster.waitUntilShutDown();
+1249      this.hbaseCluster = null;
 1250    }
-1251    // unset the configuration for MIN and MAX RS to start
-1252    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
-1253    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
-1254    if (this.hbaseCluster != null) {
-1255      this.hbaseCluster.shutdown();
-1256      // Wait till hbase is down before going on to shutdown zk.
-1257      this.hbaseCluster.waitUntilShutDown();
-1258      this.hbaseCluster = null;
-1259    }
-1260    if (zooKeeperWatcher != null) {
-1261      zooKeeperWatcher.close();
-1262      zooKeeperWatcher = null;
-1263    }
-1264  }
-1265
-1266  /**
-1267   * Returns the path to the default root dir the minicluster uses. If <code>create</code>
-1268   * is true, a new root directory path is fetched irrespective of whether it has been fetched
-1269   * before or not. If false, previous path is used.
-1270   * Note: this does not cause the root dir to be created.
-1271   * @return Fully qualified path for the default hbase root dir
-1272   * @throws IOException
-1273   */
-1274  public Path getDefaultRootDirPath(boolean create) throws IOException {
-1275    if (!create) {
-1276      return getDataTestDirOnTestFS();
-1277    } else {
-1278      return getNewDataTestDirOnTestFS();
-1279    }
-1280  }
-1281
-1282  /**
-1283   * Same as {{@link HBaseTestingUtility#getDefaultRootDirPath(boolean create)}
-1284   * except that <code>create</code> flag is false.
-1285   * Note: this does not cause the root dir to be created.
-1286   * @return Fully qualified path for the default hbase root dir
-1287   * @throws IOException
-1288   */
-1289  public Path getDefaultRootDirPath() throws IOException {
-1290    return getDefaultRootDirPath(false);
-1291  }
-1292
-1293  /**
-1294   * Creates an hbase rootdir in user home directory.  Also creates hbase
-1295   * version file.  Normally you won't make use of this method.  Root hbasedir
-1296   * is created for you as part of mini cluster startup.  You'd only use this
-1297   * method if you were doing manual operation.
-1298   * @param create This flag decides whether to get a new
-1299   * root or data directory path or not, if it has been fetched already.
-1300   * Note : Directory will be made irrespective of whether path has been fetched or not.
-1301   * If directory already exists, it will be overwritten
-1302   * @return Fully qualified path to hbase root dir
-1303   * @throws IOException
-1304   */
-1305  public Path createRootDir(boolean create) throws IOException {
-1306    FileSystem fs = FileSystem.get(this.conf);
-1307    Path hbaseRootdir = getDefaultRootDirPath(create);
-1308    FSUtils.setRootDir(this.conf, hbaseRootdir);
-1309    fs.mkdirs(hbaseRootdir);
-1310    FSUtils.setVersion(fs, hbaseRootdir);
-1311    return hbaseRootdir;
-1312  }
-1313
-1314  /**
-1315   * Same as {@link HBaseTestingUtility#createRootDir(boolean create)}
-1316   * except that <code>create</code> flag is false.
-1317   * @return Fully qualified path to hbase root dir
-1318   * @throws IOException
-1319   */
-1320  public Path createRootDir() throws IOException {
-1321    return createRootDir(false);
-1322  }
-1323
-1324  /**
-1325   * Creates a hbase walDir in the user's home directory.
-1326   * Normally you won't make use of this method. Root hbaseWALDir
-1327   * is created for you as part of mini cluster startup. You'd only use this
-1328   * method if you were doing manual operation.
-1329   *
-1330   * @return Fully qualified path to hbase 

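The rewritten shutdownMiniHBaseCluster() above folds the admin, connection and zookeeper-watcher teardown into one cleanup() call that runs before the cluster itself is stopped, so client resources are always released first. A sketch of how a test typically drives it (table and family names are placeholders):

HBaseTestingUtility util = new HBaseTestingUtility();
util.startMiniCluster();                  // brings up DFS, ZK and HBase
try (Table t = util.createTable(TableName.valueOf("demo"), Bytes.toBytes("f"))) {
  // exercise the cluster
} finally {
  util.shutdownMiniHBaseCluster();        // closes clients, then HBase; ZK and DFS stay up
  util.shutdownMiniCluster();             // tears down whatever is still running
}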
[20/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html
index 981ebcd..b8c030f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html
@@ -237,1811 +237,1817 @@
 229  }
 230
 231  private void waitForSystemTable(Admin admin, TableName tableName) throws IOException {
-232    long TIMEOUT = 60000;
-233    long startTime = EnvironmentEdgeManager.currentTime();
-234    while (!admin.tableExists(tableName) || !admin.isTableAvailable(tableName)) {
-235      try {
-236        Thread.sleep(100);
-237      } catch (InterruptedException e) {
-238      }
-239      if (EnvironmentEdgeManager.currentTime() - startTime > TIMEOUT) {
-240        throw new IOException(
-241          "Failed to create backup system table " + tableName + " after " + TIMEOUT + "ms");
-242      }
-243    }
-244    LOG.debug("Backup table " + tableName + " exists and available");
-245  }
-246
-247  @Override
-248  public void close() {
-249    // do nothing
-250  }
-251
-252  /**
-253   * Updates status (state) of a backup session in backup system table table
-254   * @param info backup info
-255   * @throws IOException exception
-256   */
-257  public void updateBackupInfo(BackupInfo info) throws IOException {
-258    if (LOG.isTraceEnabled()) {
-259      LOG.trace("update backup status in backup system table for: " + info.getBackupId()
-260        + " set status=" + info.getState());
-261    }
-262    try (Table table = connection.getTable(tableName)) {
-263      Put put = createPutForBackupInfo(info);
-264      table.put(put);
-265    }
-266  }
-267
-268  /*
-269   * @param backupId the backup Id
-270   * @return Map of rows to path of bulk loaded hfile
-271   */
-272  Map<byte[], String> readBulkLoadedFiles(String backupId) throws IOException {
-273    Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId);
-274    try (Table table = connection.getTable(bulkLoadTableName);
-275        ResultScanner scanner = table.getScanner(scan)) {
-276      Result res = null;
-277      Map<byte[], String> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-278      while ((res = scanner.next()) != null) {
-279        res.advance();
-280        byte[] row = CellUtil.cloneRow(res.listCells().get(0));
-281        for (Cell cell : res.listCells()) {
-282          if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
-283            BackupSystemTable.PATH_COL.length) == 0) {
-284            map.put(row, Bytes.toString(CellUtil.cloneValue(cell)));
-285          }
-286        }
-287      }
-288      return map;
-289    }
-290  }
-291
-292  /*
-293   * Used during restore
-294   * @param backupId the backup Id
-295   * @param sTableList List of tables
-296   * @return array of Map of family to List of Paths
-297   */
-298  public Map<byte[], List<Path>>[] readBulkLoadedFiles(String backupId, List<TableName> sTableList)
-299      throws IOException {
-300    Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId);
-301    Map<byte[], List<Path>>[] mapForSrc = new Map[sTableList == null ? 1 : sTableList.size()];
-302    try (Table table = connection.getTable(bulkLoadTableName);
-303        ResultScanner scanner = table.getScanner(scan)) {
-304      Result res = null;
-305      while ((res = scanner.next()) != null) {
-306        res.advance();
-307        TableName tbl = null;
-308        byte[] fam = null;
-309        String path = null;
-310        for (Cell cell : res.listCells()) {
-311          if (CellUtil.compareQualifiers(cell, BackupSystemTable.TBL_COL, 0,
-312            BackupSystemTable.TBL_COL.length) == 0) {
-313            tbl = TableName.valueOf(CellUtil.cloneValue(cell));
-314          } else if (CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0,
-315            BackupSystemTable.FAM_COL.length) == 0) {
-316            fam = CellUtil.cloneValue(cell);
-317          } else if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
-318            BackupSystemTable.PATH_COL.length) == 0) {
-319            path = Bytes.toString(CellUtil.cloneValue(cell));
-320          }
-321        }
-322        int srcIdx = IncrementalTableBackupClient.getIndex(tbl, sTableList);
-323        if (srcIdx == -1) {
-324          // the table is not among the query
-325          continue;
-326        }
-327        if (mapForSrc[srcIdx] == null) {
-328          mapForSrc[srcIdx] = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-329        }
-330        List<Path> files;
-331        if (!mapForSrc[srcIdx].containsKey(fam)) {
-332          files = new 

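waitForSystemTable() above is a plain poll-and-deadline loop over the Admin API. Extracted into a self-contained helper it looks like this (a sketch; the helper and its names are not part of HBase):

import java.io.IOException;
import java.util.function.BooleanSupplier;

final class Poll {
  /** Polls condition every 100 ms until it holds or timeoutMs elapses. */
  static void waitFor(BooleanSupplier condition, long timeoutMs, String what) throws IOException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new IOException("Timed out waiting for " + what + " after " + timeoutMs + "ms");
      }
      try {
        Thread.sleep(100);
      } catch (InterruptedException e) {
        // Unlike the swallowed exception in the code above, restore the interrupt flag.
        Thread.currentThread().interrupt();
        throw new IOException("Interrupted while waiting for " + what, e);
      }
    }
  }
}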
[12/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git a/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html b/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
index 211aacd..ea5be52 100644
--- a/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":9,"i1":10,"i2":10,"i3":10,"i4":9,"i5":10,"i6":10,"i7":10,"i8":9,"i9":9,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":9,"i16":10,"i17":10,"i18":9,"i19":42,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":41,"i26":41,"i27":10,"i28":10,"i29":10,"i30":42,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":9,"i37":9,"i38":9,"i39":9,"i40":9,"i41":9,"i42":9,"i43":10,"i44":9,"i45":9,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":42,"i68":42,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":9,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":9,"i93":9,"i94":10,"i95":9,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":9,"i106":9,"i107":9,"i108":42,"i109":10,"i110":10,"i11
 
1":10,"i112":9,"i113":42,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":9,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":9,"i138":9,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":9,"i145":9,"i146":10,"i147":9,"i148":10,"i149":10,"i150":10,"i151":10,"i152":9,"i153":9,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":42,"i162":10,"i163":42,"i164":42,"i165":42,"i166":42,"i167":42,"i168":42,"i169":42,"i170":42,"i171":42,"i172":42,"i173":10,"i174":10,"i175":10,"i176":10,"i177":10,"i178":10,"i179":10,"i180":42,"i181":42,"i182":42,"i183":10,"i184":10,"i185":10,"i186":10,"i187":10,"i188":10,"i189":10,"i190":10,"i191":10,"i192":10,"i193":10,"i194":10,"i195":10,"i196":10,"i197":9,"i198":10,"i199":10,"i200":10,"i201":10,"i202":10,"i203":10,"i204":10,"i205":10,"i206":10,"i207":10,"i208":10,"i209":10,"i210":10,"i211":10,"
 i212":10};
+var methods = 
{"i0":9,"i1":10,"i2":10,"i3":10,"i4":9,"i5":10,"i6":10,"i7":10,"i8":9,"i9":9,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":9,"i16":10,"i17":10,"i18":9,"i19":42,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":41,"i26":41,"i27":10,"i28":10,"i29":10,"i30":42,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":9,"i37":9,"i38":9,"i39":9,"i40":9,"i41":9,"i42":9,"i43":10,"i44":9,"i45":9,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":42,"i68":42,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":9,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":9,"i93":9,"i94":10,"i95":9,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":9,"i106":9,"i107":9,"i108":42,"i109":10,"i110":10,"i11
 
1":10,"i112":9,"i113":42,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":9,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":9,"i139":9,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":9,"i146":9,"i147":10,"i148":9,"i149":10,"i150":10,"i151":10,"i152":10,"i153":9,"i154":9,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":42,"i163":10,"i164":42,"i165":42,"i166":42,"i167":42,"i168":42,"i169":42,"i170":42,"i171":42,"i172":42,"i173":42,"i174":10,"i175":10,"i176":10,"i177":10,"i178":10,"i179":10,"i180":10,"i181":42,"i182":42,"i183":42,"i184":10,"i185":10,"i186":10,"i187":10,"i188":10,"i189":10,"i190":10,"i191":10,"i192":10,"i193":10,"i194":10,"i195":10,"i196":10,"i197":10,"i198":9,"i199":10,"i200":10,"i201":10,"i202":10,"i203":10,"i204":10,"i205":10,"i206":10,"i207":10,"i208":10,"i209":10,"i210":10,"i211":10,"
 i212":10,"i213":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -1187,24 +1187,30 @@ extends 
 
 void
+killMiniHBaseCluster()
+Abruptly Shutdown HBase mini cluster.
+
+
+
+void
 loadNumericRows(Table t,
                 byte[] f,
                 int startRow,
 

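The newly listed killMiniHBaseCluster() is the abrupt counterpart to shutdownMiniHBaseCluster(): per its summary it stops HBase without a clean shutdown, which is what crash-recovery tests want. A hedged usage sketch (assuming a running mini cluster held in util):

util.killMiniHBaseCluster();         // abort master and region servers, no graceful close
util.startMiniHBaseCluster(1, 1);    // restart HBase on the same ZK and DFS, then verify recovery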
[02/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/testdevapidocs/src-html/org/apache/hadoop/hbase/TestHBaseTestingUtility.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestHBaseTestingUtility.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestHBaseTestingUtility.html
index 97c4d20..25ea240 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestHBaseTestingUtility.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestHBaseTestingUtility.html
@@ -34,452 +34,479 @@
 026import static org.mockito.Mockito.when;
 027
 028import java.io.File;
-029import java.io.IOException;
-030import java.util.HashMap;
-031import java.util.List;
-032import java.util.Map;
-033import java.util.Map.Entry;
-034import java.util.Random;
-035import org.apache.hadoop.conf.Configuration;
-036import org.apache.hadoop.fs.FileSystem;
-037import org.apache.hadoop.fs.FileUtil;
-038import org.apache.hadoop.fs.Path;
-039import org.apache.hadoop.hbase.client.Get;
-040import org.apache.hadoop.hbase.client.Put;
-041import org.apache.hadoop.hbase.client.Result;
-042import org.apache.hadoop.hbase.client.Table;
-043import org.apache.hadoop.hbase.http.ssl.KeyStoreTestUtil;
-044import org.apache.hadoop.hbase.testclassification.LargeTests;
-045import org.apache.hadoop.hbase.testclassification.MiscTests;
-046import org.apache.hadoop.hbase.util.Bytes;
-047import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
-048import org.apache.hadoop.hdfs.MiniDFSCluster;
-049import org.junit.ClassRule;
-050import org.junit.Rule;
-051import org.junit.Test;
-052import org.junit.experimental.categories.Category;
-053import org.junit.rules.TestName;
-054import org.mockito.Mockito;
-055import org.mockito.invocation.InvocationOnMock;
-056import org.mockito.stubbing.Answer;
-057import org.slf4j.Logger;
-058import org.slf4j.LoggerFactory;
-059
-060/**
-061 * Test our testing utility class
-062 */
-063@Category({MiscTests.class, LargeTests.class})
-064public class TestHBaseTestingUtility {
-065
-066  @ClassRule
-067  public static final HBaseClassTestRule CLASS_RULE =
-068      HBaseClassTestRule.forClass(TestHBaseTestingUtility.class);
-069
-070  private static final Logger LOG = LoggerFactory.getLogger(TestHBaseTestingUtility.class);
-071
-072  @Rule
-073  public TestName name = new TestName();
-074
-075  /**
-076   * Basic sanity test that spins up multiple HDFS and HBase clusters that share
-077   * the same ZK ensemble. We then create the same table in both and make sure
-078   * that what we insert in one place doesn't end up in the other.
-079   * @throws Exception
-080   */
-081  @Test
-082  public void testMultiClusters() throws Exception {
-083    // Create three clusters
-084
-085    // Cluster 1.
-086    HBaseTestingUtility htu1 = new HBaseTestingUtility();
-087    // Set a different zk path for each cluster
-088    htu1.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
-089    htu1.startMiniZKCluster();
-090
-091    // Cluster 2
-092    HBaseTestingUtility htu2 = new HBaseTestingUtility();
-093    htu2.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
-094    htu2.getConfiguration().set(HConstants.ZOOKEEPER_CLIENT_PORT,
-095      htu1.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT, "-1"));
-096    htu2.setZkCluster(htu1.getZkCluster());
-097
-098    // Cluster 3; seed it with the conf from htu1 so we pickup the 'right'
-099    // zk cluster config; it is set back into the config. as part of the
-100    // start of minizkcluster.
-101    HBaseTestingUtility htu3 = new HBaseTestingUtility();
-102    htu3.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/3");
-103    htu3.getConfiguration().set(HConstants.ZOOKEEPER_CLIENT_PORT,
-104      htu1.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT, "-1"));
-105    htu3.setZkCluster(htu1.getZkCluster());
-106
-107    try {
-108      htu1.startMiniCluster();
-109      htu2.startMiniCluster();
-110      htu3.startMiniCluster();
-111
-112      final TableName tableName = TableName.valueOf(name.getMethodName());
-113      final byte[] FAM_NAME = Bytes.toBytes("fam");
-114      final byte[] ROW = Bytes.toBytes("row");
-115      final byte[] QUAL_NAME = Bytes.toBytes("qual");
-116      final byte[] VALUE = Bytes.toBytes("value");
-117
-118      Table table1 = htu1.createTable(tableName, FAM_NAME);
-119      Table table2 = htu2.createTable(tableName, FAM_NAME);
-120
-121      Put put = new Put(ROW);
-122      put.addColumn(FAM_NAME, QUAL_NAME, VALUE);
-123      table1.put(put);
-124
-125      Get get = new Get(ROW);
-126      get.addColumn(FAM_NAME, QUAL_NAME);
-127      Result res = table1.get(get);
-128      assertEquals(1, res.size());
-129
-130      res = table2.get(get);
-131      assertEquals(0, res.size());
-132
-133      table1.close();
-134      table2.close();
-135
-136    } 

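testMultiClusters() isolates three clusters on a single ZooKeeper ensemble purely through zookeeper.znode.parent. The same trick works for any client talking to a shared ensemble (a sketch; the quorum host and znode path are placeholders):

Configuration conf = HBaseConfiguration.create();
conf.set(HConstants.ZOOKEEPER_QUORUM, "zk.example.com");
// Each logical cluster keeps its state under its own parent znode.
conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/cluster-a");
try (Connection conn = ConnectionFactory.createConnection(conf)) {
  // operations here only ever see the HBase rooted at /cluster-a
}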
[08/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html 
b/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html
index 15db0f1..82a8c92 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html
@@ -285,7 +285,7 @@ extends 
 
Methods inherited from class org.apache.hadoop.hbase.HBaseTestingUtility
-assertKVListsEqual,
 assertRegionOnlyOnServer,
 assertRegionOnServer,
 assignRegion,
 available,
 checksumRows,
 cleanupDataTestDirOnTestFS,
 cleanupDataTestDirOnTestFS,
 closeRegionAndWAL,
 closeRegionAndWAL,
 compact,
 compact,
 countRows,
 countRows,
 countRows,
 countRows,
 countRows,
 countRows,
 countRows,
 createLocalHRegion,
 createLocalHRegion,
 createLocalHRegion,
 createLocalHRegion,
 createLocalHRegion, createLocalHRegionWithInMemoryFlags,
 createLocalHTU,
 createLocalHTU,
 createMockRegionServerService,
 createMockRegionServerService,
 createMockRegionServerService, createMultiRegionsInMeta,
 createMultiRegionsInMeta,
 createMultiRegionTable,
 createMultiRegionTable,
 createMultiRegionTable,
 createMultiRegionTable,
 createPreSplitLoadTestTable,
 createPreSplitLoadTestTable,
 createPreSplitLoadTestTable, createPreSplitLoadTestTable,
 createPreSplitLoadTestTable,
 createPreSplitLoadTestTable, createPreSplitLoadTestTable,
 createRandomTable,
 createRegionAndWAL,
 createRegionAndWAL,
 createRootDir,
 createRootDir,
 createTable,
 createTable,
 createTable,
 createTable,
 createTable,
 createTable,
 createTable,
 createTable,
 createTable,
 createTable,
 createTable,
 createTable,
 createTable,
 createTable,
 createTable,
 createTable, createTable,
 createTable,
 createTable,
 createTableDescriptor,
 createTableDescriptor,
 createTableDescriptor,
 createTableDescriptor,
 createTableDescriptor,
 createTableDescriptor,
 createTestRegion,
 createWal,
 createWALRootDir, deleteNumericRows,
 deleteTable,
 deleteTableData,
 deleteTableIfAny,
 enableDebug,
 ensureSomeNonStoppedRegionServersAvailable,
 ensureSomeRegionServersAvailable, expireMasterSession,
 expireRegionServerSession,
 expireSession,
 expireSession,
 explainTableAvailability,
 explainTableState,
 findLastTableState,
 flush,
 flush,
 generateColumnDescriptors,
 generateColumnDescriptors,
 getAdmin,
 getAllOnlineRegions,
 getClosestRowBefore, getClusterKey,
 getConfiguration,
 getConnection,
 getDataTestDirOnTestFS,
 getDataTestDirOnTestFS,
 getDefaultRootDirPath,
 getDefaultRootDirPath,
 getDFSCluster,
 getDifferentUser,
 getFromStoreFile,
 getFromStoreFile,
 getHBaseAdmin,
 getHBaseCluster,
 getHBaseClusterInterface,
 getHbck,
 getMetaRSPort,
 getMetaTableDescriptor,
 getMetaTableDescriptorBuilder,
 getMetaTableRows,
 getMetaTableRows,
 getMiniHBaseCluster,
 getNumHFiles,
 getNumHFilesForRS,
 getOtherRegionServer,
 getRegionSplitStartKeys,
 getRSForFirstRegionInTable,
 getSplittableRegion,
 getSupportedCompressionAlgorithms,
 getTestFileSystem,
 isReadShortCircuitOn,
 loadNumericRows,
 loadRandomRows,
 loadRegion,
 loadRegion,
 loadRegion,
 loadTable, loadTable,
 loadTable,
 loadTable,
 loadTable,
 memStoreTSTagsAndOffheapCombination,
 modifyTableSync,
 moveRegionAndWait,
 predicateNoRegionsInTransition,
 predicateTableAvailable,
 predicateTableDisabled,
 predicateTableEnabled,
 randomFreePort,
 randomMultiCastAddress, restartHBaseCluster,
 safeGetAsStr,
 setDFSCluster,
 setDFSCluster,
 setFileSystemURI,
 setHBaseCluster,
 setMaxRecoveryErrorCount,
 setReplicas,
 setupDataTestDir,
 setupMiniKdc,
 shutdownMiniCluster,
 shutdownMiniDFSCluster,
 shutdownMiniHBaseCluster,
 shutdownMiniMapReduceCluster,
 startMiniCluster, startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniCluster, startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniDFSCluster,
 startMiniDFSCluster,
 startMiniDFSCluster,
 startMiniDFSCluster,
 startMiniDFSClusterForTestWAL,
 startMiniHBaseCluster,
 startMiniHBaseCluster,
 startMiniHBaseCluster,
 startMiniHBaseCluster,
 startMiniHBaseCluster,
 startMiniMapReduceCluster,
 truncateTable,
 truncateTable,
 unassignRegion,
 unassignRegion,
 unassignRegionByRow,
 unassignRegionByRow,
 verifyNumericRows,
 verifyNumericRows,
 verifyNumericRows,
 verifyNumericRows, verifyNumericRows,
 verifyTableDescriptorIgnoreTableName,
 waitForHostPort,
 waitLabelAvailable,
 waitTableAvailable,
 waitTableAvailable,
 waitTableAvailable,
 waitTableDisabled,
 waitTableDisabled,
 waitTableDisabled,
 waitTableEnabled,
 waitTableEnabled,
 waitTableEnabled,
 waitUntilAllRegionsAssigned,
 waitUntilAllRegionsAssigned,
 waitUntilAllSystemRegionsAssigned,
 

[04/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.SeenRowTracker.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.SeenRowTracker.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.SeenRowTracker.html
index bc4887a..b12b756 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.SeenRowTracker.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.SeenRowTracker.html

[17/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
index c62e029..36ceec7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html

[26/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/32cb0f25
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/32cb0f25
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/32cb0f25

Branch: refs/heads/asf-site
Commit: 32cb0f25bcf6e796b4a8807b555f0f5046c8041e
Parents: a5c72fc
Author: jenkins 
Authored: Fri Nov 30 14:52:40 2018 +
Committer: jenkins 
Committed: Fri Nov 30 14:52:40 2018 +

--
 acid-semantics.html |4 +-
 apache_hbase_reference_guide.pdf|4 +-
 book.html   |2 +-
 bulk-loads.html |4 +-
 checkstyle-aggregate.html   |  456 +-
 checkstyle.rss  |4 +-
 coc.html|4 +-
 dependencies.html   |4 +-
 dependency-convergence.html |4 +-
 dependency-info.html|4 +-
 dependency-management.html  |4 +-
 devapidocs/constant-values.html |4 +-
 devapidocs/index-all.html   |4 +
 .../hbase/backup/impl/BackupSystemTable.html|  218 +-
 .../hadoop/hbase/backup/package-tree.html   |2 +-
 ...ectionImplementation.MasterServiceState.html |   18 +-
 ...onImplementation.MasterServiceStubMaker.html |   10 +-
 ...ntation.ServerErrorTracker.ServerErrors.html |   10 +-
 ...ectionImplementation.ServerErrorTracker.html |   20 +-
 .../hbase/client/ConnectionImplementation.html  |  104 +-
 .../hadoop/hbase/client/package-tree.html   |   28 +-
 .../hadoop/hbase/coprocessor/package-tree.html  |2 +-
 .../hadoop/hbase/executor/package-tree.html |2 +-
 .../hadoop/hbase/filter/package-tree.html   |   10 +-
 .../hadoop/hbase/io/hfile/package-tree.html |6 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   |2 +-
 .../hadoop/hbase/mapreduce/package-tree.html|2 +-
 .../hbase/master/balancer/package-tree.html |2 +-
 .../hadoop/hbase/master/package-tree.html   |4 +-
 .../hbase/master/procedure/package-tree.html|4 +-
 .../hadoop/hbase/monitoring/package-tree.html   |2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |   14 +-
 .../hadoop/hbase/procedure2/package-tree.html   |6 +-
 .../hadoop/hbase/quotas/package-tree.html   |6 +-
 .../regionserver/class-use/HStoreFile.html  |   15 +
 .../compactions/FIFOCompactionPolicy.html   |   30 +-
 .../hadoop/hbase/regionserver/package-tree.html |   16 +-
 .../regionserver/querymatcher/package-tree.html |2 +-
 .../hbase/regionserver/wal/package-tree.html|2 +-
 .../hadoop/hbase/replication/package-tree.html  |2 +-
 .../replication/regionserver/package-tree.html  |2 +-
 .../hadoop/hbase/rest/model/package-tree.html   |2 +-
 .../hbase/security/access/package-tree.html |4 +-
 .../hadoop/hbase/security/package-tree.html |2 +-
 .../hadoop/hbase/thrift/package-tree.html   |2 +-
 .../apache/hadoop/hbase/util/package-tree.html  |8 +-
 .../org/apache/hadoop/hbase/Version.html|4 +-
 .../backup/impl/BackupSystemTable.WALItem.html  | 3544 +-
 .../hbase/backup/impl/BackupSystemTable.html| 3544 +-
 ...ectionImplementation.MasterServiceState.html | 2774 
 ...onImplementation.MasterServiceStubMaker.html | 2774 
 ...ntation.ServerErrorTracker.ServerErrors.html | 2774 
 ...ectionImplementation.ServerErrorTracker.html | 2774 
 .../hbase/client/ConnectionImplementation.html  | 2774 
 .../compactions/FIFOCompactionPolicy.html   |   84 +-
 .../hbase/util/JVMClusterUtil.MasterThread.html |  194 +-
 .../util/JVMClusterUtil.RegionServerThread.html |  194 +-
 .../hadoop/hbase/util/JVMClusterUtil.html   |  194 +-
 downloads.html  |4 +-
 export_control.html |4 +-
 index.html  |4 +-
 integration.html|4 +-
 issue-tracking.html |4 +-
 license.html|4 +-
 mail-lists.html |4 +-
 metrics.html|4 +-
 old_news.html   |4 +-
 plugin-management.html  |4 +-
 plugins.html|4 +-
 poweredbyhbase.html |4 +-
 project-info.html   |4 +-
 project-reports.html|4 +-
 project-summary.html|4 +-
 

hbase-site git commit: INFRA-10751 Empty commit

2018-11-30 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 32cb0f25b -> 2f0e8f921


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/2f0e8f92
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/2f0e8f92
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/2f0e8f92

Branch: refs/heads/asf-site
Commit: 2f0e8f921c7a478fae6f1c6ccc9905f1b25c4d42
Parents: 32cb0f2
Author: jenkins 
Authored: Fri Nov 30 14:53:01 2018 +
Committer: jenkins 
Committed: Fri Nov 30 14:53:01 2018 +

--

--




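Commits like this one are produced with git's standard mechanism for publishing without any content change, roughly:

git commit --allow-empty -m "INFRA-10751 Empty commit"
git push origin asf-site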
[21/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
index 981ebcd..b8c030f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html

[06/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
index bc4887a..b12b756 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html

[01/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site a5c72fccf -> 32cb0f25b


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.html
index af918c0..56420c8 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.html
@@ -30,176 +30,239 @@
 022import java.io.IOException;
 023import java.util.List;
 024import java.util.concurrent.ThreadLocalRandom;
-025import org.apache.hadoop.conf.Configuration;
-026import org.apache.hadoop.hbase.DoNotRetryIOException;
-027import org.apache.hadoop.hbase.HBaseClassTestRule;
-028import org.apache.hadoop.hbase.HBaseTestingUtility;
-029import org.apache.hadoop.hbase.HConstants;
-030import org.apache.hadoop.hbase.MiniHBaseCluster;
-031import org.apache.hadoop.hbase.TableName;
-032import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
-033import org.apache.hadoop.hbase.client.Admin;
-034import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-035import org.apache.hadoop.hbase.client.Put;
-036import org.apache.hadoop.hbase.client.Table;
-037import org.apache.hadoop.hbase.client.TableDescriptor;
-038import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-039import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
-040import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
-041import org.apache.hadoop.hbase.regionserver.HRegion;
-042import org.apache.hadoop.hbase.regionserver.HRegionServer;
-043import org.apache.hadoop.hbase.regionserver.HStore;
-044import org.apache.hadoop.hbase.testclassification.MediumTests;
-045import org.apache.hadoop.hbase.testclassification.RegionServerTests;
-046import org.apache.hadoop.hbase.util.Bytes;
-047import org.apache.hadoop.hbase.util.EnvironmentEdge;
-048import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-049import org.apache.hadoop.hbase.util.JVMClusterUtil;
-050import org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge;
-051import org.junit.AfterClass;
-052import org.junit.BeforeClass;
-053import org.junit.ClassRule;
-054import org.junit.Rule;
-055import org.junit.Test;
-056import org.junit.experimental.categories.Category;
-057import org.junit.rules.ExpectedException;
-058
-059@Category({ RegionServerTests.class, MediumTests.class })
-060public class TestFIFOCompactionPolicy {
+025
+026import org.apache.hadoop.conf.Configuration;
+027import org.apache.hadoop.hbase.DoNotRetryIOException;
+028import org.apache.hadoop.hbase.HBaseClassTestRule;
+029import org.apache.hadoop.hbase.HBaseTestingUtility;
+030import org.apache.hadoop.hbase.HConstants;
+031import org.apache.hadoop.hbase.MiniHBaseCluster;
+032import org.apache.hadoop.hbase.TableName;
+033import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
+034import org.apache.hadoop.hbase.client.Admin;
+035import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+036import org.apache.hadoop.hbase.client.Put;
+037import org.apache.hadoop.hbase.client.Table;
+038import org.apache.hadoop.hbase.client.TableDescriptor;
+039import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+040import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
+041import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
+042import org.apache.hadoop.hbase.regionserver.HRegion;
+043import org.apache.hadoop.hbase.regionserver.HRegionServer;
+044import org.apache.hadoop.hbase.regionserver.HStore;
+045import org.apache.hadoop.hbase.regionserver.HStoreFile;
+046import org.apache.hadoop.hbase.testclassification.MediumTests;
+047import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+048import org.apache.hadoop.hbase.util.Bytes;
+049import org.apache.hadoop.hbase.util.EnvironmentEdge;
+050import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+051import org.apache.hadoop.hbase.util.JVMClusterUtil;
+052import org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge;
+053import org.junit.AfterClass;
+054import org.junit.Assert;
+055import org.junit.BeforeClass;
+056import org.junit.ClassRule;
+057import org.junit.Rule;
+058import org.junit.Test;
+059import org.junit.experimental.categories.Category;
+060import org.junit.rules.ExpectedException;
 061
-062  @ClassRule
-063  public static final HBaseClassTestRule CLASS_RULE =
-064      HBaseClassTestRule.forClass(TestFIFOCompactionPolicy.class);
-065
-066  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-067
-068 

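The reworked test exercises FIFOCompactionPolicy, which expires whole store files once everything in them is past the column family's TTL instead of rewriting data. Creating such a table looks roughly like the following (a sketch mirroring the test's setup; table and family names are placeholders, and as in the test the region split policy is disabled):

TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("fifo-demo"))
    // Per-table switch of the default store engine's compaction policy.
    .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        FIFOCompactionPolicy.class.getName())
    // FIFO compaction requires that regions never split.
    .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
        DisabledRegionSplitPolicy.class.getName())
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f"))
        .setTimeToLive(60 * 60 * 24)   // one day, in seconds
        .build())
    .build();
admin.createTable(desc);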
[15/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
index c62e029..36ceec7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html

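The only substantive change in this re-rendered source page is the move from string concatenation to SLF4J parameterized logging. A minimal sketch of the pattern, assuming only the standard slf4j-api on the classpath (the class and table names here are illustrative, not from the diff):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ParameterizedLogging {
  private static final Logger LOG = LoggerFactory.getLogger(ParameterizedLogging.class);

  public static void main(String[] args) {
    String tableName = "demo";
    // Old style: the concatenation runs even when DEBUG is off, which is
    // why it was typically wrapped in an LOG.isDebugEnabled() guard.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Table " + tableName + " not enabled");
    }
    // New style: the {} placeholder is only formatted when DEBUG is
    // enabled, so the guard and the throwaway String both disappear.
    LOG.debug("Table {} not enabled", tableName);
  }
}
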
[25/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html b/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html
index 3212827..db62a89 100644
--- a/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html
+++ b/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html
@@ -1373,7 +1373,7 @@ implements Closeable
 
 
 close
-public void close()
+public void close()
 
 Specified by:
 close in interface java.io.Closeable
@@ -1388,7 +1388,7 @@ implements Closeable
 
 
 updateBackupInfo
-public void updateBackupInfo(BackupInfo info)
+public void updateBackupInfo(BackupInfo info)
                       throws java.io.IOException
 Updates status (state) of a backup session in the backup system table
 
@@ -1405,7 +1405,7 @@ implements Closeable
 
 
 readBulkLoadedFiles
-Map<byte[],String> readBulkLoadedFiles(String backupId)
+Map<byte[],String> readBulkLoadedFiles(String backupId)
                                 throws java.io.IOException
 
 Throws:
@@ -1419,7 +1419,7 @@ implements Closeable
 
 
 readBulkLoadedFiles
-public Map<byte[],List<org.apache.hadoop.fs.Path>>[] readBulkLoadedFiles(String backupId,
+public Map<byte[],List<org.apache.hadoop.fs.Path>>[] readBulkLoadedFiles(String backupId,
                                                                           List<TableName> sTableList)
                                                                    throws java.io.IOException
 
@@ -1434,7 +1434,7 @@ implements Closeable
 
 
 deleteBackupInfo
-public void deleteBackupInfo(String backupId)
+public void deleteBackupInfo(String backupId)
                       throws java.io.IOException
 Deletes backup status from the backup system table
 
@@ -1451,7 +1451,7 @@ implements Closeable
 
 
 writePathsPostBulkLoad
-public void writePathsPostBulkLoad(TableName tabName,
+public void writePathsPostBulkLoad(TableName tabName,
                                    byte[] region,

[03/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
index bc4887a..b12b756 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -1245,3081 +1245,3103 @@
 1237  }
 1238
 1239  /**
-1240   * Shutdown HBase mini cluster.  Does not shutdown zk or dfs if running.
-1241   */
-1242  public void shutdownMiniHBaseCluster() throws IOException {
-1243    if (hbaseAdmin != null) {
-1244      hbaseAdmin.close();
-1245      hbaseAdmin = null;
-1246    }
-1247    if (this.connection != null) {
-1248      this.connection.close();
-1249      this.connection = null;
+1240   * Shutdown HBase mini cluster. Does not shutdown zk or dfs if running.
+1241   * @throws java.io.IOException in case command is unsuccessful
+1242   */
+1243  public void shutdownMiniHBaseCluster() throws IOException {
+1244    cleanup();
+1245    if (this.hbaseCluster != null) {
+1246      this.hbaseCluster.shutdown();
+1247      // Wait till hbase is down before going on to shutdown zk.
+1248      this.hbaseCluster.waitUntilShutDown();
+1249      this.hbaseCluster = null;
 1250    }
-1251    // unset the configuration for MIN and MAX RS to start
-1252    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
-1253    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
-1254    if (this.hbaseCluster != null) {
-1255      this.hbaseCluster.shutdown();
-1256      // Wait till hbase is down before going on to shutdown zk.
-1257      this.hbaseCluster.waitUntilShutDown();
-1258      this.hbaseCluster = null;
-1259    }
-1260    if (zooKeeperWatcher != null) {
-1261      zooKeeperWatcher.close();
-1262      zooKeeperWatcher = null;
-1263    }
-1264  }
-1265
-1266  /**
-1267   * Returns the path to the default root dir the minicluster uses. If <code>create</code>
-1268   * is true, a new root directory path is fetched irrespective of whether it has been fetched
-1269   * before or not. If false, previous path is used.
-1270   * Note: this does not cause the root dir to be created.
-1271   * @return Fully qualified path for the default hbase root dir
-1272   * @throws IOException
-1273   */
-1274  public Path getDefaultRootDirPath(boolean create) throws IOException {
-1275    if (!create) {
-1276      return getDataTestDirOnTestFS();
-1277    } else {
-1278      return getNewDataTestDirOnTestFS();
-1279    }
-1280  }
-1281
-1282  /**
-1283   * Same as {{@link HBaseTestingUtility#getDefaultRootDirPath(boolean create)}
-1284   * except that <code>create</code> flag is false.
-1285   * Note: this does not cause the root dir to be created.
-1286   * @return Fully qualified path for the default hbase root dir
-1287   * @throws IOException
-1288   */
-1289  public Path getDefaultRootDirPath() throws IOException {
-1290    return getDefaultRootDirPath(false);
-1291  }
-1292
-1293  /**
-1294   * Creates an hbase rootdir in user home directory.  Also creates hbase
-1295   * version file.  Normally you won't make use of this method.  Root hbasedir
-1296   * is created for you as part of mini cluster startup.  You'd only use this
-1297   * method if you were doing manual operation.
-1298   * @param create This flag decides whether to get a new
-1299   * root or data directory path or not, if it has been fetched already.
-1300   * Note : Directory will be made irrespective of whether path has been fetched or not.
-1301   * If directory already exists, it will be overwritten
-1302   * @return Fully qualified path to hbase root dir
-1303   * @throws IOException
-1304   */
-1305  public Path createRootDir(boolean create) throws IOException {
-1306    FileSystem fs = FileSystem.get(this.conf);
-1307    Path hbaseRootdir = getDefaultRootDirPath(create);
-1308    FSUtils.setRootDir(this.conf, hbaseRootdir);
-1309    fs.mkdirs(hbaseRootdir);
-1310    FSUtils.setVersion(fs, hbaseRootdir);
-1311    return hbaseRootdir;
-1312  }
-1313
-1314  /**
-1315   * Same as {@link HBaseTestingUtility#createRootDir(boolean create)}
-1316   * except that <code>create</code> flag is false.
-1317   * @return Fully qualified path to hbase root dir
-1318   * @throws IOException
-1319   */
-1320  public Path createRootDir() throws IOException {
-1321    return createRootDir(false);
-1322  }
-1323
-1324  /**
-1325   * Creates a hbase walDir in the user's home directory.
-1326   * Normally you won't make use of this method. Root hbaseWALDir
-1327   * is created for you as part of mini cluster startup. You'd only use this
-1328   * method if you were doing manual operation.
-1329   *
-1330   * @return Fully qualified

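The refactor above extracts the admin/connection/zookeeper teardown into a cleanup() helper and only then stops the cluster itself. A minimal lifecycle sketch, assuming a standard HBase test classpath; the driver class is hypothetical, while the HBaseTestingUtility methods are the documented ones:

import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniHBaseClusterLifecycle {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(); // brings up zk, dfs, and hbase
    try {
      // ... exercise the cluster through util.getConnection() ...
    } finally {
      util.shutdownMiniHBaseCluster(); // hbase only; zk and dfs keep running
      util.shutdownMiniCluster();      // full teardown, including zk and dfs
    }
  }
}
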
[14/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.html
index 1876095..b4aeccc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.html
@@ -104,40 +104,56 @@
 096    return hasExpiredStores(storeFiles);
 097  }
 098
-099  private boolean hasExpiredStores(Collection<HStoreFile> files) {
-100    long currentTime = EnvironmentEdgeManager.currentTime();
-101    for (HStoreFile sf : files) {
-102      // Check MIN_VERSIONS is in HStore removeUnneededFiles
-103      long maxTs = sf.getReader().getMaxTimestamp();
-104      long maxTtl = storeConfigInfo.getStoreFileTtl();
-105      if (maxTtl == Long.MAX_VALUE
-106          || (currentTime - maxTtl < maxTs)) {
-107        continue;
-108      } else {
-109        return true;
-110      }
-111    }
-112    return false;
-113  }
-114
-115  private Collection<HStoreFile> getExpiredStores(Collection<HStoreFile> files,
-116      Collection<HStoreFile> filesCompacting) {
-117    long currentTime = EnvironmentEdgeManager.currentTime();
-118    Collection<HStoreFile> expiredStores = new ArrayList<>();
-119    for (HStoreFile sf : files) {
-120      // Check MIN_VERSIONS is in HStore removeUnneededFiles
-121      long maxTs = sf.getReader().getMaxTimestamp();
-122      long maxTtl = storeConfigInfo.getStoreFileTtl();
-123      if (maxTtl == Long.MAX_VALUE
-124          || (currentTime - maxTtl < maxTs)) {
-125        continue;
-126      } else if (filesCompacting == null || !filesCompacting.contains(sf)) {
-127        expiredStores.add(sf);
-128      }
-129    }
-130    return expiredStores;
-131  }
-132}
+099  /**
+100   * The FIFOCompactionPolicy only choose those TTL expired HFiles as the compaction candidates. So
+101   * if all HFiles are TTL expired, then the compaction will generate a new empty HFile. While its
+102   * max timestamp will be Long.MAX_VALUE. If not considered separately, the HFile will never be
+103   * archived because its TTL will be never expired. So we'll check the empty store file separately.
+104   * (See HBASE-21504)
+105   */
+106  private boolean isEmptyStoreFile(HStoreFile sf) {
+107    return sf.getReader().getEntries() == 0;
+108  }
+109
+110  private boolean hasExpiredStores(Collection<HStoreFile> files) {
+111    long currentTime = EnvironmentEdgeManager.currentTime();
+112    for (HStoreFile sf : files) {
+113      if (isEmptyStoreFile(sf)) {
+114        return true;
+115      }
+116      // Check MIN_VERSIONS is in HStore removeUnneededFiles
+117      long maxTs = sf.getReader().getMaxTimestamp();
+118      long maxTtl = storeConfigInfo.getStoreFileTtl();
+119      if (maxTtl == Long.MAX_VALUE || (currentTime - maxTtl < maxTs)) {
+120        continue;
+121      } else {
+122        return true;
+123      }
+124    }
+125    return false;
+126  }
+127
+128  private Collection<HStoreFile> getExpiredStores(Collection<HStoreFile> files,
+129      Collection<HStoreFile> filesCompacting) {
+130    long currentTime = EnvironmentEdgeManager.currentTime();
+131    Collection<HStoreFile> expiredStores = new ArrayList<>();
+132    for (HStoreFile sf : files) {
+133      if (isEmptyStoreFile(sf)) {
+134        expiredStores.add(sf);
+135        continue;
+136      }
+137      // Check MIN_VERSIONS is in HStore removeUnneededFiles
+138      long maxTs = sf.getReader().getMaxTimestamp();
+139      long maxTtl = storeConfigInfo.getStoreFileTtl();
+140      if (maxTtl == Long.MAX_VALUE || (currentTime - maxTtl < maxTs)) {
+141        continue;
+142      } else if (filesCompacting == null || !filesCompacting.contains(sf)) {
+143        expiredStores.add(sf);
+144      }
+145    }
+146    return expiredStores;
+147  }
+148}
 
 
 

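Restated outside HBase, the rule the patch adds is: a store file is a compaction candidate if it is empty (zero entries, so its max timestamp is the Long.MAX_VALUE sentinel) or if all of its cells are older than the TTL cutoff. A self-contained sketch of that predicate, with plain longs standing in for the HStoreFile reader calls:

public final class FifoTtlCheck {
  private FifoTtlCheck() {
  }

  // Empty files report Long.MAX_VALUE as their max timestamp and would
  // otherwise never look TTL-expired; HBASE-21504 special-cases them.
  static boolean isEmpty(long entries) {
    return entries == 0;
  }

  // Every cell is older than the TTL window relative to 'now'.
  static boolean isTtlExpired(long maxTimestamp, long ttlMs, long now) {
    return ttlMs != Long.MAX_VALUE && now - ttlMs >= maxTimestamp;
  }

  static boolean isCandidate(long entries, long maxTimestamp, long ttlMs, long now) {
    return isEmpty(entries) || isTtlExpired(maxTimestamp, ttlMs, now);
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    // An empty file is selected even though its timestamp never ages out.
    System.out.println(isCandidate(0, Long.MAX_VALUE, 60_000, now)); // true
    // A fresh, non-empty file is not selected.
    System.out.println(isCandidate(10, now, 60_000, now)); // false
  }
}
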
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/src-html/org/apache/hadoop/hbase/util/JVMClusterUtil.MasterThread.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/JVMClusterUtil.MasterThread.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/JVMClusterUtil.MasterThread.html
index 67f0fc6..9870370 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/JVMClusterUtil.MasterThread.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/JVMClusterUtil.MasterThread.html
@@ -257,99 +257,109 @@
 249      // Do backups first.
 250      JVMClusterUtil.MasterThread activeMaster = null;
 251      for (JVMClusterUtil.MasterThread t : masters) {
-252        if

[22/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index 5f46c6b..af25c37 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -18,9 +18,9 @@
 010  public static final String version = "3.0.0-SNAPSHOT";
 011  public static final String revision = "";
 012  public static final String user = "jenkins";
-013  public static final String date = "Thu Nov 29 14:44:16 UTC 2018";
+013  public static final String date = "Fri Nov 30 14:44:18 UTC 2018";
 014  public static final String url = "git://jenkins-websites1.apache.org/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015  public static final String srcChecksum = "76c50b67ef7b146f8930329762e594c4";
+015  public static final String srcChecksum = "50e37972f630f5708cca7f47f93c0b5e";
 016}
 
 



[16/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
index c62e029..36ceec7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
@@ -634,7 +634,7 @@
 626    checkClosed();
 627    try {
 628      if (!isTableEnabled(tableName)) {
-629        LOG.debug("Table " + tableName + " not enabled");
+629        LOG.debug("Table {} not enabled", tableName);
 630        return false;
 631      }
 632      List<Pair<RegionInfo, ServerName>> locations =

[09/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html b/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
index 234850f..8371bd2 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html

[18/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
index c62e029..36ceec7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
@@ -634,7 +634,7 @@
 626    checkClosed();
 627    try {
 628      if (!isTableEnabled(tableName)) {
-629        LOG.debug("Table " + tableName + " not enabled");
+629        LOG.debug("Table {} not enabled", tableName);
 630        return false;
 631      }
 632      List<Pair<RegionInfo, ServerName>> locations =

[05/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html
index bc4887a..b12b756 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html

[10/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/testdevapidocs/constant-values.html
--
diff --git a/testdevapidocs/constant-values.html b/testdevapidocs/constant-values.html
index d3d862a..b479472 100644
--- a/testdevapidocs/constant-values.html
+++ b/testdevapidocs/constant-values.html
@@ -1771,6 +1771,39 @@
 
 
 
+org.apache.hadoop.hbase.TestHBaseTestingUtility
+
+Modifier and Type         Constant Field   Value
+private static final int  NUMREGIONS          10
+private static final int  NUMROWS            100
+private static final int  NUMTABLES            1
+
+
 org.apache.hadoop.hbase.TestIOFencing
 
 Modifier and Type

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/testdevapidocs/index-all.html
--
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index 300b530..f979b24 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -6911,6 +6911,8 @@
 
 cleanup() - Method in class org.apache.hadoop.hbase.coprocessor.TestSecureExport
 
+cleanup() - Method in class org.apache.hadoop.hbase.HBaseTestingUtility
+
 cleanup() - Static method in class org.apache.hadoop.hbase.http.jmx.TestJMXJsonServlet
 
 cleanup() - Static method in class org.apache.hadoop.hbase.http.TestHttpServer
@@ -23181,6 +23183,10 @@
 
 killMetaRs - Variable in class org.apache.hadoop.hbase.chaos.factories.UnbalanceMonkeyFactory
 
+killMiniHBaseCluster() - Method in class org.apache.hadoop.hbase.HBaseTestingUtility
+
+Abruptly Shutdown HBase mini cluster.
+
 killNameNode(ServerName) - Method in class org.apache.hadoop.hbase.chaos.actions.Action
 
 killNameNode(ServerName) - Method in class org.apache.hadoop.hbase.DistributedHBaseCluster
@@ -28942,6 +28948,8 @@
 
 numRegions - Variable in class org.apache.hadoop.hbase.master.balancer.LoadBalancerPerformanceEvaluation
 
+NUMREGIONS - Static variable in class org.apache.hadoop.hbase.TestHBaseTestingUtility
+
 numRegionServer - Static variable in class org.apache.hadoop.hbase.coprocessor.TestCoprocessorServiceBackwardCompatibility.DummyCoprocessorService
 
 numRegionServers - Static variable in class org.apache.hadoop.hbase.replication.TestReplicationEndpoint
@@ -29006,6 +29014,8 @@
 
 numRows - Static variable in class org.apache.hadoop.hbase.rest.TestScannersWithFilters
 
+NUMROWS - Static variable in class org.apache.hadoop.hbase.TestHBaseTestingUtility
+
 numRowsLoadedWithExp1 - Variable in class org.apache.hadoop.hbase.test.IntegrationTestWithCellVisibilityLoadAndVerify
 
 numRowsLoadedWithExp2 - Variable in class org.apache.hadoop.hbase.test.IntegrationTestWithCellVisibilityLoadAndVerify
@@ -29054,6 +29064,8 @@
 
 numTables - Variable in class org.apache.hadoop.hbase.master.procedure.MasterProcedureSchedulerPerformanceEvaluation
 
+NUMTABLES - Static variable in class org.apache.hadoop.hbase.TestHBaseTestingUtility
+
 numTables - Variable in class org.apache.hadoop.hbase.util.LoadTestTool
 
 numThreads - Variable in class org.apache.hadoop.hbase.IntegrationTestDDLMasterFailover
@@ -39951,7 +39963,7 @@
 
 shutdownMiniHBaseCluster() - Method in class org.apache.hadoop.hbase.HBaseTestingUtility
 
-Shutdown HBase mini cluster.
+Shutdown HBase mini cluster. Does not shutdown zk or dfs if running.
 
 shutdownMiniMapReduceCluster() - Method in class org.apache.hadoop.hbase.HBaseTestingUtility
 
@@ -51809,6 +51821,10 @@
 
 TestFIFOCompactionPolicy() - Constructor for class org.apache.hadoop.hbase.regionserver.compactions.TestFIFOCompactionPolicy
 
+testFIFOCompactionPolicyExpiredEmptyHFiles() - Method in class org.apache.hadoop.hbase.regionserver.compactions.TestFIFOCompactionPolicy
+
+Unit test for HBASE-21504
+
 TestFifoRpcScheduler - Class in org.apache.hadoop.hbase.ipc
 
 TestFifoRpcScheduler() - Constructor for class org.apache.hadoop.hbase.ipc.TestFifoRpcScheduler
@@ -55066,6 +55082,8 @@
 
 testKeyWrapping() - Method in class org.apache.hadoop.hbase.security.TestEncryptionUtil
 
+testKillMiniHBaseCluster() - Method in class org.apache.hadoop.hbase.TestHBaseTestingUtility
+
 testKillRS() - Method in class org.apache.hadoop.hbase.replication.TestSerialReplicationFailover
 
 testKillRS() - Method in class org.apache.hadoop.hbase.rsgroup.TestRSGroupsKillRS

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html b/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
index 53aba37..6fa04ad 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html

hbase git commit: HBASE-21518 TestMasterFailoverWithProcedures is flaky

2018-11-30 Thread psomogyi
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 bba29961e -> 4ebbfa3d6


HBASE-21518 TestMasterFailoverWithProcedures is flaky

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4ebbfa3d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4ebbfa3d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4ebbfa3d

Branch: refs/heads/branch-2.1
Commit: 4ebbfa3d60c86fa7cac41ad582ab1b6edc990582
Parents: bba2996
Author: Peter Somogyi 
Authored: Thu Nov 29 18:04:53 2018 +0100
Committer: Peter Somogyi 
Committed: Fri Nov 30 09:14:20 2018 +0100

--
 .../hadoop/hbase/util/JVMClusterUtil.java   | 24 ++--
 1 file changed, 17 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4ebbfa3d/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
index 8c92f66..7518d65 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
@@ -249,14 +249,24 @@ public class JVMClusterUtil {
       // Do backups first.
       JVMClusterUtil.MasterThread activeMaster = null;
       for (JVMClusterUtil.MasterThread t : masters) {
-        if (!t.master.isActiveMaster()) {
-          try {
-            t.master.stopMaster();
-          } catch (IOException e) {
-            LOG.error("Exception occurred while stopping master", e);
+        // Master was killed but could be still considered as active. Check first if it is stopped.
+        if (!t.master.isStopped()) {
+          if (!t.master.isActiveMaster()) {
+            try {
+              t.master.stopMaster();
+            } catch (IOException e) {
+              LOG.error("Exception occurred while stopping master", e);
+            }
+            LOG.info("Stopped backup Master {} is stopped: {}",
+                t.master.hashCode(), t.master.isStopped());
+          } else {
+            if (activeMaster != null) {
+              LOG.warn("Found more than 1 active master, hash {}", activeMaster.master.hashCode());
+            }
+            activeMaster = t;
+            LOG.debug("Found active master hash={}, stopped={}",
+                t.master.hashCode(), t.master.isStopped());
           }
-        } else {
-          activeMaster = t;
         }
       }
       // Do active after.


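The flakiness came from shutdown treating a killed master as the active one and waiting on it; the patch skips any master thread that already reports isStopped(). A hypothetical reproduction sketch of the kill-then-shutdown sequence this hardens, using HBaseTestingUtility's documented methods (the driver class itself is illustrative, not part of the patch):

import org.apache.hadoop.hbase.HBaseTestingUtility;

public class KillThenShutdown {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();
    // Abruptly kill hbase: the active master dies without going through
    // stopMaster(), so it can still answer isActiveMaster() == true.
    util.killMiniHBaseCluster();
    // Before the patch, JVMClusterUtil.shutdown() could pick that dead
    // thread as the active master; checking isStopped() first skips it.
    util.shutdownMiniCluster();
  }
}
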

hbase git commit: HBASE-21518 TestMasterFailoverWithProcedures is flaky

2018-11-30 Thread psomogyi
Repository: hbase
Updated Branches:
  refs/heads/branch-2 1be42994a -> 9d5df33be


HBASE-21518 TestMasterFailoverWithProcedures is flaky

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9d5df33b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9d5df33b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9d5df33b

Branch: refs/heads/branch-2
Commit: 9d5df33be50999af87002fdf6b3fcf9f5fac7471
Parents: 1be4299
Author: Peter Somogyi 
Authored: Thu Nov 29 18:04:53 2018 +0100
Committer: Peter Somogyi 
Committed: Fri Nov 30 09:13:38 2018 +0100

--
 .../hadoop/hbase/util/JVMClusterUtil.java   | 24 ++--
 1 file changed, 17 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9d5df33b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
index 8c92f66..7518d65 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
@@ -249,14 +249,24 @@ public class JVMClusterUtil {
       // Do backups first.
       JVMClusterUtil.MasterThread activeMaster = null;
       for (JVMClusterUtil.MasterThread t : masters) {
-        if (!t.master.isActiveMaster()) {
-          try {
-            t.master.stopMaster();
-          } catch (IOException e) {
-            LOG.error("Exception occurred while stopping master", e);
+        // Master was killed but could be still considered as active. Check first if it is stopped.
+        if (!t.master.isStopped()) {
+          if (!t.master.isActiveMaster()) {
+            try {
+              t.master.stopMaster();
+            } catch (IOException e) {
+              LOG.error("Exception occurred while stopping master", e);
+            }
+            LOG.info("Stopped backup Master {} is stopped: {}",
+                t.master.hashCode(), t.master.isStopped());
+          } else {
+            if (activeMaster != null) {
+              LOG.warn("Found more than 1 active master, hash {}", activeMaster.master.hashCode());
+            }
+            activeMaster = t;
+            LOG.debug("Found active master hash={}, stopped={}",
+                t.master.hashCode(), t.master.isStopped());
           }
-        } else {
-          activeMaster = t;
         }
       }
       // Do active after.



hbase git commit: HBASE-21518 TestMasterFailoverWithProcedures is flaky

2018-11-30 Thread psomogyi
Repository: hbase
Updated Branches:
  refs/heads/master d42e0ade1 -> dfeab9f5c


HBASE-21518 TestMasterFailoverWithProcedures is flaky

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dfeab9f5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dfeab9f5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dfeab9f5

Branch: refs/heads/master
Commit: dfeab9f5c968625ac1c642c53c721eb5e81068c0
Parents: d42e0ad
Author: Peter Somogyi 
Authored: Thu Nov 29 18:04:53 2018 +0100
Committer: Peter Somogyi 
Committed: Fri Nov 30 09:12:06 2018 +0100

--
 .../hadoop/hbase/util/JVMClusterUtil.java   | 24 ++--
 1 file changed, 17 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dfeab9f5/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
index 8c92f66..7518d65 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
@@ -249,14 +249,24 @@ public class JVMClusterUtil {
       // Do backups first.
       JVMClusterUtil.MasterThread activeMaster = null;
       for (JVMClusterUtil.MasterThread t : masters) {
-        if (!t.master.isActiveMaster()) {
-          try {
-            t.master.stopMaster();
-          } catch (IOException e) {
-            LOG.error("Exception occurred while stopping master", e);
+        // Master was killed but could be still considered as active. Check first if it is stopped.
+        if (!t.master.isStopped()) {
+          if (!t.master.isActiveMaster()) {
+            try {
+              t.master.stopMaster();
+            } catch (IOException e) {
+              LOG.error("Exception occurred while stopping master", e);
+            }
+            LOG.info("Stopped backup Master {} is stopped: {}",
+                t.master.hashCode(), t.master.isStopped());
+          } else {
+            if (activeMaster != null) {
+              LOG.warn("Found more than 1 active master, hash {}", activeMaster.master.hashCode());
+            }
+            activeMaster = t;
+            LOG.debug("Found active master hash={}, stopped={}",
+                t.master.hashCode(), t.master.isStopped());
           }
-        } else {
-          activeMaster = t;
         }
       }
       // Do active after.