hbase git commit: HBASE-19083 Introduce a new log writer which can write to two HDFSes

2018-01-11 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/HBASE-19064 [created] cd2c54e0f


HBASE-19083 Introduce a new log writer which can write to two HDFSes


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cd2c54e0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cd2c54e0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cd2c54e0

Branch: refs/heads/HBASE-19064
Commit: cd2c54e0f34eae4a9341ec9bbd7f01d3e34ea32d
Parents: 71a1192
Author: zhangduo 
Authored: Thu Jan 11 21:08:02 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 11 21:08:10 2018 +0800

--
 .../hbase/regionserver/wal/AsyncFSWAL.java  |  26 ++--
 .../regionserver/wal/CombinedAsyncWriter.java   | 134 ++
 .../hbase/regionserver/wal/DualAsyncFSWAL.java  |  67 +
 .../wal/AbstractTestProtobufLog.java| 111 +++
 .../regionserver/wal/ProtobufLogTestHelper.java |  99 ++
 .../regionserver/wal/TestAsyncProtobufLog.java  |  44 ++
 .../wal/TestCombinedAsyncWriter.java| 136 +++
 .../hbase/regionserver/wal/TestProtobufLog.java |  15 +-
 .../regionserver/wal/WriterOverAsyncWriter.java |  63 +
 9 files changed, 541 insertions(+), 154 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cd2c54e0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index be8665b..50d1eac 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -620,8 +620,7 @@ public class AsyncFSWAL extends AbstractFSWAL {
 }
   }
 
-  @Override
-  protected AsyncWriter createWriterInstance(Path path) throws IOException {
+  protected final AsyncWriter createAsyncWriter(FileSystem fs, Path path) 
throws IOException {
 boolean overwrite = false;
 for (int retry = 0;; retry++) {
   try {
@@ -664,6 +663,11 @@ public class AsyncFSWAL extends AbstractFSWAL 
{
   createMaxRetries + " time(s)");
   }
 
+  @Override
+  protected AsyncWriter createWriterInstance(Path path) throws IOException {
+return createAsyncWriter(fs, path);
+  }
+
   private void waitForSafePoint() {
 consumeLock.lock();
 try {
@@ -706,21 +710,21 @@ public class AsyncFSWAL extends 
AbstractFSWAL {
 } finally {
   consumeLock.unlock();
 }
-return executeClose(closeExecutor, oldWriter);
+return executeClose(oldWriter);
   }
 
   @Override
   protected void doShutdown() throws IOException {
 waitForSafePoint();
-executeClose(closeExecutor, writer);
+executeClose(writer);
 closeExecutor.shutdown();
 try {
   if (!closeExecutor.awaitTermination(waitOnShutdownInSeconds, 
TimeUnit.SECONDS)) {
-LOG.error("We have waited " + waitOnShutdownInSeconds + " seconds but"
-  + " the close of async writer doesn't complete."
-  + "Please check the status of underlying filesystem"
-  + " or increase the wait time by the config \""
-  + ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS + "\"");
+LOG.error("We have waited " + waitOnShutdownInSeconds + " seconds but" 
+
+  " the close of async writer doesn't complete." +
+  "Please check the status of underlying filesystem" +
+  " or increase the wait time by the config \"" + 
ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS +
+  "\"");
   }
 } catch (InterruptedException e) {
   LOG.error("The wait for close of async writer is interrupted");
@@ -733,7 +737,7 @@ public class AsyncFSWAL extends AbstractFSWAL {
 }
   }
 
-  private static long executeClose(ExecutorService closeExecutor, AsyncWriter 
writer) {
+  protected final long executeClose(AsyncWriter writer) {
 long fileLength;
 if (writer != null) {
   fileLength = writer.getLength();
@@ -741,7 +745,7 @@ public class AsyncFSWAL extends AbstractFSWAL {
 try {
   writer.close();
 } catch (IOException e) {
-  LOG.warn("close old writer failed", e);
+  LOG.warn("close writer failed", e);
 }
   });
 } else {

http://git-wip-us.apache.org/repos/asf/hbase/blob/cd2c54e0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java
 

[1/2] hbase git commit: HBASE-19751 Use RegionInfo directly instead of an identifier and a namespace when getting WAL

2018-01-11 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 0885fe161 -> 814d08a2d


http://git-wip-us.apache.org/repos/asf/hbase/blob/814d08a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
index 057b9bf..68fa33f 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.regionserver.wal;
 
 import static org.junit.Assert.assertEquals;
@@ -23,20 +22,21 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.Arrays;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.ChunkCreator;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;
@@ -52,8 +52,10 @@ import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameter;
@@ -63,7 +65,7 @@ import org.junit.runners.Parameterized.Parameters;
  * Tests for WAL write durability
  */
 @RunWith(Parameterized.class)
-@Category({RegionServerTests.class, MediumTests.class})
+@Category({ RegionServerTests.class, MediumTests.class })
 public class TestDurability {
   private static final HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
   private static FileSystem FS;
@@ -78,6 +80,9 @@ public class TestDurability {
   @Parameter
   public String walProvider;
 
+  @Rule
+  public TestName name = new TestName();
+
   @Parameters(name = "{index}: provider={0}")
   public static Iterable data() {
 return Arrays.asList(new Object[] { "defaultProvider" }, new Object[] { 
"asyncfs" });
@@ -111,12 +116,12 @@ public class TestDurability {
 
   @Test
   public void testDurability() throws Exception {
-final WALFactory wals = new WALFactory(CONF, null, 
ServerName.valueOf("TestDurability",
-16010, System.currentTimeMillis()).toString());
-byte[] tableName = Bytes.toBytes("TestDurability");
-final WAL wal = wals.getWAL(tableName, null);
-HRegion region = createHRegion(tableName, "region", wal, 
Durability.USE_DEFAULT);
-HRegion deferredRegion = createHRegion(tableName, "deferredRegion", wal, 
Durability.ASYNC_WAL);
+WALFactory wals = new WALFactory(CONF, null,
+ServerName.valueOf("TestDurability", 16010, 
System.currentTimeMillis()).toString());
+HRegion region = createHRegion(wals, Durability.USE_DEFAULT);
+WAL wal = region.getWAL();
+HRegion deferredRegion = createHRegion(region.getTableDescriptor(), 
region.getRegionInfo(),
+  "deferredRegion", wal, Durability.ASYNC_WAL);
 
 region.put(newPut(null));
 verifyWALCount(wals, wal, 1);
@@ -175,11 +180,10 @@ public class TestDurability {
 byte[] col3 = Bytes.toBytes("col3");
 
 // Setting up region
-final WALFactory wals = new WALFactory(CONF, null,
+WALFactory wals = new WALFactory(CONF, null,
 ServerName.valueOf("TestIncrement", 16010, 
System.currentTimeMillis()).toString());
-byte[] tableName = Bytes.toBytes("TestIncrement");
-final WAL wal = wals.getWAL(tableName, null);
-HRegion region = createHRegion(tableName, "increment", wal, 
Durability.USE_DEFAULT);
+HRegion region = createHRegion(wals, Durability.USE_DEFAULT);
+WAL wal = region.getWAL();
 
 // col1: amount = 0, 1 write back to WAL
 Increment inc1 = new 

[1/2] hbase git commit: HBASE-19751 Use RegionInfo directly instead of an identifier and a namespace when getting WAL

2018-01-11 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 62a818894 -> 71a1192d6


http://git-wip-us.apache.org/repos/asf/hbase/blob/71a1192d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
index 057b9bf..68fa33f 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.regionserver.wal;
 
 import static org.junit.Assert.assertEquals;
@@ -23,20 +22,21 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.Arrays;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.ChunkCreator;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;
@@ -52,8 +52,10 @@ import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameter;
@@ -63,7 +65,7 @@ import org.junit.runners.Parameterized.Parameters;
  * Tests for WAL write durability
  */
 @RunWith(Parameterized.class)
-@Category({RegionServerTests.class, MediumTests.class})
+@Category({ RegionServerTests.class, MediumTests.class })
 public class TestDurability {
   private static final HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
   private static FileSystem FS;
@@ -78,6 +80,9 @@ public class TestDurability {
   @Parameter
   public String walProvider;
 
+  @Rule
+  public TestName name = new TestName();
+
   @Parameters(name = "{index}: provider={0}")
   public static Iterable data() {
 return Arrays.asList(new Object[] { "defaultProvider" }, new Object[] { 
"asyncfs" });
@@ -111,12 +116,12 @@ public class TestDurability {
 
   @Test
   public void testDurability() throws Exception {
-final WALFactory wals = new WALFactory(CONF, null, 
ServerName.valueOf("TestDurability",
-16010, System.currentTimeMillis()).toString());
-byte[] tableName = Bytes.toBytes("TestDurability");
-final WAL wal = wals.getWAL(tableName, null);
-HRegion region = createHRegion(tableName, "region", wal, 
Durability.USE_DEFAULT);
-HRegion deferredRegion = createHRegion(tableName, "deferredRegion", wal, 
Durability.ASYNC_WAL);
+WALFactory wals = new WALFactory(CONF, null,
+ServerName.valueOf("TestDurability", 16010, 
System.currentTimeMillis()).toString());
+HRegion region = createHRegion(wals, Durability.USE_DEFAULT);
+WAL wal = region.getWAL();
+HRegion deferredRegion = createHRegion(region.getTableDescriptor(), 
region.getRegionInfo(),
+  "deferredRegion", wal, Durability.ASYNC_WAL);
 
 region.put(newPut(null));
 verifyWALCount(wals, wal, 1);
@@ -175,11 +180,10 @@ public class TestDurability {
 byte[] col3 = Bytes.toBytes("col3");
 
 // Setting up region
-final WALFactory wals = new WALFactory(CONF, null,
+WALFactory wals = new WALFactory(CONF, null,
 ServerName.valueOf("TestIncrement", 16010, 
System.currentTimeMillis()).toString());
-byte[] tableName = Bytes.toBytes("TestIncrement");
-final WAL wal = wals.getWAL(tableName, null);
-HRegion region = createHRegion(tableName, "increment", wal, 
Durability.USE_DEFAULT);
+HRegion region = createHRegion(wals, Durability.USE_DEFAULT);
+WAL wal = region.getWAL();
 
 // col1: amount = 0, 1 write back to WAL
 Increment inc1 = new 

[2/2] hbase git commit: HBASE-19751 Use RegionInfo directly instead of an identifier and a namespace when getting WAL

2018-01-11 Thread zhangduo
HBASE-19751 Use RegionInfo directly instead of an identifier and a namespace 
when getting WAL


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/71a1192d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/71a1192d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/71a1192d

Branch: refs/heads/master
Commit: 71a1192d671a93cc17b82e4355f2ace97c41dae5
Parents: 62a8188
Author: zhangduo 
Authored: Thu Jan 11 15:47:08 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 11 15:47:34 2018 +0800

--
 .../hbase/mapreduce/TestWALRecordReader.java|  50 +++
 .../hbase/regionserver/HRegionServer.java   |  14 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |   6 +-
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |   3 +-
 .../hadoop/hbase/wal/DisabledWALProvider.java   |   2 +-
 .../hbase/wal/RegionGroupingProvider.java   |  40 +++---
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  64 -
 .../apache/hadoop/hbase/wal/WALProvider.java|  17 ++-
 .../hadoop/hbase/HBaseTestingUtility.java   |   5 +-
 .../hbase/coprocessor/TestWALObserver.java  | 117 +++--
 .../regionserver/TestCacheOnWriteInSchema.java  |  52 
 .../TestCompactionArchiveConcurrentClose.java   |  31 ++---
 .../TestCompactionArchiveIOException.java   |  42 +++---
 .../hbase/regionserver/TestDefaultMemStore.java |  45 +++
 .../hbase/regionserver/TestHMobStore.java   |   3 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |  13 +-
 .../regionserver/TestHRegionReplayEvents.java   |  43 +++---
 .../hadoop/hbase/regionserver/TestHStore.java   |  14 +-
 .../TestStoreFileRefresherChore.java|  51 
 .../TestWALMonotonicallyIncreasingSeqId.java|   2 +-
 .../wal/AbstractTestLogRolling.java |   9 +-
 .../hbase/regionserver/wal/TestDurability.java  |  93 +++--
 .../regionserver/wal/TestLogRollAbort.java  |  39 +++---
 .../wal/TestLogRollingNoCluster.java|  17 +--
 .../wal/TestWALActionsListener.java |  25 ++--
 .../TestReplicationSourceManager.java   |   2 +-
 .../regionserver/TestWALEntryStream.java|  13 +-
 .../apache/hadoop/hbase/wal/IOTestProvider.java |  48 ---
 .../wal/TestBoundedRegionGroupingStrategy.java  |  16 ++-
 .../hadoop/hbase/wal/TestFSHLogProvider.java| 130 ---
 .../apache/hadoop/hbase/wal/TestSecureWAL.java  |  22 +---
 .../apache/hadoop/hbase/wal/TestWALFactory.java | 127 +++---
 .../apache/hadoop/hbase/wal/TestWALMethods.java |   2 +-
 .../hbase/wal/TestWALReaderOnSecureWAL.java |  25 ++--
 .../apache/hadoop/hbase/wal/TestWALRootDir.java |  35 +++--
 .../apache/hadoop/hbase/wal/TestWALSplit.java   |  37 +++---
 .../hbase/wal/WALPerformanceEvaluation.java |  47 ---
 37 files changed, 591 insertions(+), 710 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/71a1192d/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
--
diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
index 18bb135..c8db903 100644
--- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
+++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
@@ -24,30 +24,28 @@ import static org.junit.Assert.assertTrue;
 import java.util.List;
 import java.util.NavigableMap;
 import java.util.TreeMap;
-import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.mapreduce.WALInputFormat.WALKeyRecordReader;
 import org.apache.hadoop.hbase.mapreduce.WALInputFormat.WALRecordReader;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
-import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.hadoop.hbase.testclassification.MapReduceTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import 

[2/2] hbase git commit: HBASE-19751 Use RegionInfo directly instead of an identifier and a namespace when getting WAL

2018-01-11 Thread zhangduo
HBASE-19751 Use RegionInfo directly instead of an identifier and a namespace 
when getting WAL


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/814d08a2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/814d08a2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/814d08a2

Branch: refs/heads/branch-2
Commit: 814d08a2d663ccdbacb33e84475713a72bf3726a
Parents: 0885fe1
Author: zhangduo 
Authored: Thu Jan 11 15:47:08 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 11 15:48:33 2018 +0800

--
 .../hbase/mapreduce/TestWALRecordReader.java|  50 +++
 .../hbase/regionserver/HRegionServer.java   |  14 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |   6 +-
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |   3 +-
 .../hadoop/hbase/wal/DisabledWALProvider.java   |   2 +-
 .../hbase/wal/RegionGroupingProvider.java   |  40 +++---
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  64 -
 .../apache/hadoop/hbase/wal/WALProvider.java|  17 ++-
 .../hadoop/hbase/HBaseTestingUtility.java   |   5 +-
 .../hbase/coprocessor/TestWALObserver.java  | 117 +++--
 .../regionserver/TestCacheOnWriteInSchema.java  |  52 
 .../TestCompactionArchiveConcurrentClose.java   |  31 ++---
 .../TestCompactionArchiveIOException.java   |  42 +++---
 .../hbase/regionserver/TestDefaultMemStore.java |  45 +++
 .../hbase/regionserver/TestHMobStore.java   |   3 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |  13 +-
 .../regionserver/TestHRegionReplayEvents.java   |  43 +++---
 .../hadoop/hbase/regionserver/TestHStore.java   |  14 +-
 .../TestStoreFileRefresherChore.java|  51 
 .../TestWALMonotonicallyIncreasingSeqId.java|   2 +-
 .../wal/AbstractTestLogRolling.java |   9 +-
 .../hbase/regionserver/wal/TestDurability.java  |  93 +++--
 .../regionserver/wal/TestLogRollAbort.java  |  39 +++---
 .../wal/TestLogRollingNoCluster.java|  17 +--
 .../wal/TestWALActionsListener.java |  25 ++--
 .../TestReplicationSourceManager.java   |   2 +-
 .../regionserver/TestWALEntryStream.java|  13 +-
 .../apache/hadoop/hbase/wal/IOTestProvider.java |  48 ---
 .../wal/TestBoundedRegionGroupingStrategy.java  |  16 ++-
 .../hadoop/hbase/wal/TestFSHLogProvider.java| 130 ---
 .../apache/hadoop/hbase/wal/TestSecureWAL.java  |  22 +---
 .../apache/hadoop/hbase/wal/TestWALFactory.java | 127 +++---
 .../apache/hadoop/hbase/wal/TestWALMethods.java |   2 +-
 .../hbase/wal/TestWALReaderOnSecureWAL.java |  25 ++--
 .../apache/hadoop/hbase/wal/TestWALRootDir.java |  35 +++--
 .../apache/hadoop/hbase/wal/TestWALSplit.java   |  37 +++---
 .../hbase/wal/WALPerformanceEvaluation.java |  47 ---
 37 files changed, 591 insertions(+), 710 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/814d08a2/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
--
diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
index 18bb135..c8db903 100644
--- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
+++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
@@ -24,30 +24,28 @@ import static org.junit.Assert.assertTrue;
 import java.util.List;
 import java.util.NavigableMap;
 import java.util.TreeMap;
-import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.mapreduce.WALInputFormat.WALKeyRecordReader;
 import org.apache.hadoop.hbase.mapreduce.WALInputFormat.WALRecordReader;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
-import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.hadoop.hbase.testclassification.MapReduceTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import 

[27/37] hbase git commit: HBASE-19633 Clean up the replication queues in the postPeerModification stage when removing a peer

2018-01-11 Thread zhangduo
HBASE-19633 Clean up the replication queues in the postPeerModification stage 
when removing a peer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8a55e4dc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8a55e4dc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8a55e4dc

Branch: refs/heads/HBASE-19397-branch-2
Commit: 8a55e4dcaaf7174316f064c4370cbf51e8d7d328
Parents: 1766555
Author: zhangduo 
Authored: Tue Jan 2 09:57:23 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../replication/ReplicationPeerConfig.java  |  2 +-
 .../replication/VerifyReplication.java  | 34 ++---
 .../hbase/replication/ReplicationPeers.java | 32 ++--
 .../replication/ZKReplicationQueueStorage.java  |  3 +-
 .../replication/ZKReplicationStorageBase.java   |  4 +-
 .../replication/TestReplicationStateBasic.java  | 10 +
 .../org/apache/hadoop/hbase/master/HMaster.java |  4 +-
 .../master/replication/AddPeerProcedure.java|  5 +--
 .../replication/DisablePeerProcedure.java   |  3 +-
 .../master/replication/EnablePeerProcedure.java |  3 +-
 .../master/replication/ModifyPeerProcedure.java | 34 +
 .../replication/RefreshPeerProcedure.java   | 17 -
 .../master/replication/RemovePeerProcedure.java |  7 ++--
 .../replication/ReplicationPeerManager.java | 31 +++-
 .../replication/UpdatePeerConfigProcedure.java  |  3 +-
 .../hbase/regionserver/HRegionServer.java   | 18 -
 .../RemoteProcedureResultReporter.java  |  3 +-
 .../regionserver/RefreshPeerCallable.java   |  5 ++-
 .../regionserver/ReplicationSourceManager.java  | 39 +++-
 .../TestReplicationAdminUsingProcedure.java |  7 ++--
 20 files changed, 134 insertions(+), 130 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8a55e4dc/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index b80ee16..fdae288 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -27,8 +27,8 @@ import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * A configuration for the replication peer cluster.

http://git-wip-us.apache.org/repos/asf/hbase/blob/8a55e4dc/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index f0070f0..fe45762 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.mapreduce.replication;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.UUID;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -45,13 +44,14 @@ import org.apache.hadoop.hbase.filter.PrefixFilter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat;
 import org.apache.hadoop.hbase.mapreduce.TableMapper;
+import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat;
 import org.apache.hadoop.hbase.mapreduce.TableSplit;
 import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
+import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import 

[15/37] hbase git commit: HBASE-19543 Abstract a replication storage interface to extract the zk specific code

2018-01-11 Thread zhangduo
HBASE-19543 Abstract a replication storage interface to extract the zk specific 
code


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/77d8bc05
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/77d8bc05
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/77d8bc05

Branch: refs/heads/HBASE-19397-branch-2
Commit: 77d8bc0586cb8d4476a47f8d68b213ceb7d06a02
Parents: e921579
Author: zhangduo 
Authored: Fri Dec 22 14:37:28 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../hadoop/hbase/util/CollectionUtils.java  |   3 +
 hbase-replication/pom.xml   |  12 +
 .../replication/ReplicationPeerStorage.java |  74 
 .../replication/ReplicationQueueStorage.java| 164 +++
 .../replication/ReplicationStateZKBase.java |   1 -
 .../replication/ReplicationStorageFactory.java  |  49 +++
 .../replication/ZKReplicationPeerStorage.java   | 164 +++
 .../replication/ZKReplicationQueueStorage.java  | 425 +++
 .../replication/ZKReplicationStorageBase.java   |  75 
 .../TestZKReplicationPeerStorage.java   | 171 
 .../TestZKReplicationQueueStorage.java  | 171 
 .../org/apache/hadoop/hbase/master/HMaster.java |  36 +-
 .../hadoop/hbase/master/MasterServices.java |   6 +-
 .../master/procedure/MasterProcedureEnv.java|  24 +-
 .../master/replication/AddPeerProcedure.java|   6 +-
 .../replication/DisablePeerProcedure.java   |   7 +-
 .../master/replication/EnablePeerProcedure.java |   6 +-
 .../master/replication/ModifyPeerProcedure.java |  41 +-
 .../master/replication/RemovePeerProcedure.java |   6 +-
 .../master/replication/ReplicationManager.java  | 199 -
 .../replication/ReplicationPeerManager.java | 331 +++
 .../replication/UpdatePeerConfigProcedure.java  |   7 +-
 .../replication/TestReplicationAdmin.java   |  62 ++-
 .../hbase/master/MockNoopMasterServices.java|  12 +-
 .../hbase/master/TestMasterNoCluster.java   |   4 +-
 .../TestReplicationDisableInactivePeer.java |   6 +-
 26 files changed, 1750 insertions(+), 312 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/77d8bc05/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
index 875b124..8bbb6f1 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
@@ -107,6 +107,9 @@ public class CollectionUtils {
 return list.get(list.size() - 1);
   }
 
+  public static  List nullToEmpty(List list) {
+return list != null ? list : Collections.emptyList();
+  }
   /**
* In HBASE-16648 we found that ConcurrentHashMap.get is much faster than 
computeIfAbsent if the
* value already exists. Notice that the implementation does not guarantee 
that the supplier will

http://git-wip-us.apache.org/repos/asf/hbase/blob/77d8bc05/hbase-replication/pom.xml
--
diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml
index bd593d3..b28e852 100644
--- a/hbase-replication/pom.xml
+++ b/hbase-replication/pom.xml
@@ -121,6 +121,18 @@
   org.apache.hbase
   hbase-zookeeper
 
+
+  org.apache.hbase
+  hbase-common
+  test-jar
+  test
+
+
+  org.apache.hbase
+  hbase-zookeeper
+  test-jar
+  test
+
 
 
   org.apache.commons

http://git-wip-us.apache.org/repos/asf/hbase/blob/77d8bc05/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
new file mode 100644
index 000..e00cd0d
--- /dev/null
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You 

[31/37] hbase git commit: HBASE-19687 Move the logic in ReplicationZKNodeCleaner to ReplicationChecker and remove ReplicationZKNodeCleanerChore

2018-01-11 Thread zhangduo
HBASE-19687 Move the logic in ReplicationZKNodeCleaner to ReplicationChecker 
and remove ReplicationZKNodeCleanerChore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0eb42daa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0eb42daa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0eb42daa

Branch: refs/heads/HBASE-19397-branch-2
Commit: 0eb42daa2f67326bc106ad4115ee34b8e3d4ad0a
Parents: bb7b0a7
Author: zhangduo 
Authored: Wed Jan 3 09:39:44 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../replication/VerifyReplication.java  |   6 +-
 .../hbase/replication/ReplicationPeers.java |  26 +--
 .../hbase/replication/ReplicationUtils.java |  69 +++
 .../replication/TestReplicationStateBasic.java  |   2 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  13 --
 .../cleaner/ReplicationZKNodeCleaner.java   | 192 ---
 .../cleaner/ReplicationZKNodeCleanerChore.java  |  54 --
 .../replication/ReplicationPeerManager.java |  18 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  13 +-
 .../hbase/util/hbck/ReplicationChecker.java | 109 +++
 .../cleaner/TestReplicationZKNodeCleaner.java   | 109 ---
 .../hbase/util/TestHBaseFsckReplication.java| 101 ++
 .../hadoop/hbase/util/hbck/HbckTestingUtil.java |   6 +-
 13 files changed, 259 insertions(+), 459 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0eb42daa/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index fe45762..fac4875 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -50,8 +50,8 @@ import org.apache.hadoop.hbase.mapreduce.TableSplit;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
-import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
+import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -345,10 +345,10 @@ public class VerifyReplication extends Configured 
implements Tool {
 }
   });
   ReplicationPeerStorage storage =
-  ReplicationStorageFactory.getReplicationPeerStorage(localZKW, conf);
+ReplicationStorageFactory.getReplicationPeerStorage(localZKW, conf);
   ReplicationPeerConfig peerConfig = storage.getPeerConfig(peerId);
   return Pair.newPair(peerConfig,
-ReplicationPeers.getPeerClusterConfiguration(peerConfig, conf));
+ReplicationUtils.getPeerClusterConfiguration(peerConfig, conf));
 } catch (ReplicationException e) {
   throw new IOException("An error occurred while trying to connect to the 
remove peer cluster",
   e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/0eb42daa/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
index 45940a5..fcbc350 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
@@ -17,14 +17,11 @@
  */
 package org.apache.hadoop.hbase.replication;
 
-import java.io.IOException;
 import java.util.Collections;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CompoundConfiguration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -106,25 +103,6 @@ public class ReplicationPeers {
 return 

[19/37] hbase git commit: HBASE-19573 Rewrite ReplicationPeer with the new replication storage interface

2018-01-11 Thread zhangduo
HBASE-19573 Rewrite ReplicationPeer with the new replication storage interface


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c33f1fcb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c33f1fcb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c33f1fcb

Branch: refs/heads/HBASE-19397-branch-2
Commit: c33f1fcbd95dc8ac5e5558033c5c39d302e52349
Parents: 25095e5
Author: Guanghao Zhang 
Authored: Tue Dec 26 11:39:34 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../replication/VerifyReplication.java  |   5 -
 .../hbase/replication/ReplicationPeer.java  |  42 ++--
 .../hbase/replication/ReplicationPeerImpl.java  | 169 ++
 .../replication/ReplicationPeerZKImpl.java  | 233 ---
 .../hbase/replication/ReplicationPeers.java |   4 +-
 .../replication/ReplicationPeersZKImpl.java |  23 +-
 .../replication/TestReplicationStateBasic.java  |   7 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  29 +--
 8 files changed, 216 insertions(+), 296 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c33f1fcb/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index 9065f4e..09d4b4b 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.mapreduce.TableSplit;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -333,7 +332,6 @@ public class VerifyReplication extends Configured 
implements Tool {
   private static Pair 
getPeerQuorumConfig(
   final Configuration conf, String peerId) throws IOException {
 ZKWatcher localZKW = null;
-ReplicationPeerZKImpl peer = null;
 try {
   localZKW = new ZKWatcher(conf, "VerifyReplication",
   new Abortable() {
@@ -354,9 +352,6 @@ public class VerifyReplication extends Configured 
implements Tool {
   throw new IOException(
   "An error occurred while trying to connect to the remove peer 
cluster", e);
 } finally {
-  if (peer != null) {
-peer.close();
-  }
   if (localZKW != null) {
 localZKW.close();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c33f1fcb/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index b66d76d..4846018 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
 
-
 /**
  * ReplicationPeer manages enabled / disabled state for the peer.
  */
@@ -49,65 +48,52 @@ public interface ReplicationPeer {
   String getId();
 
   /**
-   * Get the peer config object
-   * @return the ReplicationPeerConfig for this peer
-   */
-  public ReplicationPeerConfig getPeerConfig();
-
-  /**
-   * Get the peer config object. if loadFromBackingStore is true, it will load 
from backing store
-   * directly and update its load peer config. otherwise, just return the 
local cached peer config.
-   * @return the ReplicationPeerConfig for this peer
-   */
-  public ReplicationPeerConfig getPeerConfig(boolean loadFromBackingStore)
-  throws ReplicationException;
-
-  /**
* Returns the state of the peer by reading local cache.
* @return the enabled state
*/
   PeerState getPeerState();
 
   /**
-   * Returns the state of peer, if loadFromBackingStore is true, it will load 
from backing 

[20/37] hbase git commit: HBASE-19630 Add peer cluster key check when add new replication peer

2018-01-11 Thread zhangduo
HBASE-19630 Add peer cluster key check when add new replication peer

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/38ee83c7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/38ee83c7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/38ee83c7

Branch: refs/heads/HBASE-19397-branch-2
Commit: 38ee83c7b270dfc7eacd06440025583514e6bd53
Parents: c33f1fc
Author: Guanghao Zhang 
Authored: Tue Dec 26 21:10:00 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../replication/ReplicationPeerManager.java | 54 
 .../replication/TestReplicationAdmin.java   | 22 
 2 files changed, 54 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/38ee83c7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 84abfeb..b78cbce 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.master.replication;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -42,6 +43,7 @@ import 
org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
+import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -216,36 +218,36 @@ public final class ReplicationPeerManager {
 return desc != null ? Optional.of(desc.getPeerConfig()) : Optional.empty();
   }
 
-  /**
-   * If replicate_all flag is true, it means all user tables will be 
replicated to peer cluster.
-   * Then allow config exclude namespaces or exclude table-cfs which can't be 
replicated to peer
-   * cluster.
-   * 
-   * If replicate_all flag is false, it means all user tables can't be 
replicated to peer cluster.
-   * Then allow to config namespaces or table-cfs which will be replicated to 
peer cluster.
-   */
-  private static void checkPeerConfig(ReplicationPeerConfig peerConfig)
-  throws DoNotRetryIOException {
+  private void checkPeerConfig(ReplicationPeerConfig peerConfig) throws 
DoNotRetryIOException {
+checkClusterKey(peerConfig.getClusterKey());
+
 if (peerConfig.replicateAllUserTables()) {
-  if ((peerConfig.getNamespaces() != null && 
!peerConfig.getNamespaces().isEmpty()) ||
-(peerConfig.getTableCFsMap() != null && 
!peerConfig.getTableCFsMap().isEmpty())) {
-throw new DoNotRetryIOException("Need clean namespaces or table-cfs 
config firstly " +
-  "when you want replicate all cluster");
+  // If replicate_all flag is true, it means all user tables will be 
replicated to peer cluster.
+  // Then allow config exclude namespaces or exclude table-cfs which can't 
be replicated to peer
+  // cluster.
+  if ((peerConfig.getNamespaces() != null && 
!peerConfig.getNamespaces().isEmpty())
+  || (peerConfig.getTableCFsMap() != null && 
!peerConfig.getTableCFsMap().isEmpty())) {
+throw new DoNotRetryIOException("Need clean namespaces or table-cfs 
config firstly "
++ "when you want replicate all cluster");
   }
   
checkNamespacesAndTableCfsConfigConflict(peerConfig.getExcludeNamespaces(),
 peerConfig.getExcludeTableCFsMap());
 } else {
-  if ((peerConfig.getExcludeNamespaces() != null &&
-!peerConfig.getExcludeNamespaces().isEmpty()) ||
-(peerConfig.getExcludeTableCFsMap() != null &&
-  !peerConfig.getExcludeTableCFsMap().isEmpty())) {
+  // If replicate_all flag is false, it means all user tables can't be 
replicated to peer
+  // cluster. Then allow to config namespaces or table-cfs which will be 
replicated to peer
+  // cluster.
+  if ((peerConfig.getExcludeNamespaces() != null
+  && !peerConfig.getExcludeNamespaces().isEmpty())
+  || (peerConfig.getExcludeTableCFsMap() != null
+  && !peerConfig.getExcludeTableCFsMap().isEmpty())) {
 throw new DoNotRetryIOException(
-"Need 

[01/37] hbase git commit: HBASE-19751 Use RegionInfo directly instead of an identifier and a namespace when getting WAL [Forced Update!]

2018-01-11 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/HBASE-19397-branch-2 f84176f13 -> c5d18c0f2 (forced update)


http://git-wip-us.apache.org/repos/asf/hbase/blob/814d08a2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
index 057b9bf..68fa33f 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.regionserver.wal;
 
 import static org.junit.Assert.assertEquals;
@@ -23,20 +22,21 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.Arrays;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.ChunkCreator;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;
@@ -52,8 +52,10 @@ import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameter;
@@ -63,7 +65,7 @@ import org.junit.runners.Parameterized.Parameters;
  * Tests for WAL write durability
  */
 @RunWith(Parameterized.class)
-@Category({RegionServerTests.class, MediumTests.class})
+@Category({ RegionServerTests.class, MediumTests.class })
 public class TestDurability {
   private static final HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
   private static FileSystem FS;
@@ -78,6 +80,9 @@ public class TestDurability {
   @Parameter
   public String walProvider;
 
+  @Rule
+  public TestName name = new TestName();
+
   @Parameters(name = "{index}: provider={0}")
   public static Iterable data() {
 return Arrays.asList(new Object[] { "defaultProvider" }, new Object[] { 
"asyncfs" });
@@ -111,12 +116,12 @@ public class TestDurability {
 
   @Test
   public void testDurability() throws Exception {
-final WALFactory wals = new WALFactory(CONF, null, 
ServerName.valueOf("TestDurability",
-16010, System.currentTimeMillis()).toString());
-byte[] tableName = Bytes.toBytes("TestDurability");
-final WAL wal = wals.getWAL(tableName, null);
-HRegion region = createHRegion(tableName, "region", wal, 
Durability.USE_DEFAULT);
-HRegion deferredRegion = createHRegion(tableName, "deferredRegion", wal, 
Durability.ASYNC_WAL);
+WALFactory wals = new WALFactory(CONF, null,
+ServerName.valueOf("TestDurability", 16010, 
System.currentTimeMillis()).toString());
+HRegion region = createHRegion(wals, Durability.USE_DEFAULT);
+WAL wal = region.getWAL();
+HRegion deferredRegion = createHRegion(region.getTableDescriptor(), 
region.getRegionInfo(),
+  "deferredRegion", wal, Durability.ASYNC_WAL);
 
 region.put(newPut(null));
 verifyWALCount(wals, wal, 1);
@@ -175,11 +180,10 @@ public class TestDurability {
 byte[] col3 = Bytes.toBytes("col3");
 
 // Setting up region
-final WALFactory wals = new WALFactory(CONF, null,
+WALFactory wals = new WALFactory(CONF, null,
 ServerName.valueOf("TestIncrement", 16010, 
System.currentTimeMillis()).toString());
-byte[] tableName = Bytes.toBytes("TestIncrement");
-final WAL wal = wals.getWAL(tableName, null);
-HRegion region = createHRegion(tableName, "increment", wal, 
Durability.USE_DEFAULT);
+HRegion region = createHRegion(wals, Durability.USE_DEFAULT);
+WAL wal = region.getWAL();
 
 // col1: amount = 0, 1 write back to WAL
 

[36/37] hbase git commit: HBASE-19719 Fix checkstyle issues

2018-01-11 Thread zhangduo
HBASE-19719 Fix checkstyle issues


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/87077b45
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/87077b45
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/87077b45

Branch: refs/heads/HBASE-19397-branch-2
Commit: 87077b45eee6d78f9b32e1d0b5c727b17b3c48ef
Parents: d2c10e7
Author: zhangduo 
Authored: Sat Jan 6 08:30:55 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../hbase/replication/ReplicationStorageFactory.java   |  2 +-
 .../master/assignment/RegionTransitionProcedure.java   |  4 ++--
 .../hbase/master/procedure/RSProcedureDispatcher.java  | 13 ++---
 .../master/ReplicationPeerConfigUpgrader.java  |  8 
 4 files changed, 13 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/87077b45/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
index 60d0749..462cfed 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
@@ -27,7 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  * For now we only have zk based implementation.
  */
 @InterfaceAudience.Private
-public class ReplicationStorageFactory {
+public final class ReplicationStorageFactory {
 
   private ReplicationStorageFactory() {
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/87077b45/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
index 1724a38..8277dbe 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
@@ -36,11 +36,11 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 
-import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-
 /**
  * Base class for the Assign and Unassign Procedure.
  *

http://git-wip-us.apache.org/repos/asf/hbase/blob/87077b45/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
index 0f68f31..a6d57d2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.master.procedure;
 
 import java.io.IOException;
@@ -36,6 +35,12 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap;
+import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
+import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
@@ -47,12 +52,6 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionR
 

[22/37] hbase git commit: HBASE-19642 Fix locking for peer modification procedure

2018-01-11 Thread zhangduo
HBASE-19642 Fix locking for peer modification procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0c927a0b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0c927a0b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0c927a0b

Branch: refs/heads/HBASE-19397-branch-2
Commit: 0c927a0ba42980635a0935adffae628f6e4dbe59
Parents: daa7103
Author: zhangduo 
Authored: Wed Dec 27 18:27:13 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../procedure/MasterProcedureScheduler.java | 14 +
 .../master/replication/ModifyPeerProcedure.java | 21 +---
 2 files changed, 32 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0c927a0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index a25217c..4ecb3b1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -610,6 +610,20 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
 public boolean requireExclusiveLock(Procedure proc) {
   return requirePeerExclusiveLock((PeerProcedureInterface) proc);
 }
+
+@Override
+public boolean isAvailable() {
+  if (isEmpty()) {
+return false;
+  }
+  if (getLockStatus().hasExclusiveLock()) {
+// if we have an exclusive lock already taken
+// only child of the lock owner can be executed
+Procedure nextProc = peek();
+return nextProc != null && getLockStatus().hasLockAccess(nextProc);
+  }
+  return true;
+}
   }
 
   // 


http://git-wip-us.apache.org/repos/asf/hbase/blob/0c927a0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
index 279fbc7..a682606 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
@@ -46,6 +46,8 @@ public abstract class ModifyPeerProcedure
 
   protected String peerId;
 
+  private volatile boolean locked;
+
   // used to keep compatible with old client where we can only returns after 
updateStorage.
   protected ProcedurePrepareLatch latch;
 
@@ -145,17 +147,30 @@ public abstract class ModifyPeerProcedure
 
   @Override
   protected LockState acquireLock(MasterProcedureEnv env) {
-return env.getProcedureScheduler().waitPeerExclusiveLock(this, peerId)
-  ? LockState.LOCK_EVENT_WAIT
-  : LockState.LOCK_ACQUIRED;
+if (env.getProcedureScheduler().waitPeerExclusiveLock(this, peerId)) {
+  return  LockState.LOCK_EVENT_WAIT;
+}
+locked = true;
+return LockState.LOCK_ACQUIRED;
   }
 
   @Override
   protected void releaseLock(MasterProcedureEnv env) {
+locked = false;
 env.getProcedureScheduler().wakePeerExclusiveLock(this, peerId);
   }
 
   @Override
+  protected boolean holdLock(MasterProcedureEnv env) {
+return true;
+  }
+
+  @Override
+  protected boolean hasLock(MasterProcedureEnv env) {
+return locked;
+  }
+
+  @Override
   protected void rollbackState(MasterProcedureEnv env, PeerModificationState 
state)
   throws IOException, InterruptedException {
 if (state == PeerModificationState.PRE_PEER_MODIFICATION) {



[17/37] hbase git commit: HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly

2018-01-11 Thread zhangduo
HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c8cf0ef7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c8cf0ef7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c8cf0ef7

Branch: refs/heads/HBASE-19397-branch-2
Commit: c8cf0ef7ed5737b30bcddd432ae8b86448eeaed0
Parents: 77d8bc0
Author: zhangduo 
Authored: Mon Dec 25 18:49:56 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../hbase/replication/ReplicationFactory.java   |  19 +-
 .../replication/ReplicationPeersZKImpl.java |  21 +-
 .../replication/ReplicationQueueStorage.java|  26 +-
 .../replication/ReplicationQueuesClient.java|  93 -
 .../ReplicationQueuesClientArguments.java   |  40 --
 .../ReplicationQueuesClientZKImpl.java  | 176 -
 .../replication/ZKReplicationQueueStorage.java  |  90 -
 .../replication/TestReplicationStateBasic.java  | 378 +++
 .../replication/TestReplicationStateZKImpl.java | 148 
 .../TestZKReplicationQueueStorage.java  |  74 
 .../cleaner/ReplicationZKNodeCleaner.java   |  71 ++--
 .../cleaner/ReplicationZKNodeCleanerChore.java  |   5 +-
 .../replication/ReplicationPeerManager.java |  31 +-
 .../master/ReplicationHFileCleaner.java | 109 ++
 .../master/ReplicationLogCleaner.java   |  35 +-
 .../regionserver/DumpReplicationQueues.java |  78 ++--
 .../hbase/util/hbck/ReplicationChecker.java |  14 +-
 .../client/TestAsyncReplicationAdminApi.java|  31 +-
 .../replication/TestReplicationAdmin.java   |   2 +
 .../hbase/master/cleaner/TestLogsCleaner.java   |  30 +-
 .../cleaner/TestReplicationHFileCleaner.java|  59 +--
 .../cleaner/TestReplicationZKNodeCleaner.java   |  12 +-
 .../replication/TestReplicationStateBasic.java  | 378 ---
 .../replication/TestReplicationStateZKImpl.java | 227 ---
 .../TestReplicationSourceManagerZkImpl.java |  84 ++---
 25 files changed, 905 insertions(+), 1326 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c8cf0ef7/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index 9f4ad18..6c1c213 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -37,20 +36,14 @@ public class ReplicationFactory {
   args);
   }
 
-  public static ReplicationQueuesClient
-  getReplicationQueuesClient(ReplicationQueuesClientArguments args) throws 
Exception {
-return (ReplicationQueuesClient) ConstructorUtils
-.invokeConstructor(ReplicationQueuesClientZKImpl.class, args);
-  }
-
-  public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, 
Configuration conf,
- Abortable abortable) {
+  public static ReplicationPeers getReplicationPeers(ZKWatcher zk, 
Configuration conf,
+  Abortable abortable) {
 return getReplicationPeers(zk, conf, null, abortable);
   }
 
-  public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, 
Configuration conf,
- final 
ReplicationQueuesClient queuesClient, Abortable abortable) {
-return new ReplicationPeersZKImpl(zk, conf, queuesClient, abortable);
+  public static ReplicationPeers getReplicationPeers(ZKWatcher zk, 
Configuration conf,
+  ReplicationQueueStorage queueStorage, Abortable abortable) {
+return new ReplicationPeersZKImpl(zk, conf, queueStorage, abortable);
   }
 
   public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper,

http://git-wip-us.apache.org/repos/asf/hbase/blob/c8cf0ef7/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 8e2c5f4..baba3b7 100644
--- 

[32/37] hbase git commit: HBASE-19634 Add permission check for executeProcedures in AccessController

2018-01-11 Thread zhangduo
HBASE-19634 Add permission check for executeProcedures in AccessController


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/83e6522e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/83e6522e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/83e6522e

Branch: refs/heads/HBASE-19397-branch-2
Commit: 83e6522e81d7ef136c241a05b4869d746a368113
Parents: 07dbf9f
Author: zhangduo 
Authored: Thu Jan 4 16:18:21 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../hbase/coprocessor/RegionServerObserver.java | 14 +
 .../hbase/regionserver/RSRpcServices.java   | 54 +++-
 .../RegionServerCoprocessorHost.java| 18 +++
 .../hbase/security/access/AccessController.java | 30 ++-
 .../hadoop/hbase/TestJMXConnectorServer.java|  7 +++
 .../security/access/TestAccessController.java   | 18 +--
 6 files changed, 101 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/83e6522e/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
index c1af3fb..5b751df 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
@@ -126,4 +126,18 @@ public interface RegionServerObserver {
   default void postClearCompactionQueues(
   final ObserverContext ctx)
   throws IOException {}
+
+  /**
+   * This will be called before executing procedures
+   * @param ctx the environment to interact with the framework and region 
server.
+   */
+  default void 
preExecuteProcedures(ObserverContext ctx)
+  throws IOException {}
+
+  /**
+   * This will be called after executing procedures
+   * @param ctx the environment to interact with the framework and region 
server.
+   */
+  default void 
postExecuteProcedures(ObserverContext ctx)
+  throws IOException {}
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/83e6522e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index e88f70e..695b859 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -41,7 +41,6 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.LongAdder;
-
 import org.apache.commons.lang3.mutable.MutableObject;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -142,6 +141,7 @@ import 
org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
 import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
 import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat;
 import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
@@ -3454,36 +3454,40 @@ public class RSRpcServices implements 
HBaseRPCErrorHandler,
   }
 
   @Override
+  @QosPriority(priority = HConstants.ADMIN_QOS)
   public ExecuteProceduresResponse executeProcedures(RpcController controller,
   ExecuteProceduresRequest request) throws ServiceException {
-if (request.getOpenRegionCount() > 0) {
-  for (OpenRegionRequest req : request.getOpenRegionList()) {
-openRegion(controller, req);
+try {
+  checkOpen();
+  regionServer.getRegionServerCoprocessorHost().preExecuteProcedures();
+  if (request.getOpenRegionCount() > 0) {
+for (OpenRegionRequest req : request.getOpenRegionList()) {
+  openRegion(controller, req);
+}
   }
-}
-if (request.getCloseRegionCount() > 0) {
-  for (CloseRegionRequest req : request.getCloseRegionList()) {
-closeRegion(controller, req);
+  if (request.getCloseRegionCount() > 0) {
+for (CloseRegionRequest req : request.getCloseRegionList()) {
+  closeRegion(controller, req);
+

[21/37] hbase git commit: HBASE-19592 Add UTs to test retry on update zk failure

2018-01-11 Thread zhangduo
HBASE-19592 Add UTs to test retry on update zk failure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/daa7103d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/daa7103d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/daa7103d

Branch: refs/heads/HBASE-19397-branch-2
Commit: daa7103ded955ce5b9598eafb013fe04b8adde51
Parents: 38ee83c
Author: zhangduo 
Authored: Tue Dec 26 20:39:00 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../replication/ReplicationPeerManager.java |   5 +-
 .../TestReplicationProcedureRetry.java  | 200 +++
 2 files changed, 202 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/daa7103d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index b78cbce..f4ccce8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -53,7 +53,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  * Used to add/remove a replication peer.
  */
 @InterfaceAudience.Private
-public final class ReplicationPeerManager {
+public class ReplicationPeerManager {
 
   private final ReplicationPeerStorage peerStorage;
 
@@ -61,8 +61,7 @@ public final class ReplicationPeerManager {
 
   private final ConcurrentMap peers;
 
-  private ReplicationPeerManager(ReplicationPeerStorage peerStorage,
-  ReplicationQueueStorage queueStorage,
+  ReplicationPeerManager(ReplicationPeerStorage peerStorage, 
ReplicationQueueStorage queueStorage,
   ConcurrentMap peers) {
 this.peerStorage = peerStorage;
 this.queueStorage = queueStorage;

http://git-wip-us.apache.org/repos/asf/hbase/blob/daa7103d/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
new file mode 100644
index 000..ab35b46
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
@@ -0,0 +1,200 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.spy;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.invocation.InvocationOnMock;
+

[28/37] hbase git commit: HBASE-19623 Create replication endpoint asynchronously when adding a replication source

2018-01-11 Thread zhangduo
HBASE-19623 Create replication endpoint asynchronously when adding a 
replication source


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5e55ff5d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5e55ff5d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5e55ff5d

Branch: refs/heads/HBASE-19397-branch-2
Commit: 5e55ff5da7625233f8ba4b1b58bd4c033a31e9b2
Parents: 8a55e4d
Author: zhangduo 
Authored: Tue Jan 2 13:25:58 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../hbase/replication/ReplicationPeer.java  |   8 ++
 .../hbase/replication/ReplicationPeers.java |  18 +--
 .../replication/ZKReplicationPeerStorage.java   |   7 +-
 .../replication/TestReplicationStateBasic.java  |  20 +---
 .../TestZKReplicationPeerStorage.java   |  14 +--
 .../HBaseInterClusterReplicationEndpoint.java   |  17 ++-
 .../RecoveredReplicationSource.java |  13 +--
 .../regionserver/ReplicationSource.java | 110 +++
 .../ReplicationSourceInterface.java |   8 +-
 .../regionserver/ReplicationSourceManager.java  |  47 +---
 .../client/TestAsyncReplicationAdminApi.java|   2 -
 .../replication/TestReplicationAdmin.java   |   2 -
 .../replication/ReplicationSourceDummy.java |   7 +-
 .../replication/TestReplicationSource.java  |  27 +++--
 .../TestReplicationSourceManager.java   |   8 +-
 15 files changed, 127 insertions(+), 181 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5e55ff5d/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index 4846018..2da3cce 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -54,6 +54,14 @@ public interface ReplicationPeer {
   PeerState getPeerState();
 
   /**
+   * Test whether the peer is enabled.
+   * @return {@code true} if enabled, otherwise {@code false}.
+   */
+  default boolean isPeerEnabled() {
+return getPeerState() == PeerState.ENABLED;
+  }
+
+  /**
* Get the peer config object
* @return the ReplicationPeerConfig for this peer
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e55ff5d/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
index 422801b..45940a5 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.replication;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
@@ -86,21 +87,6 @@ public class ReplicationPeers {
   }
 
   /**
-   * Get the peer state for the specified connected remote slave cluster. The 
value might be read
-   * from cache, so it is recommended to use {@link #peerStorage } to read 
storage directly if
-   * reading the state after enabling or disabling it.
-   * @param peerId a short that identifies the cluster
-   * @return true if replication is enabled, false otherwise.
-   */
-  public boolean isPeerEnabled(String peerId) {
-ReplicationPeer replicationPeer = this.peerCache.get(peerId);
-if (replicationPeer == null) {
-  throw new IllegalArgumentException("Peer with id= " + peerId + " is not 
cached");
-}
-return replicationPeer.getPeerState() == PeerState.ENABLED;
-  }
-
-  /**
* Returns the ReplicationPeerImpl for the specified cached peer. This 
ReplicationPeer will
* continue to track changes to the Peer's state and config. This method 
returns null if no peer
* has been cached with the given peerId.
@@ -117,7 +103,7 @@ public class ReplicationPeers {
* @return a Set of Strings for peerIds
*/
   public Set getAllPeerIds() {
-return peerCache.keySet();
+return Collections.unmodifiableSet(peerCache.keySet());
   }
 
   public static Configuration 
getPeerClusterConfiguration(ReplicationPeerConfig peerConfig,


[23/37] hbase git commit: HBASE-19622 Reimplement ReplicationPeers with the new replication storage interface

2018-01-11 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/17665554/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 853bafb..24a4f30 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -166,7 +166,6 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 this.clusterId = clusterId;
 this.walFileLengthProvider = walFileLengthProvider;
 this.replicationTracker.registerListener(this);
-this.replicationPeers.getAllPeerIds();
 // It's preferable to failover 1 RS at a time, but with good zk servers
 // more could be processed at the same time.
 int nbWorkers = conf.getInt("replication.executor.workers", 1);
@@ -270,8 +269,8 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 }
 List otherRegionServers = 
replicationTracker.getListOfRegionServers().stream()
 .map(ServerName::valueOf).collect(Collectors.toList());
-LOG.info(
-  "Current list of replicators: " + currentReplicators + " other RSs: " + 
otherRegionServers);
+LOG.info("Current list of replicators: " + currentReplicators + " other 
RSs: "
++ otherRegionServers);
 
 // Look if there's anything to process after a restart
 for (ServerName rs : currentReplicators) {
@@ -288,7 +287,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
* The returned future is for adoptAbandonedQueues task.
*/
   Future init() throws IOException, ReplicationException {
-for (String id : this.replicationPeers.getConnectedPeerIds()) {
+for (String id : this.replicationPeers.getAllPeerIds()) {
   addSource(id);
   if (replicationForBulkLoadDataEnabled) {
 // Check if peer exists in hfile-refs queue, if not add it. This can 
happen in the case
@@ -307,8 +306,8 @@ public class ReplicationSourceManager implements 
ReplicationListener {
*/
   @VisibleForTesting
   ReplicationSourceInterface addSource(String id) throws IOException, 
ReplicationException {
-ReplicationPeerConfig peerConfig = 
replicationPeers.getReplicationPeerConfig(id);
-ReplicationPeer peer = replicationPeers.getConnectedPeer(id);
+ReplicationPeerConfig peerConfig = replicationPeers.getPeerConfig(id);
+ReplicationPeer peer = replicationPeers.getPeer(id);
 ReplicationSourceInterface src = getReplicationSource(id, peerConfig, 
peer);
 synchronized (this.walsById) {
   this.sources.add(src);
@@ -354,7 +353,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
   public void deleteSource(String peerId, boolean closeConnection) {
 abortWhenFail(() -> this.queueStorage.removeQueue(server.getServerName(), 
peerId));
 if (closeConnection) {
-  this.replicationPeers.peerDisconnected(peerId);
+  this.replicationPeers.removePeer(peerId);
 }
   }
 
@@ -445,12 +444,12 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 // update replication queues on ZK
 // synchronize on replicationPeers to avoid adding source for the 
to-be-removed peer
 synchronized (replicationPeers) {
-  for (String id : replicationPeers.getConnectedPeerIds()) {
+  for (String id : replicationPeers.getAllPeerIds()) {
 try {
   this.queueStorage.addWAL(server.getServerName(), id, logName);
 } catch (ReplicationException e) {
-  throw new IOException("Cannot add log to replication queue" +
-" when creating a new source, queueId=" + id + ", filename=" + 
logName, e);
+  throw new IOException("Cannot add log to replication queue"
+  + " when creating a new source, queueId=" + id + ", filename=" + 
logName, e);
 }
   }
 }
@@ -593,7 +592,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 
   public void addPeer(String id) throws ReplicationException, IOException {
 LOG.info("Trying to add peer, peerId: " + id);
-boolean added = this.replicationPeers.peerConnected(id);
+boolean added = this.replicationPeers.addPeer(id);
 if (added) {
   LOG.info("Peer " + id + " connected success, trying to start the 
replication source thread.");
   addSource(id);
@@ -729,19 +728,25 @@ public class ReplicationSourceManager implements 
ReplicationListener {
   // there is not an actual peer defined corresponding to peerId for 
the failover.
   ReplicationQueueInfo replicationQueueInfo = new 
ReplicationQueueInfo(peerId);

[18/37] hbase git commit: HBASE-19579 Add peer lock test for shell command list_locks

2018-01-11 Thread zhangduo
HBASE-19579 Add peer lock test for shell command list_locks

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/25095e58
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/25095e58
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/25095e58

Branch: refs/heads/HBASE-19397-branch-2
Commit: 25095e58996479cf11ac93c66a018772fb1e9555
Parents: c8cf0ef
Author: Guanghao Zhang 
Authored: Sat Dec 23 21:04:27 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../src/main/protobuf/LockService.proto  |  1 +
 .../src/test/ruby/shell/list_locks_test.rb   | 19 +++
 2 files changed, 20 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/25095e58/hbase-protocol-shaded/src/main/protobuf/LockService.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/LockService.proto 
b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
index b8d180c..0675070 100644
--- a/hbase-protocol-shaded/src/main/protobuf/LockService.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
@@ -77,6 +77,7 @@ enum LockedResourceType {
   NAMESPACE = 2;
   TABLE = 3;
   REGION = 4;
+  PEER = 5;
 }
 
 message LockedResource {

http://git-wip-us.apache.org/repos/asf/hbase/blob/25095e58/hbase-shell/src/test/ruby/shell/list_locks_test.rb
--
diff --git a/hbase-shell/src/test/ruby/shell/list_locks_test.rb 
b/hbase-shell/src/test/ruby/shell/list_locks_test.rb
index f465a6b..ef1c0ce 100644
--- a/hbase-shell/src/test/ruby/shell/list_locks_test.rb
+++ b/hbase-shell/src/test/ruby/shell/list_locks_test.rb
@@ -67,6 +67,25 @@ module Hbase
 proc_id)
 end
 
+define_test 'list peer locks' do
+  lock = create_exclusive_lock(0)
+  peer_id = '1'
+
+  @scheduler.waitPeerExclusiveLock(lock, peer_id)
+  output = capture_stdout { @list_locks.command }
+  @scheduler.wakePeerExclusiveLock(lock, peer_id)
+
+  assert_equal(
+"PEER(1)\n" \
+"Lock type: EXCLUSIVE, procedure: {" \
+  
"\"className\"=>\"org.apache.hadoop.hbase.master.locking.LockProcedure\", " \
+  "\"procId\"=>\"0\", \"submittedTime\"=>\"0\", 
\"state\"=>\"RUNNABLE\", " \
+  "\"lastUpdate\"=>\"0\", " \
+  "\"stateMessage\"=>[{\"lockType\"=>\"EXCLUSIVE\", 
\"description\"=>\"description\"}]" \
+"}\n\n",
+output)
+end
+
 define_test 'list server locks' do
   lock = create_exclusive_lock(0)
 



[34/37] hbase git commit: HBASE-19636 All rs should already start work with the new peer change when replication peer procedure is finished

2018-01-11 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/69fac916/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index fc978be..e087127 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.replication.regionserver;
 
 import java.io.IOException;
@@ -33,7 +31,7 @@ import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.Future;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.RejectedExecutionException;
@@ -70,27 +68,53 @@ import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti
 import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
- * This class is responsible to manage all the replication
- * sources. There are two classes of sources:
+ * This class is responsible to manage all the replication sources. There are 
two classes of
+ * sources:
  * 
- *  Normal sources are persistent and one per peer cluster
- *  Old sources are recovered from a failed region server and our
- * only goal is to finish replicating the WAL queue it had up in ZK
+ * Normal sources are persistent and one per peer cluster
+ * Old sources are recovered from a failed region server and our only goal 
is to finish
+ * replicating the WAL queue it had
+ * 
+ * 
+ * When a region server dies, this class uses a watcher to get notified and it 
tries to grab a lock
+ * in order to transfer all the queues in a local old source.
+ * 
+ * Synchronization specification:
+ * 
+ * No need synchronized on {@link #sources}. {@link #sources} is a 
ConcurrentHashMap and there
+ * is a Lock for peer id in {@link PeerProcedureHandlerImpl}. So there is no 
race for peer
+ * operations.
+ * Need synchronized on {@link #walsById}. There are four methods which 
modify it,
+ * {@link #addPeer(String)}, {@link #removePeer(String)},
+ * {@link #cleanOldLogs(SortedSet, String, String)} and {@link 
#preLogRoll(Path)}. {@link #walsById}
+ * is a ConcurrentHashMap and there is a Lock for peer id in {@link 
PeerProcedureHandlerImpl}. So
+ * there is no race between {@link #addPeer(String)} and {@link 
#removePeer(String)}.
+ * {@link #cleanOldLogs(SortedSet, String, String)} is called by {@link 
ReplicationSourceInterface}.
+ * So no race with {@link #addPeer(String)}. {@link #removePeer(String)} will 
terminate the
+ * {@link ReplicationSourceInterface} firstly, then remove the wals from 
{@link #walsById}. So no
+ * race with {@link #removePeer(String)}. The only case need synchronized is
+ * {@link #cleanOldLogs(SortedSet, String, String)} and {@link 
#preLogRoll(Path)}.
+ * No need synchronized on {@link #walsByIdRecoveredQueues}. There are 
three methods which
+ * modify it, {@link #removePeer(String)} , {@link #cleanOldLogs(SortedSet, 
String, String)} and
+ * {@link ReplicationSourceManager.NodeFailoverWorker#run()}.
+ * {@link #cleanOldLogs(SortedSet, String, String)} is called by {@link 
ReplicationSourceInterface}.
+ * {@link #removePeer(String)} will terminate the {@link 
ReplicationSourceInterface} firstly, then
+ * remove the wals from {@link #walsByIdRecoveredQueues}. And
+ * {@link ReplicationSourceManager.NodeFailoverWorker#run()} will add the wals 
to
+ * {@link #walsByIdRecoveredQueues} firstly, then start up a {@link 
ReplicationSourceInterface}. So
+ * there is no race here. For {@link 
ReplicationSourceManager.NodeFailoverWorker#run()} and
+ * {@link #removePeer(String)}, there is already synchronized on {@link 
#oldsources}. So no need
+ * synchronized on {@link #walsByIdRecoveredQueues}.
+ * Need synchronized on {@link #latestPaths} to avoid the new open source 
miss new log.
+ * Need synchronized on {@link #oldsources} to avoid adding recovered 
source for the
+ * to-be-removed peer.
  * 
- *
- * When a region server dies, this class uses a watcher to get notified and it
- * tries to grab a lock in order to transfer all the queues in a local
- * old 

[29/37] hbase git commit: HBASE-19686 Use KeyLocker instead of ReentrantLock in PeerProcedureHandlerImpl

2018-01-11 Thread zhangduo
HBASE-19686 Use KeyLocker instead of ReentrantLock in PeerProcedureHandlerImpl


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bab5fb1c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bab5fb1c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bab5fb1c

Branch: refs/heads/HBASE-19397-branch-2
Commit: bab5fb1c598053e2d4d63a6fd94bc3bd97355755
Parents: 5e55ff5
Author: zhangduo 
Authored: Tue Jan 2 16:13:55 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../regionserver/PeerProcedureHandlerImpl.java  | 41 ++--
 1 file changed, 29 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bab5fb1c/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
index 1efe180..c09c6a0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
@@ -19,10 +19,10 @@
 package org.apache.hadoop.hbase.replication.regionserver;
 
 import java.io.IOException;
-import java.util.concurrent.locks.ReentrantLock;
-
+import java.util.concurrent.locks.Lock;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
+import org.apache.hadoop.hbase.util.KeyLocker;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -32,7 +32,7 @@ public class PeerProcedureHandlerImpl implements 
PeerProcedureHandler {
   private static final Logger LOG = 
LoggerFactory.getLogger(PeerProcedureHandlerImpl.class);
 
   private final ReplicationSourceManager replicationSourceManager;
-  private final ReentrantLock peersLock = new ReentrantLock();
+  private final KeyLocker peersLock = new KeyLocker<>();
 
   public PeerProcedureHandlerImpl(ReplicationSourceManager 
replicationSourceManager) {
 this.replicationSourceManager = replicationSourceManager;
@@ -40,40 +40,57 @@ public class PeerProcedureHandlerImpl implements 
PeerProcedureHandler {
 
   @Override
   public void addPeer(String peerId) throws ReplicationException, IOException {
-peersLock.lock();
+Lock peerLock = peersLock.acquireLock(peerId);
 try {
   replicationSourceManager.addPeer(peerId);
 } finally {
-  peersLock.unlock();
+  peerLock.unlock();
 }
   }
 
   @Override
   public void removePeer(String peerId) throws ReplicationException, 
IOException {
-peersLock.lock();
+Lock peerLock = peersLock.acquireLock(peerId);
 try {
   if (replicationSourceManager.getReplicationPeers().getPeer(peerId) != 
null) {
 replicationSourceManager.removePeer(peerId);
   }
 } finally {
-  peersLock.unlock();
+  peerLock.unlock();
 }
   }
 
   @Override
   public void disablePeer(String peerId) throws ReplicationException, 
IOException {
-PeerState newState = 
replicationSourceManager.getReplicationPeers().refreshPeerState(peerId);
-LOG.info("disable replication peer, id: " + peerId + ", new state: " + 
newState);
+PeerState newState;
+Lock peerLock = peersLock.acquireLock(peerId);
+try {
+  newState = 
replicationSourceManager.getReplicationPeers().refreshPeerState(peerId);
+} finally {
+  peerLock.unlock();
+}
+LOG.info("disable replication peer, id: {}, new state: {}", peerId, 
newState);
   }
 
   @Override
   public void enablePeer(String peerId) throws ReplicationException, 
IOException {
-PeerState newState = 
replicationSourceManager.getReplicationPeers().refreshPeerState(peerId);
-LOG.info("enable replication peer, id: " + peerId + ", new state: " + 
newState);
+PeerState newState;
+Lock peerLock = peersLock.acquireLock(peerId);
+try {
+  newState = 
replicationSourceManager.getReplicationPeers().refreshPeerState(peerId);
+} finally {
+  peerLock.unlock();
+}
+LOG.info("enable replication peer, id: {}, new state: {}", peerId, 
newState);
   }
 
   @Override
   public void updatePeerConfig(String peerId) throws ReplicationException, 
IOException {
-replicationSourceManager.getReplicationPeers().refreshPeerConfig(peerId);
+Lock peerLock = peersLock.acquireLock(peerId);
+try {
+  

[16/37] hbase git commit: HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly

2018-01-11 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/c8cf0ef7/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index 6e27a21..d8f9625 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -21,13 +21,13 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Queue;
 import java.util.Set;
 import java.util.stream.Collectors;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileStatus;
@@ -48,17 +48,18 @@ import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
+import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments;
+import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.replication.ReplicationTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.AtomicLongMap;
 
 /**
@@ -303,57 +304,53 @@ public class DumpReplicationQueues extends Configured 
implements Tool {
   }
 
  public String dumpQueues(ClusterConnection connection, ZKWatcher zkw, Set<String> peerIds,
-   boolean hdfs) throws Exception {
-ReplicationQueuesClient queuesClient;
+  boolean hdfs) throws Exception {
+ReplicationQueueStorage queueStorage;
 ReplicationPeers replicationPeers;
 ReplicationQueues replicationQueues;
 ReplicationTracker replicationTracker;
-ReplicationQueuesClientArguments replicationArgs =
-new ReplicationQueuesClientArguments(getConf(), new 
WarnOnlyAbortable(), zkw);
+ReplicationQueuesArguments replicationArgs =
+new ReplicationQueuesArguments(getConf(), new WarnOnlyAbortable(), 
zkw);
 StringBuilder sb = new StringBuilder();
 
-queuesClient = 
ReplicationFactory.getReplicationQueuesClient(replicationArgs);
-queuesClient.init();
+queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, 
getConf());
 replicationQueues = 
ReplicationFactory.getReplicationQueues(replicationArgs);
-replicationPeers = ReplicationFactory.getReplicationPeers(zkw, getConf(), 
queuesClient, connection);
+replicationPeers =
+ReplicationFactory.getReplicationPeers(zkw, getConf(), queueStorage, 
connection);
 replicationTracker = ReplicationFactory.getReplicationTracker(zkw, 
replicationPeers, getConf(),
   new WarnOnlyAbortable(), new WarnOnlyStoppable());
-List<String> liveRegionServers = replicationTracker.getListOfRegionServers();
+Set<String> liveRegionServers = new HashSet<>(replicationTracker.getListOfRegionServers());
 
 // Loops each peer on each RS and dumps the queues
-try {
-  List<String> regionservers = queuesClient.getListOfReplicators();
-  if (regionservers == null || regionservers.isEmpty()) {
-return sb.toString();
+List<ServerName> regionservers = queueStorage.getListOfReplicators();
+if (regionservers == null || regionservers.isEmpty()) {
+  return sb.toString();
+}
+for (ServerName regionserver : regionservers) {
+  List<String> queueIds = queueStorage.getAllQueues(regionserver);
+  replicationQueues.init(regionserver.getServerName());
+  if (!liveRegionServers.contains(regionserver.getServerName())) {
+deadRegionServers.add(regionserver.getServerName());
   }
-  for (String regionserver : regionservers) {
-List<String> queueIds = queuesClient.getAllQueues(regionserver);
-replicationQueues.init(regionserver);
-if (!liveRegionServers.contains(regionserver)) {
-  deadRegionServers.add(regionserver);
-}
-for (String 

[35/37] hbase git commit: HBASE-19636 All rs should already start work with the new peer change when replication peer procedure is finished

2018-01-11 Thread zhangduo
HBASE-19636 All rs should already start work with the new peer change when 
replication peer procedure is finished

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/69fac916
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/69fac916
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/69fac916

Branch: refs/heads/HBASE-19397-branch-2
Commit: 69fac916567cf40f0018faea2471eb93a8a827b9
Parents: 83e6522
Author: Guanghao Zhang 
Authored: Thu Jan 4 16:58:01 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../replication/ReplicationPeerConfig.java  |   1 -
 .../hbase/replication/ReplicationPeerImpl.java  |   4 +-
 .../hbase/replication/ReplicationQueueInfo.java |  23 +-
 .../hbase/replication/ReplicationUtils.java |  56 ++
 .../replication/TestReplicationStateZKImpl.java |  22 -
 .../regionserver/ReplicationSourceService.java  |   3 +-
 .../regionserver/PeerProcedureHandler.java  |   3 +
 .../regionserver/PeerProcedureHandlerImpl.java  |  50 +-
 .../RecoveredReplicationSource.java |   6 +-
 .../RecoveredReplicationSourceShipper.java  |   8 +-
 .../replication/regionserver/Replication.java   |  15 +-
 .../regionserver/ReplicationSource.java |  34 +-
 .../regionserver/ReplicationSourceFactory.java  |   4 +-
 .../ReplicationSourceInterface.java |   8 +-
 .../regionserver/ReplicationSourceManager.java  | 895 ++-
 .../regionserver/ReplicationSourceShipper.java  |   6 +-
 .../ReplicationSourceWALReader.java |   2 +-
 .../replication/ReplicationSourceDummy.java |   2 +-
 .../replication/TestNamespaceReplication.java   |  57 +-
 .../TestReplicationSourceManager.java   |  11 +-
 .../TestReplicationSourceManagerZkImpl.java |   1 -
 21 files changed, 659 insertions(+), 552 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/69fac916/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index fdae288..bf8d030 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -25,7 +25,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
-
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;

http://git-wip-us.apache.org/repos/asf/hbase/blob/69fac916/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
index 3e17025..604e0bb 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -28,6 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public class ReplicationPeerImpl implements ReplicationPeer {
+
   private final Configuration conf;
 
   private final String id;

http://git-wip-us.apache.org/repos/asf/hbase/blob/69fac916/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
index ecd888f..cd65f9b 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
@@ -29,7 +29,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.ServerName;
 
 /**
- * This class is responsible for the parsing logic for a znode representing a 
queue.
+ * This class is 

[25/37] hbase git commit: HBASE-19617 Remove ReplicationQueues, use ReplicationQueueStorage directly

2018-01-11 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/11158715/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java
index b6cf54d..4b9ed74 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java
@@ -22,7 +22,6 @@ import java.io.IOException;
 import java.util.List;
 import java.util.UUID;
 
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -32,9 +31,10 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
-import org.apache.hadoop.hbase.replication.ReplicationQueues;
+import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Interface that defines a replication source
@@ -47,15 +47,10 @@ public interface ReplicationSourceInterface {
* @param conf the configuration to use
* @param fs the file system to use
* @param manager the manager to use
-   * @param replicationQueues
-   * @param replicationPeers
* @param server the server for this region server
-   * @param peerClusterZnode
-   * @param clusterId
-   * @throws IOException
*/
   void init(Configuration conf, FileSystem fs, ReplicationSourceManager 
manager,
-  ReplicationQueues replicationQueues, ReplicationPeers replicationPeers, 
Server server,
+  ReplicationQueueStorage queueStorage, ReplicationPeers replicationPeers, 
Server server,
   String peerClusterZnode, UUID clusterId, ReplicationEndpoint 
replicationEndpoint,
   WALFileLengthProvider walFileLengthProvider, MetricsSource metrics) 
throws IOException;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/11158715/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index b1d82c8..853bafb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -34,19 +34,21 @@ import java.util.TreeSet;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Future;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
-
+import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Connection;
@@ -60,7 +62,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeer;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
-import org.apache.hadoop.hbase.replication.ReplicationQueues;
+import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationTracker;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -68,6 +70,7 @@ import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import 

[13/37] hbase git commit: HBASE-19525 RS side changes for moving peer modification from zk watcher to procedure

2018-01-11 Thread zhangduo
HBASE-19525 RS side changes for moving peer modification from zk watcher to 
procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e9215791
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e9215791
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e9215791

Branch: refs/heads/HBASE-19397-branch-2
Commit: e9215791b06737245aa9eaec16b5dd672b851739
Parents: 00dea8b
Author: huzheng 
Authored: Wed Dec 20 10:47:18 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../hadoop/hbase/protobuf/ProtobufUtil.java |  11 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java |  13 +-
 .../hbase/replication/ReplicationListener.java  |  14 --
 .../hbase/replication/ReplicationPeer.java  |  28 ++-
 .../replication/ReplicationPeerZKImpl.java  | 186 ---
 .../replication/ReplicationPeersZKImpl.java |  19 +-
 .../replication/ReplicationTrackerZKImpl.java   |  73 +-
 .../regionserver/ReplicationSourceService.java  |   9 +-
 .../handler/RSProcedureHandler.java |   3 +
 .../replication/BaseReplicationEndpoint.java|   2 +-
 .../regionserver/PeerProcedureHandler.java  |  38 
 .../regionserver/PeerProcedureHandlerImpl.java  |  81 +++
 .../regionserver/RefreshPeerCallable.java   |  39 +++-
 .../replication/regionserver/Replication.java   |  10 +
 .../regionserver/ReplicationSource.java |   9 +-
 .../regionserver/ReplicationSourceManager.java  |  37 ++-
 .../TestReplicationAdminUsingProcedure.java | 226 +++
 .../replication/DummyModifyPeerProcedure.java   |  48 
 .../TestDummyModifyPeerProcedure.java   |  80 ---
 .../TestReplicationTrackerZKImpl.java   |  51 -
 .../TestReplicationSourceManager.java   |  32 ++-
 21 files changed, 535 insertions(+), 474 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e9215791/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 7b63cd6..7d83687 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.protobuf;
 
+import static org.apache.hadoop.hbase.protobuf.ProtobufMagic.PB_MAGIC;
+
 import com.google.protobuf.ByteString;
 import com.google.protobuf.CodedInputStream;
 import com.google.protobuf.InvalidProtocolBufferException;
@@ -200,7 +202,7 @@ public final class ProtobufUtil {
* byte array that is bytes.length plus {@link 
ProtobufMagic#PB_MAGIC}.length.
*/
   public static byte [] prependPBMagic(final byte [] bytes) {
-return Bytes.add(ProtobufMagic.PB_MAGIC, bytes);
+return Bytes.add(PB_MAGIC, bytes);
   }
 
   /**
@@ -225,10 +227,11 @@ public final class ProtobufUtil {
* @param bytes bytes to check
* @throws DeserializationException if we are missing the pb magic prefix
*/
-  public static void expectPBMagicPrefix(final byte [] bytes) throws 
DeserializationException {
+  public static void expectPBMagicPrefix(final byte[] bytes) throws 
DeserializationException {
 if (!isPBMagicPrefix(bytes)) {
-  throw new DeserializationException("Missing pb magic " +
-  Bytes.toString(ProtobufMagic.PB_MAGIC) + " prefix");
+  String bytesPrefix = bytes == null ? "null" : 
Bytes.toStringBinary(bytes, 0, PB_MAGIC.length);
+  throw new DeserializationException(
+  "Missing pb magic " + Bytes.toString(PB_MAGIC) + " prefix, bytes: " 
+ bytesPrefix);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e9215791/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index b26802f..5e6b3db 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.shaded.protobuf;
 
+import static org.apache.hadoop.hbase.protobuf.ProtobufMagic.PB_MAGIC;
+
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -280,7 +282,7 @@ public final class ProtobufUtil {
* byte array 

[11/37] hbase git commit: HBASE-19520 Add UTs for the new lock type PEER

2018-01-11 Thread zhangduo
HBASE-19520 Add UTs for the new lock type PEER

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0d4693d2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0d4693d2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0d4693d2

Branch: refs/heads/HBASE-19397-branch-2
Commit: 0d4693d286f01d3ac0b1a3b57f492783ae3d1031
Parents: c247dc1
Author: Guanghao Zhang 
Authored: Wed Dec 20 16:43:38 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../procedure/MasterProcedureScheduler.java |   9 +-
 .../procedure/TestMasterProcedureScheduler.java |  65 -
 ...TestMasterProcedureSchedulerConcurrency.java | 135 +++
 3 files changed, 201 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0d4693d2/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index 8ff2d12..a25217c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -389,6 +389,13 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
 while (tableIter.hasNext()) {
   count += tableIter.next().size();
 }
+
+// Peer queues
+final AvlTreeIterator peerIter = new AvlTreeIterator<>(peerMap);
+while (peerIter.hasNext()) {
+  count += peerIter.next().size();
+}
+
 return count;
   }
 
@@ -1041,7 +1048,7 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
* @see #wakePeerExclusiveLock(Procedure, String)
* @param procedure the procedure trying to acquire the lock
* @param peerId peer to lock
-   * @return true if the procedure has to wait for the per to be available
+   * @return true if the procedure has to wait for the peer to be available
*/
   public boolean waitPeerExclusiveLock(Procedure procedure, String peerId) {
 schedLock();

http://git-wip-us.apache.org/repos/asf/hbase/blob/0d4693d2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
index 0291165..fd77e1f 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
@@ -905,6 +905,27 @@ public class TestMasterProcedureScheduler {
 }
   }
 
+  public static class TestPeerProcedure extends TestProcedure implements 
PeerProcedureInterface {
+private final String peerId;
+private final PeerOperationType opType;
+
+public TestPeerProcedure(long procId, String peerId, PeerOperationType 
opType) {
+  super(procId);
+  this.peerId = peerId;
+  this.opType = opType;
+}
+
+@Override
+public String getPeerId() {
+  return peerId;
+}
+
+@Override
+public PeerOperationType getPeerOperationType() {
+  return opType;
+}
+  }
+
   private static LockProcedure createLockProcedure(LockType lockType, long 
procId) throws Exception {
 LockProcedure procedure = new LockProcedure();
 
@@ -927,22 +948,19 @@ public class TestMasterProcedureScheduler {
 return createLockProcedure(LockType.SHARED, procId);
   }
 
-  private static void assertLockResource(LockedResource resource,
-  LockedResourceType resourceType, String resourceName)
-  {
+  private static void assertLockResource(LockedResource resource, 
LockedResourceType resourceType,
+  String resourceName) {
 assertEquals(resourceType, resource.getResourceType());
 assertEquals(resourceName, resource.getResourceName());
   }
 
-  private static void assertExclusiveLock(LockedResource resource, 
Procedure procedure)
-  {
+  private static void assertExclusiveLock(LockedResource resource, 
Procedure procedure) {
 assertEquals(LockType.EXCLUSIVE, resource.getLockType());
 assertEquals(procedure, resource.getExclusiveLockOwnerProcedure());
 assertEquals(0, resource.getSharedLockCount());
   }
 
-  private static 

[30/37] hbase git commit: HBASE-19544 Add UTs for testing concurrent modifications on replication peer

2018-01-11 Thread zhangduo
HBASE-19544 Add UTs for testing concurrent modifications on replication peer

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bb7b0a7e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bb7b0a7e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bb7b0a7e

Branch: refs/heads/HBASE-19397-branch-2
Commit: bb7b0a7ec15592d7e12800920bce91a36e0d8823
Parents: bab5fb1
Author: Guanghao Zhang 
Authored: Tue Jan 2 17:07:41 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../replication/TestReplicationAdmin.java   | 69 
 1 file changed, 69 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bb7b0a7e/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index 9b71595..89cf393 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -31,6 +31,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
@@ -55,6 +56,8 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Unit testing of ReplicationAdmin
@@ -62,6 +65,8 @@ import org.junit.rules.TestName;
 @Category({MediumTests.class, ClientTests.class})
 public class TestReplicationAdmin {
 
+  private static final Logger LOG = 
LoggerFactory.getLogger(TestReplicationAdmin.class);
+
   private final static HBaseTestingUtility TEST_UTIL =
   new HBaseTestingUtility();
 
@@ -111,6 +116,70 @@ public class TestReplicationAdmin {
   }
 
   @Test
+  public void testConcurrentPeerOperations() throws Exception {
+int threadNum = 5;
+AtomicLong successCount = new AtomicLong(0);
+
+// Test concurrent add peer operation
+Thread[] addPeers = new Thread[threadNum];
+for (int i = 0; i < threadNum; i++) {
+  addPeers[i] = new Thread(() -> {
+try {
+  hbaseAdmin.addReplicationPeer(ID_ONE,
+ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build());
+  successCount.incrementAndGet();
+} catch (Exception e) {
+  LOG.debug("Got exception when add replication peer", e);
+}
+  });
+  addPeers[i].start();
+}
+for (Thread addPeer : addPeers) {
+  addPeer.join();
+}
+assertEquals(1, successCount.get());
+
+// Test concurrent remove peer operation
+successCount.set(0);
+Thread[] removePeers = new Thread[threadNum];
+for (int i = 0; i < threadNum; i++) {
+  removePeers[i] = new Thread(() -> {
+try {
+  hbaseAdmin.removeReplicationPeer(ID_ONE);
+  successCount.incrementAndGet();
+} catch (Exception e) {
+  LOG.debug("Got exception when remove replication peer", e);
+}
+  });
+  removePeers[i].start();
+}
+for (Thread removePeer : removePeers) {
+  removePeer.join();
+}
+assertEquals(1, successCount.get());
+
+// Test concurrent add peer operation again
+successCount.set(0);
+addPeers = new Thread[threadNum];
+for (int i = 0; i < threadNum; i++) {
+  addPeers[i] = new Thread(() -> {
+try {
+  hbaseAdmin.addReplicationPeer(ID_ONE,
+ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build());
+  successCount.incrementAndGet();
+} catch (Exception e) {
+  LOG.debug("Got exception when add replication peer", e);
+}
+  });
+  addPeers[i].start();
+}
+for (Thread addPeer : addPeers) {
+  addPeer.join();
+}
+assertEquals(1, successCount.get());
+  }
+
+  @Test
   public void testAddInvalidPeer() {
 ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder();
 builder.setClusterKey(KEY_ONE);



[06/37] hbase git commit: HBASE-19216 Implement a general framework to execute remote procedure on RS

2018-01-11 Thread zhangduo
HBASE-19216 Implement a general framework to execute remote procedure on RS


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3cc7ac6f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3cc7ac6f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3cc7ac6f

Branch: refs/heads/HBASE-19397-branch-2
Commit: 3cc7ac6f8d00ca8f8aef3b1f4251d8e9d45368c2
Parents: 814d08a
Author: zhangduo 
Authored: Fri Dec 15 21:06:44 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../hbase/procedure2/LockedResourceType.java|   4 +-
 .../procedure2/RemoteProcedureDispatcher.java   |  23 +-
 .../src/main/protobuf/Admin.proto   |   9 +-
 .../src/main/protobuf/MasterProcedure.proto |  30 +++
 .../src/main/protobuf/RegionServerStatus.proto  |  15 ++
 .../apache/hadoop/hbase/executor/EventType.java |  26 ++-
 .../hadoop/hbase/executor/ExecutorType.java |   3 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  33 ++-
 .../hadoop/hbase/master/MasterRpcServices.java  |  13 ++
 .../assignment/RegionTransitionProcedure.java   |  18 +-
 .../procedure/MasterProcedureScheduler.java | 224 +--
 .../procedure/PeerProcedureInterface.java   |  34 +++
 .../master/procedure/RSProcedureDispatcher.java | 101 +
 .../master/replication/ModifyPeerProcedure.java | 127 +++
 .../master/replication/RefreshPeerCallable.java |  67 ++
 .../replication/RefreshPeerProcedure.java   | 197 
 .../hbase/procedure2/RSProcedureCallable.java   |  43 
 .../hbase/regionserver/HRegionServer.java   |  90 ++--
 .../hbase/regionserver/RSRpcServices.java   |  56 +++--
 .../handler/RSProcedureHandler.java |  51 +
 .../assignment/TestAssignmentManager.java   |  20 +-
 .../replication/DummyModifyPeerProcedure.java   |  41 
 .../TestDummyModifyPeerProcedure.java   |  80 +++
 .../security/access/TestAccessController.java   |   1 +
 24 files changed, 1122 insertions(+), 184 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3cc7ac6f/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
index c5fe62b..dc9b5d4 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -22,5 +22,5 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public enum LockedResourceType {
-  SERVER, NAMESPACE, TABLE, REGION
+  SERVER, NAMESPACE, TABLE, REGION, PEER
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3cc7ac6f/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
index 71932b8..78c49fb 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
@@ -226,13 +226,30 @@ public abstract class RemoteProcedureDispatcher<TEnv, TRemote extends Comparable<TRemote>> {
+/**
+ * For building the remote operation.
+ */
 RemoteOperation remoteCallBuild(TEnv env, TRemote remote);
-void remoteCallCompleted(TEnv env, TRemote remote, RemoteOperation 
response);
+
+/**
+ * Called when the executeProcedure call is failed.
+ */
 void remoteCallFailed(TEnv env, TRemote remote, IOException exception);
+
+/**
+ * Called when RS tells the remote procedure is succeeded through the
+ * {@code reportProcedureDone} method.
+ */
+void remoteOperationCompleted(TEnv env);
+
+/**
+ * Called when RS tells the remote procedure is failed through the {@code 
reportProcedureDone}
+ * method.
+ * @param error the error message
+ */
+void 

[05/37] hbase git commit: HBASE-19697 Remove TestReplicationAdminUsingProcedure

2018-01-11 Thread zhangduo
HBASE-19697 Remove TestReplicationAdminUsingProcedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/07dbf9fb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/07dbf9fb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/07dbf9fb

Branch: refs/heads/HBASE-19397-branch-2
Commit: 07dbf9fb6d3b61e6aa263931a0c5bc602890cead
Parents: 8b99b5a
Author: zhangduo 
Authored: Wed Jan 3 21:13:57 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../TestReplicationAdminUsingProcedure.java | 225 ---
 1 file changed, 225 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/07dbf9fb/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java
deleted file mode 100644
index 1300376..000
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client.replication;
-
-import java.io.IOException;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.TestReplicationBase;
-import org.apache.hadoop.hbase.testclassification.ClientTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.log4j.Logger;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
-
-@Category({ MediumTests.class, ClientTests.class })
-public class TestReplicationAdminUsingProcedure extends TestReplicationBase {
-
-  private static final String PEER_ID = "2";
-  private static final Logger LOG = 
Logger.getLogger(TestReplicationAdminUsingProcedure.class);
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-conf1.setInt("hbase.multihconnection.threads.max", 10);
-
-// Start the master & slave mini cluster.
-TestReplicationBase.setUpBeforeClass();
-
-// Remove the replication peer
-hbaseAdmin.removeReplicationPeer(PEER_ID);
-  }
-
-  private void loadData(int startRowKey, int endRowKey) throws IOException {
-for (int i = startRowKey; i < endRowKey; i++) {
-  byte[] rowKey = Bytes.add(row, Bytes.toBytes(i));
-  Put put = new Put(rowKey);
-  put.addColumn(famName, null, Bytes.toBytes(i));
-  htable1.put(put);
-}
-  }
-
-  private void waitForReplication(int expectedRows, int retries)
-  throws IOException, InterruptedException {
-Scan scan;
-for (int i = 0; i < retries; i++) {
-  scan = new Scan();
-  if (i == retries - 1) {
-throw new IOException("Waited too much time for normal batch 
replication");
-  }
-  try (ResultScanner scanner = htable2.getScanner(scan)) {
-int count = 0;
-for (Result res : scanner) {
-  count++;
-}
-if (count != expectedRows) {
-  LOG.info("Only got " + count + " rows,  expected rows: " + 
expectedRows);
-  Thread.sleep(SLEEP_TIME);
-} else {
-  return;
-}
-  }
-}
-  }
-
-  @Before
-  public void setUp() throws IOException {
-

[09/37] hbase git commit: HBASE-19536 Client side changes for moving peer modification from zk watcher to procedure

2018-01-11 Thread zhangduo
HBASE-19536 Client side changes for moving peer modification from zk watcher to 
procedure

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fb8b8861
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fb8b8861
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fb8b8861

Branch: refs/heads/HBASE-19397-branch-2
Commit: fb8b886177246e4b34906200cb628c67aac20d54
Parents: 8a34dc8
Author: Guanghao Zhang 
Authored: Tue Dec 19 15:50:57 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |  87 ++-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  | 149 ++-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  82 +-
 3 files changed, 238 insertions(+), 80 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fb8b8861/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index f61b32e..6729473 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -2466,7 +2466,7 @@ public interface Admin extends Abortable, Closeable {
   /**
* Add a new replication peer for replicating data to slave cluster.
* @param peerId a short name that identifies the peer
-   * @param peerConfig configuration for the replication slave cluster
+   * @param peerConfig configuration for the replication peer
* @throws IOException if a remote or network exception occurs
*/
   default void addReplicationPeer(String peerId, ReplicationPeerConfig 
peerConfig)
@@ -2477,7 +2477,7 @@ public interface Admin extends Abortable, Closeable {
   /**
* Add a new replication peer for replicating data to slave cluster.
* @param peerId a short name that identifies the peer
-   * @param peerConfig configuration for the replication slave cluster
+   * @param peerConfig configuration for the replication peer
* @param enabled peer state, true if ENABLED and false if DISABLED
* @throws IOException if a remote or network exception occurs
*/
@@ -2485,6 +2485,37 @@ public interface Admin extends Abortable, Closeable {
   throws IOException;
 
   /**
+   * Add a new replication peer but does not block and wait for it.
+   * 
+   * You can use Future.get(long, TimeUnit) to wait on the operation to 
complete. It may throw
+   * ExecutionException if there was an error while executing the operation or 
TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to 
complete.
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication peer
+   * @return the result of the async operation
+   * @throws IOException IOException if a remote or network exception occurs
+   */
+  default Future addReplicationPeerAsync(String peerId, 
ReplicationPeerConfig peerConfig)
+  throws IOException {
+return addReplicationPeerAsync(peerId, peerConfig, true);
+  }
+
+  /**
+   * Add a new replication peer but does not block and wait for it.
+   * 
+   * You can use Future.get(long, TimeUnit) to wait on the operation to 
complete. It may throw
+   * ExecutionException if there was an error while executing the operation or 
TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to 
complete.
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication peer
+   * @param enabled peer state, true if ENABLED and false if DISABLED
+   * @return the result of the async operation
+   * @throws IOException IOException if a remote or network exception occurs
+   */
+  Future addReplicationPeerAsync(String peerId, ReplicationPeerConfig 
peerConfig,
+  boolean enabled) throws IOException;
+
+  /**
* Remove a peer and stop the replication.
* @param peerId a short name that identifies the peer
* @throws IOException if a remote or network exception occurs
@@ -2492,6 +2523,18 @@ public interface Admin extends Abortable, Closeable {
   void removeReplicationPeer(String peerId) throws IOException;
 
   /**
+   * Remove a replication peer but does not block and wait for it.
+   * 
+   * You can use Future.get(long, TimeUnit) to wait on the operation to 
complete. It may throw
+   * ExecutionException if there was an error while executing the operation or 
TimeoutException in
+   * case the wait timeout was not long enough to 

[07/37] hbase git commit: HBASE-19635 Introduce a thread at RS side to call reportProcedureDone

2018-01-11 Thread zhangduo
HBASE-19635 Introduce a thread at RS side to call reportProcedureDone


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bfe17593
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bfe17593
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bfe17593

Branch: refs/heads/HBASE-19397-branch-2
Commit: bfe175931a24199b10ac81cb7462e37659551ba0
Parents: 1115871
Author: zhangduo 
Authored: Wed Dec 27 20:13:42 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../src/main/protobuf/RegionServerStatus.proto  |   5 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |  15 ++-
 .../hbase/regionserver/HRegionServer.java   |  72 
 .../RemoteProcedureResultReporter.java  | 111 +++
 .../handler/RSProcedureHandler.java |   2 +-
 5 files changed, 149 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bfe17593/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto 
b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
index 4f75941..3f836cd 100644
--- a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
@@ -146,7 +146,7 @@ message RegionSpaceUseReportRequest {
 message RegionSpaceUseReportResponse {
 }
 
-message ReportProcedureDoneRequest {
+message RemoteProcedureResult {
   required uint64 proc_id = 1;
   enum Status {
 SUCCESS = 1;
@@ -155,6 +155,9 @@ message ReportProcedureDoneRequest {
   required Status status = 2;
   optional ForeignExceptionMessage error = 3;
 }
+message ReportProcedureDoneRequest {
+  repeated RemoteProcedureResult result = 1;
+}
 
 message ReportProcedureDoneResponse {
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bfe17593/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 72bf2d1..377a9c6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -265,6 +265,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RemoteProcedureResult;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
@@ -2254,12 +2255,14 @@ public class MasterRpcServices extends RSRpcServices
   @Override
   public ReportProcedureDoneResponse reportProcedureDone(RpcController 
controller,
   ReportProcedureDoneRequest request) throws ServiceException {
-if (request.getStatus() == ReportProcedureDoneRequest.Status.SUCCESS) {
-  master.remoteProcedureCompleted(request.getProcId());
-} else {
-  master.remoteProcedureFailed(request.getProcId(),
-RemoteProcedureException.fromProto(request.getError()));
-}
+request.getResultList().forEach(result -> {
+  if (result.getStatus() == RemoteProcedureResult.Status.SUCCESS) {
+master.remoteProcedureCompleted(result.getProcId());
+  } else {
+master.remoteProcedureFailed(result.getProcId(),
+  RemoteProcedureException.fromProto(result.getError()));
+  }
+});
 return ReportProcedureDoneResponse.getDefaultInstance();
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bfe17593/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 226976c..29ff2ad 100644
--- 

[10/37] hbase git commit: HBASE-19564 Procedure id is missing in the response of peer related operations

2018-01-11 Thread zhangduo
HBASE-19564 Procedure id is missing in the response of peer related operations


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c247dc1b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c247dc1b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c247dc1b

Branch: refs/heads/HBASE-19397-branch-2
Commit: c247dc1bf74afe6d51336c0cf6527c21ee39f930
Parents: fb8b886
Author: zhangduo 
Authored: Wed Dec 20 20:57:37 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../hadoop/hbase/master/MasterRpcServices.java  | 24 ++--
 .../master/replication/ModifyPeerProcedure.java |  4 +---
 2 files changed, 13 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c247dc1b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 8025a51..72bf2d1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1886,10 +1886,10 @@ public class MasterRpcServices extends RSRpcServices
   public AddReplicationPeerResponse addReplicationPeer(RpcController 
controller,
   AddReplicationPeerRequest request) throws ServiceException {
 try {
-  master.addReplicationPeer(request.getPeerId(),
-ReplicationPeerConfigUtil.convert(request.getPeerConfig()), 
request.getPeerState()
-.getState().equals(ReplicationState.State.ENABLED));
-  return AddReplicationPeerResponse.newBuilder().build();
+  long procId = master.addReplicationPeer(request.getPeerId(),
+ReplicationPeerConfigUtil.convert(request.getPeerConfig()),
+
request.getPeerState().getState().equals(ReplicationState.State.ENABLED));
+  return AddReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1899,8 +1899,8 @@ public class MasterRpcServices extends RSRpcServices
   public RemoveReplicationPeerResponse removeReplicationPeer(RpcController 
controller,
   RemoveReplicationPeerRequest request) throws ServiceException {
 try {
-  master.removeReplicationPeer(request.getPeerId());
-  return RemoveReplicationPeerResponse.newBuilder().build();
+  long procId = master.removeReplicationPeer(request.getPeerId());
+  return 
RemoveReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1910,8 +1910,8 @@ public class MasterRpcServices extends RSRpcServices
   public EnableReplicationPeerResponse enableReplicationPeer(RpcController 
controller,
   EnableReplicationPeerRequest request) throws ServiceException {
 try {
-  master.enableReplicationPeer(request.getPeerId());
-  return EnableReplicationPeerResponse.newBuilder().build();
+  long procId = master.enableReplicationPeer(request.getPeerId());
+  return 
EnableReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1921,8 +1921,8 @@ public class MasterRpcServices extends RSRpcServices
   public DisableReplicationPeerResponse disableReplicationPeer(RpcController 
controller,
   DisableReplicationPeerRequest request) throws ServiceException {
 try {
-  master.disableReplicationPeer(request.getPeerId());
-  return DisableReplicationPeerResponse.newBuilder().build();
+  long procId = master.disableReplicationPeer(request.getPeerId());
+  return 
DisableReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1948,9 +1948,9 @@ public class MasterRpcServices extends RSRpcServices
   public UpdateReplicationPeerConfigResponse 
updateReplicationPeerConfig(RpcController controller,
   UpdateReplicationPeerConfigRequest request) throws ServiceException {
 try {
-  master.updateReplicationPeerConfig(request.getPeerId(),
+  long procId = master.updateReplicationPeerConfig(request.getPeerId(),
 ReplicationPeerConfigUtil.convert(request.getPeerConfig()));
-  return UpdateReplicationPeerConfigResponse.newBuilder().build();
+  return 
UpdateReplicationPeerConfigResponse.newBuilder().setProcId(procId).build();
 } catch 

[12/37] hbase git commit: HBASE-19580 Use slf4j instead of commons-logging in new, just-added Peer Procedure classes

2018-01-11 Thread zhangduo
HBASE-19580 Use slf4j instead of commons-logging in new, just-added Peer 
Procedure classes


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/00dea8bc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/00dea8bc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/00dea8bc

Branch: refs/heads/HBASE-19397-branch-2
Commit: 00dea8bc6485de9c97a91a242289866d7d1d9e3a
Parents: 0d4693d
Author: zhangduo 
Authored: Thu Dec 21 21:59:46 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../hadoop/hbase/master/replication/AddPeerProcedure.java  | 6 +++---
 .../hadoop/hbase/master/replication/DisablePeerProcedure.java  | 6 +++---
 .../hadoop/hbase/master/replication/EnablePeerProcedure.java   | 6 +++---
 .../hadoop/hbase/master/replication/ModifyPeerProcedure.java   | 6 +++---
 .../hadoop/hbase/master/replication/RefreshPeerProcedure.java  | 6 +++---
 .../hadoop/hbase/master/replication/RemovePeerProcedure.java   | 6 +++---
 .../hbase/master/replication/UpdatePeerConfigProcedure.java| 6 +++---
 7 files changed, 21 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/00dea8bc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
index c3862d8..066c3e7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.hbase.master.replication;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
@@ -28,6 +26,8 @@ import 
org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AddPeerStateData;
 
@@ -37,7 +37,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.A
 @InterfaceAudience.Private
 public class AddPeerProcedure extends ModifyPeerProcedure {
 
-  private static final Log LOG = LogFactory.getLog(AddPeerProcedure.class);
+  private static final Logger LOG = 
LoggerFactory.getLogger(AddPeerProcedure.class);
 
   private ReplicationPeerConfig peerConfig;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/00dea8bc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
index 0b32db9..9a28de6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
@@ -19,11 +19,11 @@ package org.apache.hadoop.hbase.master.replication;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The procedure for disabling a replication peer.
@@ -31,7 +31,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public class DisablePeerProcedure extends ModifyPeerProcedure {
 
-  private static final Log LOG = LogFactory.getLog(DisablePeerProcedure.class);
+  private static final Logger LOG = 
LoggerFactory.getLogger(DisablePeerProcedure.class);
 
   public DisablePeerProcedure() {
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/00dea8bc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.java

[14/37] hbase git commit: HBASE-19543 Abstract a replication storage interface to extract the zk specific code

2018-01-11 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/77d8bc05/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
deleted file mode 100644
index b6f8784..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.replication;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.replication.BaseReplicationEndpoint;
-import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationFactory;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.replication.ReplicationPeers;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * Manages and performs all replication admin operations.
- * 
- * Used to add/remove a replication peer.
- */
-@InterfaceAudience.Private
-public class ReplicationManager {
-  private final ReplicationQueuesClient replicationQueuesClient;
-  private final ReplicationPeers replicationPeers;
-
-  public ReplicationManager(Configuration conf, ZKWatcher zkw, Abortable 
abortable)
-  throws IOException {
-try {
-  this.replicationQueuesClient = ReplicationFactory
-  .getReplicationQueuesClient(new 
ReplicationQueuesClientArguments(conf, abortable, zkw));
-  this.replicationQueuesClient.init();
-  this.replicationPeers = ReplicationFactory.getReplicationPeers(zkw, conf,
-this.replicationQueuesClient, abortable);
-  this.replicationPeers.init();
-} catch (Exception e) {
-  throw new IOException("Failed to construct ReplicationManager", e);
-}
-  }
-
-  public void addReplicationPeer(String peerId, ReplicationPeerConfig 
peerConfig, boolean enabled)
-  throws ReplicationException {
-checkPeerConfig(peerConfig);
-replicationPeers.registerPeer(peerId, peerConfig, enabled);
-replicationPeers.peerConnected(peerId);
-  }
-
-  public void removeReplicationPeer(String peerId) throws ReplicationException 
{
-replicationPeers.peerDisconnected(peerId);
-replicationPeers.unregisterPeer(peerId);
-  }
-
-  public void enableReplicationPeer(String peerId) throws ReplicationException 
{
-this.replicationPeers.enablePeer(peerId);
-  }
-
-  public void disableReplicationPeer(String peerId) throws 
ReplicationException {
-this.replicationPeers.disablePeer(peerId);
-  }
-
-  public ReplicationPeerConfig getPeerConfig(String peerId)
-  throws ReplicationException, ReplicationPeerNotFoundException {
-ReplicationPeerConfig peerConfig = 
replicationPeers.getReplicationPeerConfig(peerId);
-if (peerConfig == null) {
-  throw new ReplicationPeerNotFoundException(peerId);
-}
-return peerConfig;
-  }
-
-  public void updatePeerConfig(String peerId, ReplicationPeerConfig peerConfig)
-  throws ReplicationException, IOException {
-checkPeerConfig(peerConfig);
-this.replicationPeers.updatePeerConfig(peerId, peerConfig);
-  }
-
-  public List listReplicationPeers(Pattern pattern)
-  throws ReplicationException {
-List peers = new ArrayList<>();
-List peerIds = replicationPeers.getAllPeerIds();
-for (String peerId : 

[04/37] hbase git commit: HBASE-19711 TestReplicationAdmin.testConcurrentPeerOperations hangs

2018-01-11 Thread zhangduo
HBASE-19711 TestReplicationAdmin.testConcurrentPeerOperations hangs

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d2c10e74
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d2c10e74
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d2c10e74

Branch: refs/heads/HBASE-19397-branch-2
Commit: d2c10e745121addfd6ddf9aaeecc1d205377df6d
Parents: 2a2c6a6
Author: Guanghao Zhang 
Authored: Fri Jan 5 15:39:06 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../procedure/MasterProcedureScheduler.java | 23 
 1 file changed, 19 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d2c10e74/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index 4ecb3b1..0400de4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -402,7 +402,7 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
   @Override
   public void completionCleanup(final Procedure proc) {
 if (proc instanceof TableProcedureInterface) {
-  TableProcedureInterface iProcTable = (TableProcedureInterface)proc;
+  TableProcedureInterface iProcTable = (TableProcedureInterface) proc;
   boolean tableDeleted;
   if (proc.hasException()) {
 Exception procEx = proc.getException().unwrapRemoteException();
@@ -423,9 +423,7 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
   }
 } else if (proc instanceof PeerProcedureInterface) {
   PeerProcedureInterface iProcPeer = (PeerProcedureInterface) proc;
-  if (iProcPeer.getPeerOperationType() == PeerOperationType.REMOVE) {
-removePeerQueue(iProcPeer.getPeerId());
-  }
+  tryCleanupPeerQueue(iProcPeer.getPeerId(), proc);
 } else {
   // No cleanup for ServerProcedureInterface types, yet.
   return;
@@ -514,6 +512,23 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
 locking.removePeerLock(peerId);
   }
 
+  private void tryCleanupPeerQueue(String peerId, Procedure procedure) {
+schedLock();
+try {
+  PeerQueue queue = AvlTree.get(peerMap, peerId, 
PEER_QUEUE_KEY_COMPARATOR);
+  if (queue == null) {
+return;
+  }
+
+  final LockAndQueue lock = locking.getPeerLock(peerId);
+  if (queue.isEmpty() && lock.tryExclusiveLock(procedure)) {
+removeFromRunQueue(peerRunQueue, queue);
+removePeerQueue(peerId);
+  }
+} finally {
+  schedUnlock();
+}
+  }
 
   private static boolean isPeerProcedure(Procedure proc) {
 return proc instanceof PeerProcedureInterface;



[33/37] hbase git commit: HBASE-19661 Replace ReplicationStateZKBase with ZKReplicationStorageBase

2018-01-11 Thread zhangduo
HBASE-19661 Replace ReplicationStateZKBase with ZKReplicationStorageBase


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8b99b5a3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8b99b5a3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8b99b5a3

Branch: refs/heads/HBASE-19397-branch-2
Commit: 8b99b5a3eb24bcb8dfadc9351483344f7cc33fae
Parents: 0eb42da
Author: huzheng 
Authored: Fri Dec 29 15:55:28 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../hbase/replication/ReplicationFactory.java   |   5 +-
 .../replication/ReplicationStateZKBase.java | 153 ---
 .../replication/ReplicationTrackerZKImpl.java   |  21 +--
 .../replication/ZKReplicationPeerStorage.java   |  24 ++-
 .../replication/ZKReplicationStorageBase.java   |  13 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   4 +-
 .../master/ReplicationPeerConfigUpgrader.java   | 128 
 .../regionserver/DumpReplicationQueues.java |  18 +--
 .../replication/regionserver/Replication.java   |   3 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |   3 +-
 .../TestReplicationTrackerZKImpl.java   |   3 +-
 .../replication/master/TestTableCFsUpdater.java |  41 ++---
 .../TestReplicationSourceManager.java   |   6 +-
 13 files changed, 136 insertions(+), 286 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8b99b5a3/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index 6c66aff..2a970ba 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -33,9 +33,8 @@ public class ReplicationFactory {
 return new ReplicationPeers(zk, conf);
   }
 
-  public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper,
-  final ReplicationPeers replicationPeers, Configuration conf, Abortable 
abortable,
+  public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper, 
Abortable abortable,
   Stoppable stopper) {
-return new ReplicationTrackerZKImpl(zookeeper, replicationPeers, conf, 
abortable, stopper);
+return new ReplicationTrackerZKImpl(zookeeper, abortable, stopper);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b99b5a3/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
deleted file mode 100644
index f49537c..000
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.replication;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
-import org.apache.hadoop.hbase.zookeeper.ZKConfig;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import 

[26/37] hbase git commit: HBASE-19617 Remove ReplicationQueues, use ReplicationQueueStorage directly

2018-01-11 Thread zhangduo
HBASE-19617 Remove ReplicationQueues, use ReplicationQueueStorage directly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/11158715
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/11158715
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/11158715

Branch: refs/heads/HBASE-19397-branch-2
Commit: 11158715326f87051fc6ede61375d6ed11f47f04
Parents: 0c927a0
Author: zhangduo 
Authored: Wed Dec 27 22:03:51 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../hbase/replication/ReplicationFactory.java   |   9 +-
 .../hbase/replication/ReplicationQueues.java| 160 ---
 .../replication/ReplicationQueuesArguments.java |  70 ---
 .../replication/ReplicationQueuesZKImpl.java| 407 -
 .../hbase/replication/ReplicationTableBase.java | 442 ---
 .../replication/ReplicationTrackerZKImpl.java   |  21 +-
 .../replication/ZKReplicationQueueStorage.java  |  22 +
 .../replication/TestReplicationStateBasic.java  | 131 +++---
 .../replication/TestReplicationStateZKImpl.java |  41 +-
 .../regionserver/DumpReplicationQueues.java |  15 +-
 .../RecoveredReplicationSource.java |  17 +-
 .../RecoveredReplicationSourceShipper.java  |  22 +-
 .../replication/regionserver/Replication.java   |  41 +-
 .../regionserver/ReplicationSource.java |  23 +-
 .../ReplicationSourceInterface.java |  11 +-
 .../regionserver/ReplicationSourceManager.java  | 261 ++-
 .../regionserver/ReplicationSyncUp.java |  29 +-
 .../hbase/master/cleaner/TestLogsCleaner.java   |  12 +-
 .../cleaner/TestReplicationHFileCleaner.java|  26 +-
 .../cleaner/TestReplicationZKNodeCleaner.java   |  22 +-
 .../replication/ReplicationSourceDummy.java |   6 +-
 .../replication/TestReplicationSyncUpTool.java  |   6 +-
 .../TestReplicationSourceManager.java   | 104 ++---
 .../TestReplicationSourceManagerZkImpl.java |  58 +--
 24 files changed, 385 insertions(+), 1571 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/11158715/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index 6c1c213..5e70e57 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -17,12 +17,11 @@
  */
 package org.apache.hadoop.hbase.replication;
 
-import org.apache.commons.lang3.reflect.ConstructorUtils;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * A factory class for instantiating replication objects that deal with 
replication state.
@@ -30,12 +29,6 @@ import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 @InterfaceAudience.Private
 public class ReplicationFactory {
 
-  public static ReplicationQueues 
getReplicationQueues(ReplicationQueuesArguments args)
-  throws Exception {
-return (ReplicationQueues) 
ConstructorUtils.invokeConstructor(ReplicationQueuesZKImpl.class,
-  args);
-  }
-
   public static ReplicationPeers getReplicationPeers(ZKWatcher zk, 
Configuration conf,
   Abortable abortable) {
 return getReplicationPeers(zk, conf, null, abortable);

http://git-wip-us.apache.org/repos/asf/hbase/blob/11158715/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
deleted file mode 100644
index 7f440b1..000
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy 

[24/37] hbase git commit: HBASE-19622 Reimplement ReplicationPeers with the new replication storage interface

2018-01-11 Thread zhangduo
HBASE-19622 Reimplement ReplicationPeers with the new replication storage 
interface


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/17665554
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/17665554
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/17665554

Branch: refs/heads/HBASE-19397-branch-2
Commit: 1766555458f64bd0042c35009033e1992d528799
Parents: bfe1759
Author: huzheng 
Authored: Tue Dec 26 16:46:10 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../replication/ReplicationPeerConfigUtil.java  |  10 +-
 .../replication/VerifyReplication.java  |   9 +-
 .../hbase/replication/ReplicationFactory.java   |  10 +-
 .../hbase/replication/ReplicationPeerImpl.java  |  60 +-
 .../replication/ReplicationPeerStorage.java |   3 +-
 .../hbase/replication/ReplicationPeers.java | 238 
 .../replication/ReplicationPeersZKImpl.java | 542 ---
 .../replication/ZKReplicationPeerStorage.java   |  12 +-
 .../replication/ZKReplicationStorageBase.java   |   3 +-
 .../replication/TestReplicationStateBasic.java  | 125 ++---
 .../replication/TestReplicationStateZKImpl.java |   2 +-
 .../TestZKReplicationPeerStorage.java   |  12 +-
 .../cleaner/ReplicationZKNodeCleaner.java   |  57 +-
 .../replication/ReplicationPeerManager.java |   6 +-
 .../regionserver/DumpReplicationQueues.java |   2 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  49 +-
 .../replication/regionserver/Replication.java   |   2 +-
 .../regionserver/ReplicationSource.java |   6 +-
 .../regionserver/ReplicationSourceManager.java  |  45 +-
 .../cleaner/TestReplicationHFileCleaner.java|   7 +-
 .../replication/TestMultiSlaveReplication.java  |   2 -
 .../TestReplicationTrackerZKImpl.java   |  36 +-
 .../TestReplicationSourceManager.java   |  17 +-
 .../hadoop/hbase/HBaseZKTestingUtility.java |   3 +-
 24 files changed, 308 insertions(+), 950 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/17665554/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index 022bf64..a234a9b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -247,22 +247,22 @@ public final class ReplicationPeerConfigUtil {
   public static ReplicationPeerConfig parsePeerFrom(final byte[] bytes)
   throws DeserializationException {
 if (ProtobufUtil.isPBMagicPrefix(bytes)) {
-  int pblen = ProtobufUtil.lengthOfPBMagic();
+  int pbLen = ProtobufUtil.lengthOfPBMagic();
   ReplicationProtos.ReplicationPeer.Builder builder =
   ReplicationProtos.ReplicationPeer.newBuilder();
   ReplicationProtos.ReplicationPeer peer;
   try {
-ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
+ProtobufUtil.mergeFrom(builder, bytes, pbLen, bytes.length - pbLen);
 peer = builder.build();
   } catch (IOException e) {
 throw new DeserializationException(e);
   }
   return convert(peer);
 } else {
-  if (bytes.length > 0) {
-return 
ReplicationPeerConfig.newBuilder().setClusterKey(Bytes.toString(bytes)).build();
+  if (bytes == null || bytes.length <= 0) {
+throw new DeserializationException("Bytes to deserialize should not be 
empty.");
   }
-  return ReplicationPeerConfig.newBuilder().setClusterKey("").build();
+  return 
ReplicationPeerConfig.newBuilder().setClusterKey(Bytes.toString(bytes)).build();
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/17665554/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index 09d4b4b..f0070f0 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -339,15 +339,10 @@ public class VerifyReplication extends Configured 
implements 

[37/37] hbase git commit: HBASE-19707 Race in start and terminate of a replication source after we async start replication endpoint

2018-01-11 Thread zhangduo
HBASE-19707 Race in start and terminate of a replication source after we async 
start replication endpoint


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2a2c6a69
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2a2c6a69
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2a2c6a69

Branch: refs/heads/HBASE-19397-branch-2
Commit: 2a2c6a69c0e5d0403cd16c84a12d4fc7274199f8
Parents: 69fac91
Author: zhangduo 
Authored: Fri Jan 5 18:28:44 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../RecoveredReplicationSource.java |  16 +-
 .../regionserver/ReplicationSource.java | 202 ++-
 .../replication/TestReplicationAdmin.java   |   1 -
 3 files changed, 116 insertions(+), 103 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2a2c6a69/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
index 1be9a88..3cae0f2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
@@ -68,7 +68,7 @@ public class RecoveredReplicationSource extends 
ReplicationSource {
   LOG.debug("Someone has beat us to start a worker thread for wal group " 
+ walGroupId);
 } else {
   LOG.debug("Starting up worker for wal group " + walGroupId);
-  worker.startup(getUncaughtExceptionHandler());
+  worker.startup(this::uncaughtException);
   worker.setWALReader(
 startNewWALReader(worker.getName(), walGroupId, queue, 
worker.getStartPosition()));
   workerThreads.put(walGroupId, worker);
@@ -76,13 +76,13 @@ public class RecoveredReplicationSource extends 
ReplicationSource {
   }
 
   @Override
-  protected ReplicationSourceWALReader startNewWALReader(String threadName,
-  String walGroupId, PriorityBlockingQueue queue, long 
startPosition) {
-ReplicationSourceWALReader walReader = new 
RecoveredReplicationSourceWALReader(fs,
-conf, queue, startPosition, walEntryFilter, this);
-Threads.setDaemonThreadRunning(walReader, threadName
-+ ".replicationSource.replicationWALReaderThread." + walGroupId + "," 
+ queueId,
-  getUncaughtExceptionHandler());
+  protected ReplicationSourceWALReader startNewWALReader(String threadName, 
String walGroupId,
+  PriorityBlockingQueue queue, long startPosition) {
+ReplicationSourceWALReader walReader =
+  new RecoveredReplicationSourceWALReader(fs, conf, queue, startPosition, 
walEntryFilter, this);
+Threads.setDaemonThreadRunning(walReader,
+  threadName + ".replicationSource.replicationWALReaderThread." + 
walGroupId + "," + queueId,
+  this::uncaughtException);
 return walReader;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2a2c6a69/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 6b622ee..923d893 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -76,7 +76,7 @@ import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
  * 
  */
 @InterfaceAudience.Private
-public class ReplicationSource extends Thread implements 
ReplicationSourceInterface {
+public class ReplicationSource implements ReplicationSourceInterface {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(ReplicationSource.class);
   // Queues of logs to process, entry in format of walGroupId->queue,
@@ -115,10 +115,8 @@ public class ReplicationSource extends Thread implements 
ReplicationSourceInterf
   private MetricsSource metrics;
   // WARN threshold for the number of queued logs, defaults to 2
   private int logQueueWarnThreshold;
-  // whether the replication endpoint has been initialized
-  private volatile boolean endpointInitialized = false;
   // ReplicationEndpoint which will handle the actual replication
-  private ReplicationEndpoint 

[08/37] hbase git commit: HBASE-19524 Master side changes for moving peer modification from zk watcher to procedure

2018-01-11 Thread zhangduo
HBASE-19524 Master side changes for moving peer modification from zk watcher to 
procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8a34dc88
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8a34dc88
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8a34dc88

Branch: refs/heads/HBASE-19397-branch-2
Commit: 8a34dc88318ef505a687594b7021a9a693300a7c
Parents: 3cc7ac6
Author: zhangduo 
Authored: Mon Dec 18 15:22:36 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../procedure2/RemoteProcedureDispatcher.java   |   3 +-
 .../src/main/protobuf/MasterProcedure.proto |  21 +++-
 .../src/main/protobuf/RegionServerStatus.proto  |   3 +-
 .../src/main/protobuf/Replication.proto |   5 +
 .../replication/ReplicationPeersZKImpl.java |   4 +-
 .../org/apache/hadoop/hbase/master/HMaster.java | 100 ---
 .../hadoop/hbase/master/MasterRpcServices.java  |   4 +-
 .../hadoop/hbase/master/MasterServices.java |  26 +++--
 .../assignment/RegionTransitionProcedure.java   |  13 +--
 .../master/procedure/MasterProcedureEnv.java|   5 +
 .../master/procedure/ProcedurePrepareLatch.java |   2 +-
 .../master/replication/AddPeerProcedure.java|  97 ++
 .../replication/DisablePeerProcedure.java   |  70 +
 .../master/replication/EnablePeerProcedure.java |  69 +
 .../master/replication/ModifyPeerProcedure.java |  97 +++---
 .../master/replication/RefreshPeerCallable.java |  67 -
 .../replication/RefreshPeerProcedure.java   |  28 --
 .../master/replication/RemovePeerProcedure.java |  69 +
 .../master/replication/ReplicationManager.java  |  76 +++---
 .../replication/UpdatePeerConfigProcedure.java  |  92 +
 .../hbase/regionserver/HRegionServer.java   |   6 +-
 .../regionserver/RefreshPeerCallable.java   |  70 +
 .../hbase/master/MockNoopMasterServices.java|  23 +++--
 .../replication/DummyModifyPeerProcedure.java   |  13 ++-
 24 files changed, 737 insertions(+), 226 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8a34dc88/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
index 78c49fb..3b925a6 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
@@ -247,9 +247,8 @@ public abstract class RemoteProcedureDispatcher

[02/37] hbase git commit: HBASE-19751 Use RegionInfo directly instead of an identifier and a namespace when getting WAL

2018-01-11 Thread zhangduo
HBASE-19751 Use RegionInfo directly instead of an identifier and a namespace 
when getting WAL


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/814d08a2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/814d08a2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/814d08a2

Branch: refs/heads/HBASE-19397-branch-2
Commit: 814d08a2d663ccdbacb33e84475713a72bf3726a
Parents: 0885fe1
Author: zhangduo 
Authored: Thu Jan 11 15:47:08 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 11 15:48:33 2018 +0800

--
 .../hbase/mapreduce/TestWALRecordReader.java|  50 +++
 .../hbase/regionserver/HRegionServer.java   |  14 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |   6 +-
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |   3 +-
 .../hadoop/hbase/wal/DisabledWALProvider.java   |   2 +-
 .../hbase/wal/RegionGroupingProvider.java   |  40 +++---
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  64 -
 .../apache/hadoop/hbase/wal/WALProvider.java|  17 ++-
 .../hadoop/hbase/HBaseTestingUtility.java   |   5 +-
 .../hbase/coprocessor/TestWALObserver.java  | 117 +++--
 .../regionserver/TestCacheOnWriteInSchema.java  |  52 
 .../TestCompactionArchiveConcurrentClose.java   |  31 ++---
 .../TestCompactionArchiveIOException.java   |  42 +++---
 .../hbase/regionserver/TestDefaultMemStore.java |  45 +++
 .../hbase/regionserver/TestHMobStore.java   |   3 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |  13 +-
 .../regionserver/TestHRegionReplayEvents.java   |  43 +++---
 .../hadoop/hbase/regionserver/TestHStore.java   |  14 +-
 .../TestStoreFileRefresherChore.java|  51 
 .../TestWALMonotonicallyIncreasingSeqId.java|   2 +-
 .../wal/AbstractTestLogRolling.java |   9 +-
 .../hbase/regionserver/wal/TestDurability.java  |  93 +++--
 .../regionserver/wal/TestLogRollAbort.java  |  39 +++---
 .../wal/TestLogRollingNoCluster.java|  17 +--
 .../wal/TestWALActionsListener.java |  25 ++--
 .../TestReplicationSourceManager.java   |   2 +-
 .../regionserver/TestWALEntryStream.java|  13 +-
 .../apache/hadoop/hbase/wal/IOTestProvider.java |  48 ---
 .../wal/TestBoundedRegionGroupingStrategy.java  |  16 ++-
 .../hadoop/hbase/wal/TestFSHLogProvider.java| 130 ---
 .../apache/hadoop/hbase/wal/TestSecureWAL.java  |  22 +---
 .../apache/hadoop/hbase/wal/TestWALFactory.java | 127 +++---
 .../apache/hadoop/hbase/wal/TestWALMethods.java |   2 +-
 .../hbase/wal/TestWALReaderOnSecureWAL.java |  25 ++--
 .../apache/hadoop/hbase/wal/TestWALRootDir.java |  35 +++--
 .../apache/hadoop/hbase/wal/TestWALSplit.java   |  37 +++---
 .../hbase/wal/WALPerformanceEvaluation.java |  47 ---
 37 files changed, 591 insertions(+), 710 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/814d08a2/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
--
diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
index 18bb135..c8db903 100644
--- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
+++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
@@ -24,30 +24,28 @@ import static org.junit.Assert.assertTrue;
 import java.util.List;
 import java.util.NavigableMap;
 import java.util.TreeMap;
-import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.mapreduce.WALInputFormat.WALKeyRecordReader;
 import org.apache.hadoop.hbase.mapreduce.WALInputFormat.WALRecordReader;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
-import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.hadoop.hbase.testclassification.MapReduceTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import 

[03/37] hbase git commit: HBASE-19748 TestRegionReplicaFailover and TestRegionReplicaReplicationEndpoint UT hangs

2018-01-11 Thread zhangduo
HBASE-19748 TestRegionReplicaFailover and TestRegionReplicaReplicationEndpoint 
UT hangs


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c5d18c0f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c5d18c0f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c5d18c0f

Branch: refs/heads/HBASE-19397-branch-2
Commit: c5d18c0f2d1abd4df5ac79efd015c5bf8f86ccbc
Parents: 87077b4
Author: huzheng 
Authored: Wed Jan 10 15:00:30 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 11 17:11:23 2018 +0800

--
 .../main/java/org/apache/hadoop/hbase/master/HMaster.java   | 9 -
 1 file changed, 4 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c5d18c0f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index b3e8250..520c4b7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -38,7 +38,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Objects;
-import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
@@ -69,6 +68,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.PleaseHoldException;
+import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerMetricsBuilder;
 import org.apache.hadoop.hbase.ServerName;
@@ -3342,13 +3342,12 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   cpHost.preGetReplicationPeerConfig(peerId);
 }
 LOG.info(getClientIdAuditPrefix() + " get replication peer config, id=" + 
peerId);
-Optional peerConfig =
-  this.replicationPeerManager.getPeerConfig(peerId);
-
+ReplicationPeerConfig peerConfig = 
this.replicationPeerManager.getPeerConfig(peerId)
+.orElseThrow(() -> new ReplicationPeerNotFoundException(peerId));
 if (cpHost != null) {
   cpHost.postGetReplicationPeerConfig(peerId);
 }
-return peerConfig.orElse(null);
+return peerConfig;
   }
 
   @Override



[17/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git 
a/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html 
b/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
index b39e25e..a34364c 100644
--- a/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -2318,1804 +2318,1803 @@
 2310Configuration confForWAL = new 
Configuration(conf);
 2311confForWAL.set(HConstants.HBASE_DIR, 
rootDir.toString());
 2312return (new WALFactory(confForWAL,
-2313
Collections.WALActionsListenersingletonList(new MetricsWAL()),
-2314"hregion-" + 
RandomStringUtils.randomNumeric(8))).
-2315
getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace());
-2316  }
-2317
-2318  /**
-2319   * Create a region with it's own WAL. 
Be sure to call
-2320   * {@link 
HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
-2321   */
-2322  public static HRegion 
createRegionAndWAL(final RegionInfo info, final Path rootDir,
-2323  final Configuration conf, final 
TableDescriptor htd) throws IOException {
-2324return createRegionAndWAL(info, 
rootDir, conf, htd, true);
-2325  }
-2326
-2327  /**
-2328   * Create a region with it's own WAL. 
Be sure to call
-2329   * {@link 
HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
-2330   */
-2331  public static HRegion 
createRegionAndWAL(final RegionInfo info, final Path rootDir,
-2332  final Configuration conf, final 
TableDescriptor htd, boolean initialize)
-2333  throws IOException {
-2334
ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, 
null);
-2335WAL wal = createWal(conf, rootDir, 
info);
-2336return HRegion.createHRegion(info, 
rootDir, conf, htd, wal, initialize);
-2337  }
-2338
-2339  /**
-2340   * Returns all rows from the 
hbase:meta table.
-2341   *
-2342   * @throws IOException When reading 
the rows fails.
-2343   */
-2344  public Listbyte[] 
getMetaTableRows() throws IOException {
-2345// TODO: Redo using 
MetaTableAccessor class
-2346Table t = 
getConnection().getTable(TableName.META_TABLE_NAME);
-2347Listbyte[] rows = new 
ArrayList();
-2348ResultScanner s = t.getScanner(new 
Scan());
-2349for (Result result : s) {
-2350  LOG.info("getMetaTableRows: row 
- " +
-2351
Bytes.toStringBinary(result.getRow()));
-2352  rows.add(result.getRow());
-2353}
-2354s.close();
-2355t.close();
-2356return rows;
-2357  }
-2358
-2359  /**
-2360   * Returns all rows from the 
hbase:meta table for a given user table
-2361   *
-2362   * @throws IOException When reading 
the rows fails.
-2363   */
-2364  public Listbyte[] 
getMetaTableRows(TableName tableName) throws IOException {
-2365// TODO: Redo using 
MetaTableAccessor.
-2366Table t = 
getConnection().getTable(TableName.META_TABLE_NAME);
-2367Listbyte[] rows = new 
ArrayList();
-2368ResultScanner s = t.getScanner(new 
Scan());
-2369for (Result result : s) {
-2370  RegionInfo info = 
MetaTableAccessor.getRegionInfo(result);
-2371  if (info == null) {
-2372LOG.error("No region info for 
row " + Bytes.toString(result.getRow()));
-2373// TODO figure out what to do 
for this new hosed case.
-2374continue;
-2375  }
-2376
-2377  if 
(info.getTable().equals(tableName)) {
-2378LOG.info("getMetaTableRows: row 
- " +
-2379
Bytes.toStringBinary(result.getRow()) + info);
-2380rows.add(result.getRow());
-2381  }
-2382}
-2383s.close();
-2384t.close();
-2385return rows;
-2386  }
-2387
-2388  /*
-2389   * Find any other region server which 
is different from the one identified by parameter
-2390   * @param rs
-2391   * @return another region server
-2392   */
-2393  public HRegionServer 
getOtherRegionServer(HRegionServer rs) {
-2394for 
(JVMClusterUtil.RegionServerThread rst :
-2395  
getMiniHBaseCluster().getRegionServerThreads()) {
-2396  if (!(rst.getRegionServer() == 
rs)) {
-2397return rst.getRegionServer();
-2398  }
-2399}
-2400return null;
-2401  }
-2402
-2403  /**
-2404   * Tool to get the reference to the 
region server object that holds the
-2405   * region of the specified user 
table.
-2406   * It first searches for the meta rows 
that contain the region of the
-2407   * specified table, then gets the 
index of that RS, and finally retrieves
-2408   * the RS's reference.
-2409   * @param tableName user table to 
lookup in hbase:meta
-2410   * @return region server that holds 
it, null if the row doesn't exist
-2411   * @throws IOException
-2412   * @throws InterruptedException
-2413   */
-2414  public HRegionServer 

hbase-site git commit: INFRA-10751 Empty commit

2018-01-11 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site f183e80f4 -> ba96e306f


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/ba96e306
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/ba96e306
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/ba96e306

Branch: refs/heads/asf-site
Commit: ba96e306f8c8c00606c9dee9b30f06f5354aeef9
Parents: f183e80
Author: jenkins 
Authored: Thu Jan 11 15:30:54 2018 +
Committer: jenkins 
Committed: Thu Jan 11 15:30:54 2018 +

--

--




[48/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 4f95be0..d63eae9 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 2007 - 2018 The Apache Software Foundation
 
   File: 3487,
- Errors: 18209,
+ Errors: 18149,
  Warnings: 0,
  Infos: 0
   
@@ -3527,7 +3527,7 @@ under the License.
   0
 
 
-  4
+  3
 
   
   
@@ -5053,7 +5053,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -5445,7 +5445,7 @@ under the License.
   0
 
 
-  5
+  3
 
   
   
@@ -8511,7 +8511,7 @@ under the License.
   0
 
 
-  54
+  52
 
   
   
@@ -11311,7 +11311,7 @@ under the License.
   0
 
 
-  13
+  12
 
   
   
@@ -12599,7 +12599,7 @@ under the License.
   0
 
 
-  6
+  1
 
   
   
@@ -12767,7 +12767,7 @@ under the License.
   0
 
 
-  5
+  2
 
   
   
@@ -14293,7 +14293,7 @@ under the License.
   0
 
 
-  22
+  18
 
   
   
@@ -14699,7 +14699,7 @@ under the License.
   0
 
 
-  8
+  7
 
   
   
@@ -20985,7 +20985,7 @@ under the License.
   0
 
 
-  2
+  1
 
   
   
@@ -21013,7 +21013,7 @@ under the License.
   0
 
 
-  5
+  1
 
   
   
@@ -24331,7 +24331,7 @@ under the License.
   0
 
 
-  8
+  5
 
   
   
@@ -25675,7 +25675,7 @@ under the License.
   0
 
 
-  7
+  6
 
   
   
@@ -31051,7 +31051,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -32535,7 +32535,7 @@ under the License.
   0
 
 
-  18
+  17
 
   
   
@@ -37393,7 +37393,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -39969,7 +39969,7 @@ under the License.
   0
 
 
-  11
+  2
 
   
   
@@ -42755,7 +42755,7 @@ under the License.
   0
 
 
-  2
+  1
 
   
   
@@ -44771,7 +44771,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -46493,7 +46493,7 @@ under the License.
   0
 
 
-  16
+  0
 
   
   
@@ -46815,7 +46815,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/coc.html
--
diff --git a/coc.html b/coc.html
index c863c60..24a5fd5 100644
--- 

[13/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.html
 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.html
index af41c2e..d4fb05c 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestCacheOnWriteInSchema
+public class TestCacheOnWriteInSchema
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Tests HFile cache-on-write functionality for 
data blocks, non-root
  index blocks, and Bloom filter blocks, as specified by the column 
family.
@@ -304,7 +304,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -313,7 +313,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 name
-publicorg.junit.rules.TestName name
+publicorg.junit.rules.TestName name
 
 
 
@@ -322,7 +322,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TEST_UTIL
-private static finalHBaseTestingUtility TEST_UTIL
+private static finalHBaseTestingUtility TEST_UTIL
 
 
 
@@ -331,7 +331,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 DIR
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String DIR
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String DIR
 
 
 
@@ -340,7 +340,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 table
-private staticbyte[] table
+private staticbyte[] table
 
 
 
@@ -349,7 +349,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 family
-private staticbyte[] family
+private staticbyte[] family
 
 
 
@@ -358,7 +358,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 NUM_KV
-private static finalint NUM_KV
+private static finalint NUM_KV
 
 See Also:
 Constant
 Field Values
@@ -371,7 +371,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 rand
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Random.html?is-external=true;
 title="class or interface in java.util">Random rand
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Random.html?is-external=true;
 title="class or interface in java.util">Random rand
 
 
 
@@ -380,7 +380,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 NUM_VALID_KEY_TYPES
-private static finalint NUM_VALID_KEY_TYPES
+private static finalint NUM_VALID_KEY_TYPES
 The number of valid key types possible in a store file
 
 
@@ -390,7 +390,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 cowType
-private finalTestCacheOnWriteInSchema.CacheOnWriteType
 cowType
+private finalTestCacheOnWriteInSchema.CacheOnWriteType
 cowType
 
 
 
@@ -399,7 +399,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 conf
-privateorg.apache.hadoop.conf.Configuration conf
+privateorg.apache.hadoop.conf.Configuration conf
 
 
 
@@ -408,7 +408,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testDescription
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String testDescription
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String testDescription
 
 
 
@@ -417,7 +417,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 region
-privateorg.apache.hadoop.hbase.regionserver.HRegion region
+privateorg.apache.hadoop.hbase.regionserver.HRegion region
 
 
 
@@ -426,7 +426,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 store
-privateorg.apache.hadoop.hbase.regionserver.HStore store
+privateorg.apache.hadoop.hbase.regionserver.HStore store
 
 
 
@@ -435,7 +435,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 walFactory
-privateorg.apache.hadoop.hbase.wal.WALFactory walFactory
+privateorg.apache.hadoop.hbase.wal.WALFactory walFactory
 
 
 
@@ -444,7 +444,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 fs
-privateorg.apache.hadoop.fs.FileSystem fs

[50/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/apidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerConfigBuilder.html
--
diff --git 
a/apidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerConfigBuilder.html
 
b/apidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerConfigBuilder.html
index 1e8d9c6..05a59b9 100644
--- 
a/apidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerConfigBuilder.html
+++ 
b/apidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerConfigBuilder.html
@@ -114,25 +114,35 @@
 
 
 default ReplicationPeerConfigBuilder
-ReplicationPeerConfigBuilder.putAllConfiguration(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringconfiguration)
+ReplicationPeerConfigBuilder.putAllConfiguration(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringconfiguration)
+Adds all of the provided "raw" configuration entries to 
this.
+
 
 
 default ReplicationPeerConfigBuilder
-ReplicationPeerConfigBuilder.putAllPeerData(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in 
java.util">Mapbyte[],byte[]peerData)
+ReplicationPeerConfigBuilder.putAllPeerData(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in 
java.util">Mapbyte[],byte[]peerData)
+Sets all of the provided serialized peer configuration 
data.
+
 
 
 ReplicationPeerConfigBuilder
 ReplicationPeerConfigBuilder.putConfiguration(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringkey,
-http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringvalue)
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringvalue)
+Sets a "raw" configuration property for this replication 
peer.
+
 
 
 ReplicationPeerConfigBuilder
 ReplicationPeerConfigBuilder.putPeerData(byte[]key,
-   byte[]value)
+   byte[]value)
+Sets the serialized peer configuration data
+
 
 
 ReplicationPeerConfigBuilder
-ReplicationPeerConfigBuilder.setBandwidth(longbandwidth)
+ReplicationPeerConfigBuilder.setBandwidth(longbandwidth)
+Sets the speed, in bytes per second, for any one 
RegionServer to replicate data to the peer.
+
 
 
 ReplicationPeerConfigBuilder
@@ -143,19 +153,28 @@
 
 
 ReplicationPeerConfigBuilder
-ReplicationPeerConfigBuilder.setExcludeNamespaces(http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">Sethttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringnamespaces)
+ReplicationPeerConfigBuilder.setExcludeNamespaces(http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">Sethttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringnamespaces)
+Sets the collection of namespaces which should not be 
replicated when all user tables are
+ configured to be replicated.
+
 
 
 ReplicationPeerConfigBuilder
-ReplicationPeerConfigBuilder.setExcludeTableCFsMap(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapTableName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringtableCFsMap)
+ReplicationPeerConfigBuilder.setExcludeTableCFsMap(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapTableName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringtableCFsMap)
+Sets the mapping of table name to column families which 

[20/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html
index d58f717..35d8362 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-client archetype  Project 
Dependencies
 
@@ -4168,7 +4168,7 @@ The following provides more details on the included 
cryptographic software:
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-10
+  Last Published: 
2018-01-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
index 18a9f2f..bf1e334 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-client archetype  Reactor 
Dependency Convergence
 
@@ -912,7 +912,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-10
+  Last Published: 
2018-01-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html
index 95c2003..bdccdc9 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-client archetype  
Dependency Information
 
@@ -147,7 +147,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-10
+  Last Published: 
2018-01-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html
index 79797ff..482ab8a 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-client archetype  Project 
Dependency Management
 
@@ -810,7 +810,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-10
+  Last Published: 
2018-01-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html
index 4d538bd..9bf6e8e 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - 

[11/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
index 25be178..19470c7 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestHRegion
+public class TestHRegion
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Basic stand-alone testing of HRegion.  No clusters!
 
@@ -1010,7 +1010,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -1019,7 +1019,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 name
-publicorg.junit.rules.TestName name
+publicorg.junit.rules.TestName name
 
 
 
@@ -1028,7 +1028,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 timeout
-public static finalorg.junit.rules.TestRule timeout
+public static finalorg.junit.rules.TestRule timeout
 
 
 
@@ -1037,7 +1037,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 thrown
-public finalorg.junit.rules.ExpectedException thrown
+public finalorg.junit.rules.ExpectedException thrown
 
 
 
@@ -1046,7 +1046,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 COLUMN_FAMILY
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String COLUMN_FAMILY
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String COLUMN_FAMILY
 
 See Also:
 Constant
 Field Values
@@ -1059,7 +1059,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 COLUMN_FAMILY_BYTES
-private static finalbyte[] COLUMN_FAMILY_BYTES
+private static finalbyte[] COLUMN_FAMILY_BYTES
 
 
 
@@ -1068,7 +1068,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 region
-org.apache.hadoop.hbase.regionserver.HRegion region
+org.apache.hadoop.hbase.regionserver.HRegion region
 
 
 
@@ -1077,7 +1077,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TEST_UTIL
-protected staticHBaseTestingUtility TEST_UTIL
+protected staticHBaseTestingUtility TEST_UTIL
 
 
 
@@ -1086,7 +1086,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 CONF
-public staticorg.apache.hadoop.conf.Configuration CONF
+public staticorg.apache.hadoop.conf.Configuration CONF
 
 
 
@@ -1095,7 +1095,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 dir
-privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String dir
+privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String dir
 
 
 
@@ -1104,7 +1104,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 FILESYSTEM
-private staticorg.apache.hadoop.fs.FileSystem FILESYSTEM
+private staticorg.apache.hadoop.fs.FileSystem FILESYSTEM
 
 
 
@@ -1113,7 +1113,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 MAX_VERSIONS
-private finalint MAX_VERSIONS
+private finalint MAX_VERSIONS
 
 See Also:
 Constant
 Field Values
@@ -1126,7 +1126,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 tableName
-protectedorg.apache.hadoop.hbase.TableName tableName
+protectedorg.apache.hadoop.hbase.TableName tableName
 
 
 
@@ -1135,7 +1135,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 method
-protectedhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String method
+protectedhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String method
 
 
 
@@ -1144,7 +1144,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 qual
-protected finalbyte[] qual
+protected finalbyte[] qual
 
 
 
@@ -1153,7 +1153,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 qual1
-protected finalbyte[] qual1
+protected finalbyte[] qual1
 
 
 
@@ -1162,7 +1162,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 qual2
-protected finalbyte[] qual2
+protected finalbyte[] qual2
 
 
 
@@ -1171,7 +1171,7 

[03/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.html 
b/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.html
index 20e11b1..4b5f191 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestWALReaderOnSecureWAL
+public class TestWALReaderOnSecureWAL
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 
 
@@ -231,7 +231,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TEST_UTIL
-static finalHBaseTestingUtility TEST_UTIL
+static finalHBaseTestingUtility TEST_UTIL
 
 
 
@@ -240,7 +240,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 value
-finalbyte[] value
+finalbyte[] value
 
 
 
@@ -249,7 +249,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 WAL_ENCRYPTION
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String WAL_ENCRYPTION
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String WAL_ENCRYPTION
 
 See Also:
 Constant
 Field Values
@@ -262,7 +262,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 currentTest
-publicorg.junit.rules.TestName currentTest
+publicorg.junit.rules.TestName currentTest
 
 
 
@@ -279,7 +279,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TestWALReaderOnSecureWAL
-publicTestWALReaderOnSecureWAL()
+publicTestWALReaderOnSecureWAL()
 
 
 
@@ -296,7 +296,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setUpBeforeClass
-public staticvoidsetUpBeforeClass()
+public staticvoidsetUpBeforeClass()
  throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -310,7 +310,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 writeWAL
-privateorg.apache.hadoop.fs.PathwriteWAL(org.apache.hadoop.hbase.wal.WALFactorywals,
+privateorg.apache.hadoop.fs.PathwriteWAL(org.apache.hadoop.hbase.wal.WALFactorywals,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtblName,
booleanoffheap)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
@@ -326,7 +326,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testWALReaderOnSecureWALWithKeyValues
-publicvoidtestWALReaderOnSecureWALWithKeyValues()
+publicvoidtestWALReaderOnSecureWALWithKeyValues()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -340,7 +340,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testWALReaderOnSecureWALWithOffheapKeyValues
-publicvoidtestWALReaderOnSecureWALWithOffheapKeyValues()
+publicvoidtestWALReaderOnSecureWALWithOffheapKeyValues()
   throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -354,7 +354,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testSecureWALInternal
-privatevoidtestSecureWALInternal(booleanoffheap)
+privatevoidtestSecureWALInternal(booleanoffheap)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException,
http://docs.oracle.com/javase/8/docs/api/java/io/FileNotFoundException.html?is-external=true;
 title="class or interface in java.io">FileNotFoundException
 
@@ -370,7 +370,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testSecureWALReaderOnWAL
-publicvoidtestSecureWALReaderOnWAL()
+publicvoidtestSecureWALReaderOnWAL()
   throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 
 Throws:

[31/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.html
index 1f114e0..01e19b2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.html
@@ -25,1239 +25,1263 @@
 017 */
 018package org.apache.hadoop.hbase.tool;
 019
-020import static java.lang.String.format;
-021
-022import java.io.FileNotFoundException;
-023import java.io.IOException;
-024import java.io.InterruptedIOException;
-025import java.nio.ByteBuffer;
-026import java.util.ArrayDeque;
-027import java.util.ArrayList;
-028import java.util.Arrays;
-029import java.util.Collection;
-030import java.util.Collections;
-031import java.util.Deque;
-032import java.util.HashMap;
-033import java.util.HashSet;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Map.Entry;
-037import java.util.Optional;
-038import java.util.Set;
-039import java.util.SortedMap;
-040import java.util.TreeMap;
-041import java.util.UUID;
-042import java.util.concurrent.Callable;
-043import 
java.util.concurrent.ExecutionException;
-044import 
java.util.concurrent.ExecutorService;
-045import java.util.concurrent.Future;
-046import 
java.util.concurrent.LinkedBlockingQueue;
-047import 
java.util.concurrent.ThreadPoolExecutor;
-048import java.util.concurrent.TimeUnit;
-049import 
java.util.concurrent.atomic.AtomicInteger;
-050import java.util.stream.Collectors;
-051
-052import 
org.apache.commons.lang3.mutable.MutableInt;
-053import 
org.apache.hadoop.conf.Configuration;
-054import 
org.apache.hadoop.conf.Configured;
-055import org.apache.hadoop.fs.FileStatus;
-056import org.apache.hadoop.fs.FileSystem;
-057import org.apache.hadoop.fs.Path;
-058import 
org.apache.hadoop.fs.permission.FsPermission;
-059import 
org.apache.hadoop.hbase.HBaseConfiguration;
-060import 
org.apache.hadoop.hbase.HConstants;
-061import 
org.apache.hadoop.hbase.TableName;
-062import 
org.apache.hadoop.hbase.TableNotFoundException;
-063import 
org.apache.yetus.audience.InterfaceAudience;
-064import org.slf4j.Logger;
-065import org.slf4j.LoggerFactory;
-066import 
org.apache.hadoop.hbase.client.Admin;
-067import 
org.apache.hadoop.hbase.client.ClientServiceCallable;
-068import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-069import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-070import 
org.apache.hadoop.hbase.client.Connection;
-071import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-072import 
org.apache.hadoop.hbase.client.RegionLocator;
-073import 
org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
-074import 
org.apache.hadoop.hbase.client.SecureBulkLoadClient;
-075import 
org.apache.hadoop.hbase.client.Table;
-076import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-077import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-078import 
org.apache.hadoop.hbase.io.HFileLink;
-079import 
org.apache.hadoop.hbase.io.HalfStoreFileReader;
-080import 
org.apache.hadoop.hbase.io.Reference;
-081import 
org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-082import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-083import 
org.apache.hadoop.hbase.io.hfile.HFile;
-084import 
org.apache.hadoop.hbase.io.hfile.HFileContext;
-085import 
org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-086import 
org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
-087import 
org.apache.hadoop.hbase.io.hfile.HFileScanner;
-088import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-089import 
org.apache.hadoop.hbase.regionserver.BloomType;
-090import 
org.apache.hadoop.hbase.regionserver.HStore;
-091import 
org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-092import 
org.apache.hadoop.hbase.regionserver.StoreFileWriter;
-093import 
org.apache.hadoop.hbase.security.UserProvider;
-094import 
org.apache.hadoop.hbase.security.token.FsDelegationToken;
-095import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-096import 
org.apache.hbase.thirdparty.com.google.common.collect.HashMultimap;
-097import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-098import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimaps;
-099import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-100import 
org.apache.hadoop.hbase.util.Bytes;
-101import 
org.apache.hadoop.hbase.util.FSHDFSUtils;
-102import 
org.apache.hadoop.hbase.util.Pair;
-103import org.apache.hadoop.util.Tool;
-104import 
org.apache.hadoop.util.ToolRunner;
-105
-106/**
-107 * Tool to load the output of 
HFileOutputFormat into an existing table.
-108 */
-109@InterfaceAudience.Public

[05/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.html 
b/testdevapidocs/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.html
index 4f02de9..124cf3b 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":9,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":9,"i11":9,"i12":9,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10};
+var methods = 
{"i0":10,"i1":10,"i2":9,"i3":9,"i4":9,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":9,"i12":9,"i13":9,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestLoadIncrementalHFiles
+public class TestLoadIncrementalHFiles
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Test cases for the "load" half of the HFileOutputFormat 
bulk load functionality. These tests run
  faster than the full MR cluster tests in TestHFileOutputFormat
@@ -233,26 +233,43 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
   intfactor)
 
 
+static int
+loadHFiles(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtestName,
+  org.apache.hadoop.hbase.client.TableDescriptorhtd,
+  HBaseTestingUtilityutil,
+  byte[]fam,
+  byte[]qual,
+  booleanpreCreateTable,
+  byte[][]tableSplitKeys,
+  byte[][][]hfileRanges,
+  booleanuseMap,
+  booleandeleteFile,
+  booleancopyFiles,
+  intinitRowCount,
+  intfactor,
+  intdepth)
+
+
 private void
 runTest(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtestName,
org.apache.hadoop.hbase.regionserver.BloomTypebloomType,
byte[][][]hfileRanges)
 
-
+
 private void
 runTest(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtestName,
org.apache.hadoop.hbase.regionserver.BloomTypebloomType,
byte[][][]hfileRanges,
booleanuseMap)
 
-
+
 private void
 runTest(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtestName,
org.apache.hadoop.hbase.regionserver.BloomTypebloomType,
byte[][]tableSplitKeys,
byte[][][]hfileRanges)
 
-
+
 private void
 runTest(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtestName,
org.apache.hadoop.hbase.regionserver.BloomTypebloomType,
@@ -260,58 +277,59 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
byte[][][]hfileRanges,
booleanuseMap)
 
-
+
 private void
-runTest(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtestName,
+runTest(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtestName,
org.apache.hadoop.hbase.client.TableDescriptorhtd,
-   org.apache.hadoop.hbase.regionserver.BloomTypebloomType,
booleanpreCreateTable,
byte[][]tableSplitKeys,
byte[][][]hfileRanges,
booleanuseMap,
-   booleancopyFiles)
+   booleancopyFiles,
+   intdepth)
 
-
+
 private void
-runTest(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtestName,
+runTest(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtestName,
org.apache.hadoop.hbase.TableNametableName,
org.apache.hadoop.hbase.regionserver.BloomTypebloomType,
booleanpreCreateTable,
 

[16/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/index-all.html
--
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index 90f1ea4..a11a540 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -573,7 +573,7 @@
 
 addEdits(WAL,
 RegionInfo, HTableDescriptor, int, MultiVersionConcurrencyControl, 
NavigableMapbyte[], Integer) - Method in class 
org.apache.hadoop.hbase.regionserver.wal.AbstractTestFSWAL
 
-addEdits(WAL,
 HRegionInfo, HTableDescriptor, int, NavigableMapbyte[], 
Integer) - Method in class org.apache.hadoop.hbase.wal.TestFSHLogProvider
+addEdits(WAL,
 RegionInfo, TableDescriptor, int, NavigableMapbyte[], 
Integer) - Method in class org.apache.hadoop.hbase.wal.TestFSHLogProvider
 
 addedQualifier
 - Variable in class org.apache.hadoop.hbase.coprocessor.SampleRegionWALCoprocessor
 
@@ -810,7 +810,7 @@
 
 The AccessControlLists.addUserPermission may throw 
exception before closing the table.
 
-addWALEdits(TableName,
 HRegionInfo, byte[], byte[], int, EnvironmentEdge, WAL, 
NavigableMapbyte[], Integer, MultiVersionConcurrencyControl) 
- Method in class org.apache.hadoop.hbase.coprocessor.TestWALObserver
+addWALEdits(TableName,
 RegionInfo, byte[], byte[], int, EnvironmentEdge, WAL, NavigableMapbyte[], 
Integer, MultiVersionConcurrencyControl) - Method in class 
org.apache.hadoop.hbase.coprocessor.TestWALObserver
 
 addWALEdits(TableName,
 HRegionInfo, byte[], byte[], int, EnvironmentEdge, WAL, HTableDescriptor, 
MultiVersionConcurrencyControl, NavigableMapbyte[], Integer) 
- Method in class org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay
 
@@ -1194,6 +1194,10 @@
 
 AlwaysDelete()
 - Constructor for class org.apache.hadoop.hbase.master.cleaner.TestCleanerChore.AlwaysDelete
 
+AlwaysIncludeAndSeekNextRowFilter()
 - Constructor for class org.apache.hadoop.hbase.regionserver.querymatcher.TestUserScanQueryMatcher.AlwaysIncludeAndSeekNextRowFilter
+
+AlwaysIncludeFilter()
 - Constructor for class org.apache.hadoop.hbase.regionserver.querymatcher.TestUserScanQueryMatcher.AlwaysIncludeFilter
+
 AlwaysNextColFilter()
 - Constructor for class org.apache.hadoop.hbase.filter.TestFilterList.AlwaysNextColFilter
 
 am
 - Variable in class org.apache.hadoop.hbase.master.assignment.TestAssignmentManager
@@ -1256,7 +1260,7 @@
 
 APPEND_VALUE
 - Static variable in class org.apache.hadoop.hbase.coprocessor.TestPassCustomCellViaRegionObserver
 
-appendCompactionEvent(WALProvider.Writer,
 HRegionInfo, String[], String) - Static method in class 
org.apache.hadoop.hbase.wal.TestWALSplit
+appendCompactionEvent(WALProvider.Writer,
 RegionInfo, String[], String) - Static method in class 
org.apache.hadoop.hbase.wal.TestWALSplit
 
 appendCoprocessor(Configuration,
 String, String) - Static method in class 
org.apache.hadoop.hbase.security.visibility.VisibilityTestUtil
 
@@ -6519,14 +6523,16 @@
 
 createBasic1FamilyHTD(TableName)
 - Method in class org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay
 
-createBasic3FamilyHRegionInfo(String)
 - Method in class org.apache.hadoop.hbase.coprocessor.TestWALObserver
-
 createBasic3FamilyHRegionInfo(TableName)
 - Method in class org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay
 
 createBasic3FamilyHTD(String)
 - Method in class org.apache.hadoop.hbase.coprocessor.TestWALObserver
 
 createBasic3FamilyHTD(TableName)
 - Method in class org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay
 
+createBasicHRegionInfo(String)
 - Method in class org.apache.hadoop.hbase.coprocessor.TestWALObserver
+
+Creates an HRI around an HTD that has 
tableName.
+
 createBatchScan()
 - Static method in class org.apache.hadoop.hbase.client.AbstractTestAsyncTableScan
 
 createBatchSmallResultSizeScan()
 - Static method in class org.apache.hadoop.hbase.client.AbstractTestAsyncTableScan
@@ -6790,7 +6796,9 @@
 Create an HFile with the given number of rows between a 
given
  start key and end key @ family:qualifier.
 
-createHRegion(byte[],
 String, WAL, Durability) - Method in class 
org.apache.hadoop.hbase.regionserver.wal.TestDurability
+createHRegion(WALFactory,
 Durability) - Method in class 
org.apache.hadoop.hbase.regionserver.wal.TestDurability
+
+createHRegion(TableDescriptor,
 RegionInfo, String, WAL, Durability) - Method in class 
org.apache.hadoop.hbase.regionserver.wal.TestDurability
 
 createHTableDescriptor(TableName,
 byte[]...) - Static method in class 
org.apache.hadoop.hbase.master.procedure.TestCloneSnapshotProcedure
 
@@ -10584,7 +10592,7 @@
 
 FailingDummyReplicator(ListWAL.Entry,
 int) - Constructor for class org.apache.hadoop.hbase.replication.TestReplicationEndpoint.InterClusterReplicationEndpointForTest.FailingDummyReplicator
 
-FailingHRegionFileSystem(Configuration,
 FileSystem, Path, HRegionInfo) - Constructor for class 

[07/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/regionserver/wal/TestDurability.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/wal/TestDurability.html 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/wal/TestDurability.html
index 5f0e47f..8a340d3 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/wal/TestDurability.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/wal/TestDurability.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":9,"i2":10,"i3":10,"i4":9,"i5":10,"i6":9,"i7":10,"i8":10,"i9":10,"i10":10};
+var methods = 
{"i0":10,"i1":10,"i2":9,"i3":10,"i4":10,"i5":9,"i6":10,"i7":9,"i8":10,"i9":10,"i10":10,"i11":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestDurability
+public class TestDurability
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Tests for WAL write durability
 
@@ -155,14 +155,18 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 FS
 
 
+org.junit.rules.TestName
+name
+
+
 private static byte[]
 ROW
 
-
+
 private static HBaseTestingUtility
 TEST_UTIL
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 walProvider
 
@@ -200,48 +204,57 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 private 
org.apache.hadoop.hbase.regionserver.HRegion
-createHRegion(byte[]tableName,
- http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringcallingMethod,
- org.apache.hadoop.hbase.wal.WALlog,
+createHRegion(org.apache.hadoop.hbase.client.TableDescriptortd,
+ org.apache.hadoop.hbase.client.RegionInfoinfo,
+ http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringdir,
+ org.apache.hadoop.hbase.wal.WALwal,
  
org.apache.hadoop.hbase.client.Durabilitydurability)
 
 
+private 
org.apache.hadoop.hbase.regionserver.HRegion
+createHRegion(org.apache.hadoop.hbase.wal.WALFactorywals,
+ 
org.apache.hadoop.hbase.client.Durabilitydurability)
+
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true;
 title="class or interface in java.lang">Iterablehttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object[]
 data()
 
-
+
 private 
org.apache.hadoop.hbase.client.Put
 newPut(org.apache.hadoop.hbase.client.Durabilitydurability)
 
-
+
 void
 setUp()
 
-
+
 static void
 setUpBeforeClass()
 
-
+
 void
 tearDown()
 
-
+
 static void
 tearDownAfterClass()
 
-
+
 void
 testDurability()
 
-
+
 void
 testIncrement()
 
-
+
 void
-testIncrementWithReturnResultsSetToFalse()
+testIncrementWithReturnResultsSetToFalse()
+Test when returnResults set to false in increment it should 
not return the result instead it
+ resturn null.
+
 
-
+
 private void
 verifyWALCount(org.apache.hadoop.hbase.wal.WALFactorywals,
   org.apache.hadoop.hbase.wal.WALlog,
@@ -275,7 +288,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TEST_UTIL
-private static finalHBaseTestingUtility TEST_UTIL
+private static finalHBaseTestingUtility TEST_UTIL
 
 
 
@@ -284,7 +297,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 FS
-private staticorg.apache.hadoop.fs.FileSystem FS
+private staticorg.apache.hadoop.fs.FileSystem FS
 
 
 
@@ -293,7 +306,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 CLUSTER
-private staticorg.apache.hadoop.hdfs.MiniDFSCluster CLUSTER
+private staticorg.apache.hadoop.hdfs.MiniDFSCluster CLUSTER
 
 
 
@@ -302,7 +315,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 CONF
-private staticorg.apache.hadoop.conf.Configuration CONF
+private staticorg.apache.hadoop.conf.Configuration CONF
 
 
 
@@ -311,7 +324,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 DIR
-private staticorg.apache.hadoop.fs.Path DIR
+private staticorg.apache.hadoop.fs.Path DIR
 
 
 
@@ -320,7 +333,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 FAMILY
-private staticbyte[] FAMILY
+private staticbyte[] FAMILY
 
 
 
@@ -329,7 +342,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ROW
-private staticbyte[] ROW
+private staticbyte[] ROW
 
 
 
@@ -338,16 

[27/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/wal/DisabledWALProvider.DisabledWAL.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/DisabledWALProvider.DisabledWAL.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/DisabledWALProvider.DisabledWAL.html
index 0b621b6..69a8636 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/DisabledWALProvider.DisabledWAL.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/DisabledWALProvider.DisabledWAL.html
@@ -82,7 +82,7 @@
 074  }
 075
 076  @Override
-077  public WAL getWAL(final byte[] 
identifier, byte[] namespace) throws IOException {
+077  public WAL getWAL(RegionInfo region) 
throws IOException {
 078return disabled;
 079  }
 080

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/wal/DisabledWALProvider.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/DisabledWALProvider.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/DisabledWALProvider.html
index 0b621b6..69a8636 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/DisabledWALProvider.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/DisabledWALProvider.html
@@ -82,7 +82,7 @@
 074  }
 075
 076  @Override
-077  public WAL getWAL(final byte[] 
identifier, byte[] namespace) throws IOException {
+077  public WAL getWAL(RegionInfo region) 
throws IOException {
 078return disabled;
 079  }
 080

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html
index 4fec5eb..63c9ca7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html
@@ -35,254 +35,262 @@
 027import java.util.List;
 028import 
java.util.concurrent.ConcurrentHashMap;
 029import 
java.util.concurrent.ConcurrentMap;
-030
+030import java.util.concurrent.locks.Lock;
 031import 
org.apache.hadoop.conf.Configuration;
-032import 
org.apache.yetus.audience.InterfaceAudience;
-033import org.slf4j.Logger;
-034import org.slf4j.LoggerFactory;
-035// imports for classes still in 
regionserver.wal
-036import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-037import 
org.apache.hadoop.hbase.util.Bytes;
-038import 
org.apache.hadoop.hbase.util.IdLock;
-039
-040/**
-041 * A WAL Provider that returns a WAL per 
group of regions.
-042 *
-043 * This provider follows the decorator 
pattern and mainly holds the logic for WAL grouping.
-044 * WAL creation/roll/close is delegated 
to {@link #DELEGATE_PROVIDER}
-045 *
-046 * Region grouping is handled via {@link 
RegionGroupingStrategy} and can be configured via the
-047 * property 
"hbase.wal.regiongrouping.strategy". Current strategy choices are
-048 * ul
-049 *   
liemdefaultStrategy/em : Whatever strategy this version 
of HBase picks. currently
-050 *  
"bounded"./li
-051 *   
liemidentity/em : each region belongs to its own 
group./li
-052 *   
liembounded/em : bounded number of groups and region 
evenly assigned to each group./li
-053 * /ul
-054 * Optionally, a FQCN to a custom 
implementation may be given.
-055 */
-056@InterfaceAudience.Private
-057public class RegionGroupingProvider 
implements WALProvider {
-058  private static final Logger LOG = 
LoggerFactory.getLogger(RegionGroupingProvider.class);
-059
-060  /**
-061   * Map identifiers to a group number.
-062   */
-063  public static interface 
RegionGroupingStrategy {
-064String GROUP_NAME_DELIMITER = ".";
-065
-066/**
-067 * Given an identifier and a 
namespace, pick a group.
-068 */
-069String group(final byte[] identifier, 
byte[] namespace);
-070void init(Configuration config, 
String providerId);
-071  }
-072
-073  /**
-074   * Maps between configuration names for 
strategies and implementation classes.
-075   */
-076  static enum Strategies {
-077
defaultStrategy(BoundedGroupingStrategy.class),
-078
identity(IdentityGroupingStrategy.class),
-079
bounded(BoundedGroupingStrategy.class),
-080
namespace(NamespaceGroupingStrategy.class);
-081
-082final Class? extends 
RegionGroupingStrategy clazz;
-083Strategies(Class? extends 
RegionGroupingStrategy clazz) {
-084  this.clazz = clazz;
-085}
-086  }
-087
-088  /**
-089   * instantiate a strategy from a config 

[29/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.Reader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.Reader.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.Reader.html
index 972d795..d4f4a3d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.Reader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.Reader.html
@@ -39,489 +39,490 @@
 031import org.apache.hadoop.fs.Path;
 032import 
org.apache.hadoop.hbase.HConstants;
 033import 
org.apache.hadoop.hbase.ServerName;
-034import 
org.apache.yetus.audience.InterfaceAudience;
-035import 
org.apache.yetus.audience.InterfaceStability;
-036import org.slf4j.Logger;
-037import org.slf4j.LoggerFactory;
-038import 
org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL;
-039import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-040import 
org.apache.hadoop.hbase.util.CancelableProgressable;
-041import 
org.apache.hadoop.hbase.util.FSUtils;
-042import 
org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
-043import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-044
-045/**
-046 * Base class of a WAL Provider that 
returns a single thread safe WAL that writes to Hadoop FS. By
-047 * default, this implementation picks a 
directory in Hadoop FS based on a combination of
-048 * ul
-049 * lithe HBase root directory
-050 * 
liHConstants.HREGION_LOGDIR_NAME
-051 * lithe given factory's 
factoryId (usually identifying the regionserver by host:port)
-052 * /ul
-053 * It also uses the providerId to 
differentiate among files.
-054 */
-055@InterfaceAudience.Private
-056@InterfaceStability.Evolving
-057public abstract class 
AbstractFSWALProviderT extends AbstractFSWAL? implements 
WALProvider {
-058
-059  private static final Logger LOG = 
LoggerFactory.getLogger(AbstractFSWALProvider.class);
-060
-061  /** Separate old log into different dir 
by regionserver name **/
-062  public static final String 
SEPARATE_OLDLOGDIR = "hbase.separate.oldlogdir.by.regionserver";
-063  public static final boolean 
DEFAULT_SEPARATE_OLDLOGDIR = false;
-064
-065  // Only public so classes back in 
regionserver.wal can access
-066  public interface Reader extends 
WAL.Reader {
-067/**
-068 * @param fs File system.
-069 * @param path Path.
-070 * @param c Configuration.
-071 * @param s Input stream that may 
have been pre-opened by the caller; may be null.
-072 */
-073void init(FileSystem fs, Path path, 
Configuration c, FSDataInputStream s) throws IOException;
-074  }
-075
-076  protected volatile T wal;
-077  protected WALFactory factory = null;
-078  protected Configuration conf = null;
-079  protected 
ListWALActionsListener listeners = null;
-080  protected String providerId = null;
-081  protected AtomicBoolean initialized = 
new AtomicBoolean(false);
-082  // for default wal provider, logPrefix 
won't change
-083  protected String logPrefix = null;
-084
-085  /**
-086   * we synchronized on walCreateLock to 
prevent wal recreation in different threads
-087   */
-088  private final Object walCreateLock = 
new Object();
-089
-090  /**
-091   * @param factory factory that made us, 
identity used for FS layout. may not be null
-092   * @param conf may not be null
-093   * @param listeners may be null
-094   * @param providerId differentiate 
between providers from one factory, used for FS layout. may be
-095   *  null
-096   */
-097  @Override
-098  public void init(WALFactory factory, 
Configuration conf, ListWALActionsListener listeners,
-099  String providerId) throws 
IOException {
-100if (!initialized.compareAndSet(false, 
true)) {
-101  throw new 
IllegalStateException("WALProvider.init should only be called once.");
-102}
-103this.factory = factory;
-104this.conf = conf;
-105this.listeners = listeners;
-106this.providerId = providerId;
-107// get log prefix
-108StringBuilder sb = new 
StringBuilder().append(factory.factoryId);
-109if (providerId != null) {
-110  if 
(providerId.startsWith(WAL_FILE_NAME_DELIMITER)) {
-111sb.append(providerId);
-112  } else {
-113
sb.append(WAL_FILE_NAME_DELIMITER).append(providerId);
-114  }
-115}
-116logPrefix = sb.toString();
-117doInit(conf);
-118  }
-119
-120  @Override
-121  public ListWAL getWALs() {
-122if (wal == null) {
-123  return Collections.emptyList();
-124}
-125ListWAL wals = new 
ArrayList(1);
-126wals.add(wal);
-127return wals;
-128  }
-129
-130  @Override
-131  public T getWAL(byte[] identifier, 
byte[] namespace) throws IOException {
-132T walCopy = wal;
-133if (walCopy == null) {
-134  // only lock when need to create 
wal, and need to lock since

[49/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index b7b7e6c..527ae34 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -289,7 +289,7 @@
 3487
 0
 0
-18209
+18149
 
 Files
 
@@ -2957,7 +2957,7 @@
 org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
 0
 0
-2
+1
 
 org/apache/hadoop/hbase/coprocessor/WALCoprocessor.java
 0
@@ -5177,7 +5177,7 @@
 org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
 0
 0
-5
+2
 
 org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
 0
@@ -7842,7 +7842,7 @@
 org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
 0
 0
-11
+2
 
 org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java
 0
@@ -7889,16 +7889,6 @@
 0
 4
 
-org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java
-0
-0
-1
-
-org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java
-0
-0
-1
-
 org/apache/hadoop/hbase/regionserver/TestCompactionState.java
 0
 0
@@ -7927,7 +7917,7 @@
 org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
 0
 0
-18
+17
 
 org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java
 0
@@ -8322,7 +8312,7 @@
 org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
 0
 0
-5
+1
 
 org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
 0
@@ -8764,22 +8754,12 @@
 0
 1
 
-org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
-0
-0
-16
-
 org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java
 0
 0
 3
-
-org/apache/hadoop/hbase/regionserver/wal/TestFSWALEntry.java
-0
-0
-1
 
-org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
+org/apache/hadoop/hbase/regionserver/wal/TestFSWALEntry.java
 0
 0
 1
@@ -10292,7 +10272,7 @@
 org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
 0
 0
-8
+7
 
 org/apache/hadoop/hbase/tool/MapreduceTestingShim.java
 0
@@ -11222,7 +11202,7 @@
 org/apache/hadoop/hbase/wal/IOTestProvider.java
 0
 0
-7
+6
 
 org/apache/hadoop/hbase/wal/NamespaceGroupingStrategy.java
 0
@@ -11234,95 +11214,85 @@
 0
 2
 
-org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
-0
-0
-1
-
 org/apache/hadoop/hbase/wal/TestBoundedRegionGroupingStrategy.java
 0
 0
-4
-
+3
+
 org/apache/hadoop/hbase/wal/TestFSHLogProvider.java
 0
 0
-8
-
+5
+
 org/apache/hadoop/hbase/wal/TestWALFactory.java
 0
 0
-22
-
+18
+
 org/apache/hadoop/hbase/wal/TestWALFiltering.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/wal/TestWALMethods.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/wal/TestWALOpenAfterDNRollingStart.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.java
 0
 0
-2
-
+1
+
 org/apache/hadoop/hbase/wal/TestWALRootDir.java
 0
 0
-6
-
+1
+
 org/apache/hadoop/hbase/wal/TestWALSplit.java
 0
 0
-54
-
+52
+
 org/apache/hadoop/hbase/wal/WAL.java
 0
 0
 17
-
+
 org/apache/hadoop/hbase/wal/WALEdit.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/wal/WALFactory.java
 0
 0
-5
-
+3
+
 org/apache/hadoop/hbase/wal/WALKey.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/wal/WALKeyImpl.java
 0
 0
 11
-
+
 org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
 0
 0
-13
-
+12
+
 org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
 0
 0
 17
-
-org/apache/hadoop/hbase/wal/WALProvider.java
-0
-0
-1
 
 org/apache/hadoop/hbase/wal/WALSplitter.java
 0
@@ -11444,12 +11414,12 @@
 
 
 http://checkstyle.sourceforge.net/config_blocks.html#LeftCurly;>LeftCurly
-219
+218
 Error
 
 
 http://checkstyle.sourceforge.net/config_blocks.html#NeedBraces;>NeedBraces
-2054
+2053
 Error
 
 coding
@@ -11502,7 +11472,7 @@
 
 imports
 http://checkstyle.sourceforge.net/config_imports.html#AvoidStarImport;>AvoidStarImport
-106
+105
 Error
 
 
@@ -11512,7 +11482,7 @@
 sortStaticImportsAlphabetically: true
 groups: 
*,org.apache.hbase.thirdparty,org.apache.hadoop.hbase.shaded
 option: top
-1938
+1918
 Error
 
 
@@ -11524,7 +11494,7 @@
 http://checkstyle.sourceforge.net/config_imports.html#UnusedImports;>UnusedImports
 
 processJavadoc: true
-154
+153
 Error
 
 indentation
@@ -11535,7 +11505,7 @@
 caseIndent: 2
 basicOffset: 2
 lineWrappingIndentation: 2
-5359
+5335
 Error
 
 javadoc
@@ -11547,7 +11517,7 @@
 
 
 http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription;>NonEmptyAtclauseDescription
-4053
+4051
 Error
 
 misc
@@ -11565,7 +11535,7 @@
 
 max: 100
 ignorePattern: ^package.*|^import.*|a 
href|href|http://|https://|ftp://|org.apache.thrift.|com.google.protobuf.|hbase.protobuf.generated
-1719
+1710
 Error
 
 
@@ -11580,7 +11550,7 @@
 
 
 http://checkstyle.sourceforge.net/config_whitespace.html#MethodParamPad;>MethodParamPad
-200
+199
 Error
 
 
@@ -14528,361 +14498,361 @@
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-2411
+2410
 
 Error
 javadoc
 

[19/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/integration.html
--
diff --git a/hbase-build-configuration/integration.html 
b/hbase-build-configuration/integration.html
index 0a415f7..e2dd2e2 100644
--- a/hbase-build-configuration/integration.html
+++ b/hbase-build-configuration/integration.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  CI Management
 
@@ -126,7 +126,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-10
+  Last Published: 
2018-01-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/issue-tracking.html
--
diff --git a/hbase-build-configuration/issue-tracking.html 
b/hbase-build-configuration/issue-tracking.html
index 2604681..0062a19 100644
--- a/hbase-build-configuration/issue-tracking.html
+++ b/hbase-build-configuration/issue-tracking.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Issue Management
 
@@ -123,7 +123,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-10
+  Last Published: 
2018-01-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/license.html
--
diff --git a/hbase-build-configuration/license.html 
b/hbase-build-configuration/license.html
index 6ae2a03..989d0f7 100644
--- a/hbase-build-configuration/license.html
+++ b/hbase-build-configuration/license.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Project Licenses
 
@@ -326,7 +326,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-10
+  Last Published: 
2018-01-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/mail-lists.html
--
diff --git a/hbase-build-configuration/mail-lists.html 
b/hbase-build-configuration/mail-lists.html
index b98183f..8e37d21 100644
--- a/hbase-build-configuration/mail-lists.html
+++ b/hbase-build-configuration/mail-lists.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Project Mailing 
Lists
 
@@ -176,7 +176,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-10
+  Last Published: 
2018-01-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/plugin-management.html
--
diff --git a/hbase-build-configuration/plugin-management.html 
b/hbase-build-configuration/plugin-management.html
index 1c1296b..34048dc 100644
--- a/hbase-build-configuration/plugin-management.html
+++ b/hbase-build-configuration/plugin-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Project Plugin 
Management
 
@@ -271,7 +271,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-10
+  Last Published: 
2018-01-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/plugins.html
--
diff --git a/hbase-build-configuration/plugins.html 
b/hbase-build-configuration/plugins.html
index 9f411cd..e4617dc 100644
--- a/hbase-build-configuration/plugins.html
+++ b/hbase-build-configuration/plugins.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Project Plugins
 
@@ -214,7 +214,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-10
+  Last Published: 
2018-01-11
 
 
 


[18/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git a/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html 
b/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
index 4430aed..e714bcf 100644
--- a/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -4270,7 +4270,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 createRegionAndWAL
-public staticHRegioncreateRegionAndWAL(RegionInfoinfo,
+public staticHRegioncreateRegionAndWAL(RegionInfoinfo,
  
org.apache.hadoop.fs.PathrootDir,
  
org.apache.hadoop.conf.Configurationconf,
  TableDescriptorhtd)
@@ -4289,7 +4289,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 createRegionAndWAL
-public staticHRegioncreateRegionAndWAL(RegionInfoinfo,
+public staticHRegioncreateRegionAndWAL(RegionInfoinfo,
  
org.apache.hadoop.fs.PathrootDir,
  
org.apache.hadoop.conf.Configurationconf,
  TableDescriptorhtd,
@@ -4309,7 +4309,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 getMetaTableRows
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listbyte[]getMetaTableRows()
+publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listbyte[]getMetaTableRows()
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Returns all rows from the hbase:meta table.
 
@@ -4324,7 +4324,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 getMetaTableRows
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listbyte[]getMetaTableRows(TableNametableName)
+publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listbyte[]getMetaTableRows(TableNametableName)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Returns all rows from the hbase:meta table for a given user 
table
 
@@ -4339,7 +4339,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 getOtherRegionServer
-publicHRegionServergetOtherRegionServer(HRegionServerrs)
+publicHRegionServergetOtherRegionServer(HRegionServerrs)
 
 
 
@@ -4348,7 +4348,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 getRSForFirstRegionInTable
-publicHRegionServergetRSForFirstRegionInTable(TableNametableName)
+publicHRegionServergetRSForFirstRegionInTable(TableNametableName)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException,
 http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true;
 title="class or interface in java.lang">InterruptedException
 Tool to get the reference to the region server object that 
holds the
@@ -4373,7 +4373,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 startMiniMapReduceCluster
-publicorg.apache.hadoop.mapred.MiniMRClusterstartMiniMapReduceCluster()
+publicorg.apache.hadoop.mapred.MiniMRClusterstartMiniMapReduceCluster()
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Starts a MiniMRCluster with a default number of
  TaskTracker's.
@@ -4389,7 +4389,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 shutdownMiniMapReduceCluster
-publicvoidshutdownMiniMapReduceCluster()
+publicvoidshutdownMiniMapReduceCluster()
 Stops the previously started 
MiniMRCluster.
 
 
@@ -4399,7 +4399,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 createMockRegionServerService
-publicRegionServerServicescreateMockRegionServerService()
+publicRegionServerServicescreateMockRegionServerService()
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Create a stubbed out RegionServerService, mainly for 
getting FS.
 
@@ -4414,7 

[42/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.html 
b/devapidocs/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.html
index 4d4db93..b88ce1d 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.html
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 @InterfaceAudience.Private
  @InterfaceStability.Evolving
-public abstract class AbstractFSWALProviderT
 extends AbstractFSWAL?
+public abstract class AbstractFSWALProviderT
 extends AbstractFSWAL?
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements WALProvider
 Base class of a WAL Provider that returns a single thread 
safe WAL that writes to Hadoop FS. By
@@ -360,8 +360,7 @@ implements 
 
 T
-getWAL(byte[]identifier,
-  byte[]namespace)
+getWAL(RegionInforegion)
 
 
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
@@ -468,7 +467,7 @@ implements 
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -477,7 +476,7 @@ implements 
 
 SEPARATE_OLDLOGDIR
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String SEPARATE_OLDLOGDIR
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String SEPARATE_OLDLOGDIR
 Separate old log into different dir by regionserver 
name
 
 See Also:
@@ -491,7 +490,7 @@ implements 
 
 DEFAULT_SEPARATE_OLDLOGDIR
-public static finalboolean DEFAULT_SEPARATE_OLDLOGDIR
+public static finalboolean DEFAULT_SEPARATE_OLDLOGDIR
 
 See Also:
 Constant
 Field Values
@@ -504,7 +503,7 @@ implements 
 
 wal
-protected volatileT extends AbstractFSWAL? wal
+protected volatileT extends AbstractFSWAL? wal
 
 
 
@@ -513,7 +512,7 @@ implements 
 
 factory
-protectedWALFactory factory
+protectedWALFactory factory
 
 
 
@@ -522,7 +521,7 @@ implements 
 
 conf
-protectedorg.apache.hadoop.conf.Configuration conf
+protectedorg.apache.hadoop.conf.Configuration conf
 
 
 
@@ -531,7 +530,7 @@ implements 
 
 listeners
-protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListWALActionsListener listeners
+protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListWALActionsListener listeners
 
 
 
@@ -540,7 +539,7 @@ implements 
 
 providerId
-protectedhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String providerId
+protectedhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String providerId
 
 
 
@@ -549,7 +548,7 @@ implements 
 
 initialized
-protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true;
 title="class or interface in java.util.concurrent.atomic">AtomicBoolean initialized
+protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true;
 title="class or interface in java.util.concurrent.atomic">AtomicBoolean initialized
 
 
 
@@ -558,7 +557,7 @@ implements 
 
 logPrefix
-protectedhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String logPrefix
+protectedhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String logPrefix
 
 
 
@@ -567,7 +566,7 @@ implements 
 
 walCreateLock
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object walCreateLock
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object walCreateLock
 we synchronized on walCreateLock to prevent wal recreation 
in different threads
 
 
@@ -577,7 +576,7 @@ implements 
 
 WAL_FILE_NAME_DELIMITER
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String WAL_FILE_NAME_DELIMITER
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String WAL_FILE_NAME_DELIMITER
 
 See Also:
 Constant
 Field Values
@@ -590,7 +589,7 @@ implements 
 
 META_WAL_PROVIDER_ID
-public static 

[38/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
index e743560..163ade0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
@@ -2124,1654 +2124,1642 @@
 2116return healthy;
 2117  }
 2118
-2119  private static final byte[] 
UNSPECIFIED_REGION = new byte[]{};
-2120
-2121  @Override
-2122  public ListWAL getWALs() 
throws IOException {
-2123return walFactory.getWALs();
-2124  }
-2125
-2126  @Override
-2127  public WAL getWAL(RegionInfo 
regionInfo) throws IOException {
-2128WAL wal;
-2129// _ROOT_ and hbase:meta regions 
have separate WAL.
-2130if (regionInfo != null  
regionInfo.isMetaRegion()
-2131 
regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
-2132  wal = 
walFactory.getMetaWAL(regionInfo.getEncodedNameAsBytes());
-2133} else if (regionInfo == null) {
-2134  wal = 
walFactory.getWAL(UNSPECIFIED_REGION, null);
-2135} else {
-2136  byte[] namespace = 
regionInfo.getTable().getNamespace();
-2137  wal = 
walFactory.getWAL(regionInfo.getEncodedNameAsBytes(), namespace);
-2138}
-2139if (this.walRoller != null) {
-2140  this.walRoller.addWAL(wal);
-2141}
-2142return wal;
-2143  }
-2144
-2145  public LogRoller getWalRoller() {
-2146return walRoller;
-2147  }
-2148
-2149  @Override
-2150  public Connection getConnection() {
-2151return getClusterConnection();
-2152  }
-2153
-2154  @Override
-2155  public ClusterConnection 
getClusterConnection() {
-2156return this.clusterConnection;
-2157  }
-2158
-2159  @Override
-2160  public MetaTableLocator 
getMetaTableLocator() {
-2161return this.metaTableLocator;
-2162  }
-2163
-2164  @Override
-2165  public void stop(final String msg) {
-2166stop(msg, false, 
RpcServer.getRequestUser().orElse(null));
-2167  }
-2168
-2169  /**
-2170   * Stops the regionserver.
-2171   * @param msg Status message
-2172   * @param force True if this is a 
regionserver abort
-2173   * @param user The user executing the 
stop request, or null if no user is associated
-2174   */
-2175  public void stop(final String msg, 
final boolean force, final User user) {
-2176if (!this.stopped) {
-2177  LOG.info("* STOPPING region 
server '" + this + "' *");
-2178  if (this.rsHost != null) {
-2179// when forced via abort don't 
allow CPs to override
-2180try {
-2181  this.rsHost.preStop(msg, 
user);
-2182} catch (IOException ioe) {
-2183  if (!force) {
-2184LOG.warn("The region server 
did not stop", ioe);
-2185return;
-2186  }
-2187  LOG.warn("Skipping coprocessor 
exception on preStop() due to forced shutdown", ioe);
-2188}
-2189  }
-2190  this.stopped = true;
-2191  LOG.info("STOPPED: " + msg);
-2192  // Wakes run() if it is sleeping
-2193  sleeper.skipSleepCycle();
-2194}
-2195  }
-2196
-2197  public void waitForServerOnline(){
-2198while (!isStopped()  
!isOnline()) {
-2199  synchronized (online) {
-2200try {
-2201  online.wait(msgInterval);
-2202} catch (InterruptedException 
ie) {
-2203  
Thread.currentThread().interrupt();
-2204  break;
-2205}
-2206  }
-2207}
-2208  }
-2209
-2210  @Override
-2211  public void postOpenDeployTasks(final 
PostOpenDeployContext context)
-2212  throws KeeperException, 
IOException {
-2213HRegion r = context.getRegion();
-2214long masterSystemTime = 
context.getMasterSystemTime();
-2215rpcServices.checkOpen();
-2216LOG.info("Post open deploy tasks for 
" + r.getRegionInfo().getRegionNameAsString());
-2217// Do checks to see if we need to 
compact (references or too many files)
-2218for (HStore s : r.stores.values()) 
{
-2219  if (s.hasReferences() || 
s.needsCompaction()) {
-2220
this.compactSplitThread.requestSystemCompaction(r, s, "Opening Region");
-2221  }
-}
-2223long openSeqNum = 
r.getOpenSeqNum();
-2224if (openSeqNum == 
HConstants.NO_SEQNUM) {
-2225  // If we opened a region, we 
should have read some sequence number from it.
-2226  LOG.error("No sequence number 
found when opening " +
-2227
r.getRegionInfo().getRegionNameAsString());
-2228  openSeqNum = 0;
-2229}
+2119  @Override
+2120  public ListWAL getWALs() 
throws IOException {
+2121return walFactory.getWALs();
+2122  }
+2123
+2124  @Override
+2125  public WAL getWAL(RegionInfo 
regionInfo) throws 

[10/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.html
 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.html
index 7534235..762219a 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestHRegionReplayEvents
+public class TestHRegionReplayEvents
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Tests of HRegion methods for replaying flush, compaction, 
region open, etc events for secondary
  region replicas
@@ -164,7 +164,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 name
 
 
-private 
org.apache.hadoop.hbase.HRegionInfo
+private 
org.apache.hadoop.hbase.client.RegionInfo
 primaryHri
 
 
@@ -192,7 +192,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 rss
 
 
-private 
org.apache.hadoop.hbase.HRegionInfo
+private 
org.apache.hadoop.hbase.client.RegionInfo
 secondaryHri
 
 
@@ -525,7 +525,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -534,7 +534,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 name
-publicorg.junit.rules.TestName name
+publicorg.junit.rules.TestName name
 
 
 
@@ -543,7 +543,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TEST_UTIL
-private staticHBaseTestingUtility TEST_UTIL
+private staticHBaseTestingUtility TEST_UTIL
 
 
 
@@ -552,7 +552,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 CONF
-public staticorg.apache.hadoop.conf.Configuration CONF
+public staticorg.apache.hadoop.conf.Configuration CONF
 
 
 
@@ -561,7 +561,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 dir
-privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String dir
+privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String dir
 
 
 
@@ -570,7 +570,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 families
-privatebyte[][] families
+privatebyte[][] families
 
 
 
@@ -579,7 +579,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 tableName
-protectedbyte[] tableName
+protectedbyte[] tableName
 
 
 
@@ -588,7 +588,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 method
-protectedhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String method
+protectedhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String method
 
 
 
@@ -597,7 +597,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 row
-protected finalbyte[] row
+protected finalbyte[] row
 
 
 
@@ -606,7 +606,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 row2
-protected finalbyte[] row2
+protected finalbyte[] row2
 
 
 
@@ -615,7 +615,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 cq
-protectedbyte[] cq
+protectedbyte[] cq
 
 
 
@@ -624,7 +624,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 rootDir
-privateorg.apache.hadoop.fs.Path rootDir
+privateorg.apache.hadoop.fs.Path rootDir
 
 
 
@@ -633,7 +633,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 htd
-privateorg.apache.hadoop.hbase.client.TableDescriptor htd
+privateorg.apache.hadoop.hbase.client.TableDescriptor htd
 
 
 
@@ -642,7 +642,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 time
-privatelong time
+privatelong time
 
 
 
@@ -651,7 +651,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 rss
-privateorg.apache.hadoop.hbase.regionserver.RegionServerServices rss
+privateorg.apache.hadoop.hbase.regionserver.RegionServerServices rss
 
 
 
@@ -660,7 +660,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 primaryHri
-privateorg.apache.hadoop.hbase.HRegionInfo primaryHri
+privateorg.apache.hadoop.hbase.client.RegionInfo primaryHri
 
 
 
@@ -669,7 +669,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 secondaryHri

[46/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index 22e35af..74904fa 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -569,34 +569,30 @@ implements uncaughtExceptionHandler
 
 
-private static byte[]
-UNSPECIFIED_REGION
-
-
 private UserProvider
 userProvider
 
-
+
 protected http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 useThisHostnameInstead
 
-
+
 protected WALFactory
 walFactory
 
-
+
 protected HFileSystem
 walFs
 
-
+
 protected LogRoller
 walRoller
 
-
+
 private org.apache.hadoop.fs.Path
 walRootDir
 
-
+
 protected ZKWatcher
 zooKeeper
 
@@ -2405,22 +2401,13 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 
-
-
-
-
-
-UNSPECIFIED_REGION
-private static finalbyte[] UNSPECIFIED_REGION
-
-
 
 
 
 
 
 movedRegions
-protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,HRegionServer.MovedRegionInfo movedRegions
+protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,HRegionServer.MovedRegionInfo movedRegions
 
 
 
@@ -2429,7 +2416,7 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 TIMEOUT_REGION_MOVED
-private static finalint TIMEOUT_REGION_MOVED
+private static finalint TIMEOUT_REGION_MOVED
 
 See Also:
 Constant
 Field Values
@@ -3201,7 +3188,7 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 getWALs
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListWALgetWALs()
+publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListWALgetWALs()
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Specified by:
@@ -3220,7 +3207,7 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 getWAL
-publicWALgetWAL(RegionInforegionInfo)
+publicWALgetWAL(RegionInforegionInfo)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Specified by:
@@ -3239,7 +3226,7 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 getWalRoller
-publicLogRollergetWalRoller()
+publicLogRollergetWalRoller()
 
 
 
@@ -3248,7 +3235,7 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 getConnection
-publicConnectiongetConnection()
+publicConnectiongetConnection()
 Description copied from 
interface:Server
 Returns a reference to the servers' connection.
 
@@ -3266,7 +3253,7 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 getClusterConnection
-publicClusterConnectiongetClusterConnection()
+publicClusterConnectiongetClusterConnection()
 Description copied from 
interface:Server
 Returns a reference to the servers' cluster connection. 
Prefer Server.getConnection().
 
@@ -3284,7 +3271,7 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 getMetaTableLocator
-publicMetaTableLocatorgetMetaTableLocator()
+publicMetaTableLocatorgetMetaTableLocator()
 Description copied from 
interface:Server
 Returns instance of MetaTableLocator
  running inside this server. This MetaServerLocator is started and stopped by 
server, clients
@@ -3303,7 +3290,7 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 stop
-publicvoidstop(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringmsg)
+publicvoidstop(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringmsg)
 Description copied from 
interface:Stoppable
 Stop this service.
  Implementers should favor logging errors over throwing 
RuntimeExceptions.
@@ -3321,7 +3308,7 @@ protected static finalhttp://docs.oracle.com/javase/8/docs/api/ja
 
 
 stop
-publicvoidstop(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringmsg,

[47/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
index cdd966c..fcff874 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
@@ -208,11 +208,11 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
+org.apache.hadoop.hbase.procedure2.LockType
+org.apache.hadoop.hbase.procedure2.LockedResourceType
 org.apache.hadoop.hbase.procedure2.Procedure.LockState
-org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow
 org.apache.hadoop.hbase.procedure2.RootProcedureState.State
-org.apache.hadoop.hbase.procedure2.LockedResourceType
-org.apache.hadoop.hbase.procedure2.LockType
+org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
index d510ea9..e7a6a06 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
@@ -206,12 +206,12 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.quotas.SpaceViolationPolicy
-org.apache.hadoop.hbase.quotas.QuotaScope
-org.apache.hadoop.hbase.quotas.ThrottleType
 org.apache.hadoop.hbase.quotas.OperationQuota.OperationType
+org.apache.hadoop.hbase.quotas.ThrottleType
 org.apache.hadoop.hbase.quotas.QuotaType
+org.apache.hadoop.hbase.quotas.SpaceViolationPolicy
 org.apache.hadoop.hbase.quotas.ThrottlingException.Type
+org.apache.hadoop.hbase.quotas.QuotaScope
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
index 0edf605..a0bba7b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class HRegionServer.MovedRegionInfo
+private static class HRegionServer.MovedRegionInfo
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 
 
@@ -218,7 +218,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 serverName
-private finalServerName serverName
+private finalServerName serverName
 
 
 
@@ -227,7 +227,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 seqNum
-private finallong seqNum
+private finallong seqNum
 
 
 
@@ -236,7 +236,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ts
-private finallong ts
+private finallong ts
 
 
 
@@ -253,7 +253,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 MovedRegionInfo
-publicMovedRegionInfo(ServerNameserverName,
+publicMovedRegionInfo(ServerNameserverName,
longcloseSeqNum)
 
 
@@ -271,7 +271,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getServerName
-publicServerNamegetServerName()
+publicServerNamegetServerName()
 
 
 
@@ -280,7 +280,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getSeqNum
-publiclonggetSeqNum()
+publiclonggetSeqNum()
 
 
 
@@ -289,7 +289,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getMoveTime
-publiclonggetMoveTime()

[22/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
index fadf667..14b2b69 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
@@ -7,269 +7,269 @@
 
 
 001/**
-002 *
-003 * Licensed to the Apache Software 
Foundation (ASF) under one
-004 * or more contributor license 
agreements.  See the NOTICE file
-005 * distributed with this work for 
additional information
-006 * regarding copyright ownership.  The 
ASF licenses this file
-007 * to you under the Apache License, 
Version 2.0 (the
-008 * "License"); you may not use this file 
except in compliance
-009 * with the License.  You may obtain a 
copy of the License at
-010 *
-011 * 
http://www.apache.org/licenses/LICENSE-2.0
-012 *
-013 * Unless required by applicable law or 
agreed to in writing, software
-014 * distributed under the License is 
distributed on an "AS IS" BASIS,
-015 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
-016 * See the License for the specific 
language governing permissions and
-017 * limitations under the License.
-018 */
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing, software
+013 * distributed under the License is 
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+015 * See the License for the specific 
language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase.wal;
 019
-020
-021package org.apache.hadoop.hbase.wal;
-022
-023import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-024
-025import java.io.IOException;
-026import java.io.InterruptedIOException;
-027import java.util.Collections;
-028import java.util.List;
-029import java.util.OptionalLong;
-030import 
java.util.concurrent.atomic.AtomicReference;
-031
-032import 
org.apache.hadoop.conf.Configuration;
-033import org.apache.hadoop.fs.FileSystem;
-034import org.apache.hadoop.fs.Path;
-035import 
org.apache.yetus.audience.InterfaceAudience;
-036import org.slf4j.Logger;
-037import org.slf4j.LoggerFactory;
-038// imports for things that haven't moved 
from regionserver.wal yet.
-039import 
org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
-040import 
org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader;
-041import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-042import 
org.apache.hadoop.hbase.replication.regionserver.WALFileLengthProvider;
-043import 
org.apache.hadoop.hbase.util.CancelableProgressable;
-044import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-045import 
org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
-046import 
org.apache.hadoop.hbase.wal.WAL.Reader;
-047import 
org.apache.hadoop.hbase.wal.WALProvider.Writer;
-048
-049/**
-050 * Entry point for users of the Write 
Ahead Log.
-051 * Acts as the shim between internal use 
and the particular WALProvider we use to handle wal
-052 * requests.
-053 *
-054 * Configure which provider gets used 
with the configuration setting "hbase.wal.provider". Available
-055 * implementations:
-056 * ul
-057 *   
liemdefaultProvider/em : whatever provider is standard 
for the hbase version. Currently
-058 *  
"filesystem"/li
-059 *   
liemfilesystem/em : a provider that will run on top of 
an implementation of the Hadoop
-060 * FileSystem 
interface, normally HDFS./li
-061 *   
liemmultiwal/em : a provider that will use multiple 
"filesystem" wal instances per region
-062 *   
server./li
-063 * /ul
-064 *
-065 * Alternatively, you may provide a 
custom implementation of {@link WALProvider} by class name.
-066 */
-067@InterfaceAudience.Private
-068public class WALFactory implements 
WALFileLengthProvider {
-069
-070  private static final Logger LOG = 
LoggerFactory.getLogger(WALFactory.class);
-071
-072  /**
-073   * Maps between configuration names for 
providers and implementation classes.
-074   */
-075  static enum Providers {
-076
defaultProvider(AsyncFSWALProvider.class),
-077

[04/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWriter.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWriter.html 
b/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWriter.html
index 6066c36..84a95a3 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWriter.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWriter.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class IOTestProvider.IOTestWriter
+private static class IOTestProvider.IOTestWriter
 extends org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter
 Presumes init will be called by a single thread prior to 
any access of other methods.
 
@@ -272,7 +272,7 @@ extends 
org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter
 
 
 doAppends
-privateboolean doAppends
+privateboolean doAppends
 
 
 
@@ -281,7 +281,7 @@ extends 
org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter
 
 
 doSyncs
-privateboolean doSyncs
+privateboolean doSyncs
 
 
 
@@ -298,7 +298,7 @@ extends 
org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter
 
 
 IOTestWriter
-privateIOTestWriter()
+privateIOTestWriter()
 
 
 
@@ -315,7 +315,7 @@ extends 
org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter
 
 
 init
-publicvoidinit(org.apache.hadoop.fs.FileSystemfs,
+publicvoidinit(org.apache.hadoop.fs.FileSystemfs,
  org.apache.hadoop.fs.Pathpath,
  org.apache.hadoop.conf.Configurationconf,
  booleanoverwritable)
@@ -338,7 +338,7 @@ extends 
org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter
 
 
 getWriterClassName
-protectedhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetWriterClassName()
+protectedhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetWriterClassName()
 
 Overrides:
 getWriterClassNamein 
classorg.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter
@@ -351,7 +351,7 @@ extends 
org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter
 
 
 append
-publicvoidappend(org.apache.hadoop.hbase.wal.WAL.Entryentry)
+publicvoidappend(org.apache.hadoop.hbase.wal.WAL.Entryentry)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Specified by:
@@ -369,7 +369,7 @@ extends 
org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter
 
 
 sync
-publicvoidsync()
+publicvoidsync()
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Specified by:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.html 
b/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.html
index 4d872c1..724f442 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.html
@@ -114,30 +114,29 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class IOTestProvider
+public class IOTestProvider
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements org.apache.hadoop.hbase.wal.WALProvider
-A WAL Provider that returns a single thread safe WAL that 
optionally can skip parts of our
- normal interactions with HDFS.
-
- This implementation picks a directory in HDFS based on the same mechanisms as 
the 
- FSHLogProvider. Users can configure how much interaction
- we have with HDFS with the configuration property 
"hbase.wal.iotestprovider.operations".
- The value should be a comma separated list of allowed operations:
+A WAL Provider that returns a single thread safe WAL that 
optionally can skip parts of our normal
+ interactions with HDFS.
+ 
+ This implementation picks a directory in HDFS based on the same mechanisms as 
the
+ FSHLogProvider. Users can configure how much interaction we have 
with HDFS with the
+ configuration property "hbase.wal.iotestprovider.operations". The value 
should be a comma
+ separated list of allowed operations:
  
-   append   : edits will be written to the underlying filesystem
-   sync : wal syncs will result in hflush calls
-   fileroll : roll requests will result in creating a new file on 
the underlying
-   filesystem.
+ append : edits will be written to the underlying filesystem
+ sync : wal 

[51/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
Published site at .


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/f183e80f
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/f183e80f
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/f183e80f

Branch: refs/heads/asf-site
Commit: f183e80f4a672944349adc832622dcfcd75deb3e
Parents: 096cff0
Author: jenkins 
Authored: Thu Jan 11 15:30:28 2018 +
Committer: jenkins 
Committed: Thu Jan 11 15:30:28 2018 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 4 +-
 apidocs/index-all.html  |46 +-
 .../hadoop/hbase/NamespaceExistException.html   | 4 +-
 .../hadoop/hbase/class-use/TableName.html   | 9 +-
 .../replication/ReplicationPeerConfig.html  | 4 +-
 .../ReplicationPeerConfigBuilder.html   |   177 +-
 .../class-use/ReplicationPeerConfig.html| 4 +-
 .../class-use/ReplicationPeerConfigBuilder.html |42 +-
 .../hadoop/hbase/NamespaceExistException.html   | 4 +-
 .../replication/ReplicationPeerConfig.html  |   108 +-
 .../ReplicationPeerConfigBuilder.html   |   125 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 16478 -
 checkstyle.rss  |44 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html | 6 +-
 devapidocs/index-all.html   |62 +-
 .../hadoop/hbase/NamespaceExistException.html   | 4 +-
 .../hadoop/hbase/backup/package-tree.html   | 4 +-
 .../hadoop/hbase/class-use/TableName.html   | 9 +-
 .../hbase/client/class-use/RegionInfo.html  |20 +
 .../hadoop/hbase/client/package-tree.html   |22 +-
 .../hadoop/hbase/filter/package-tree.html   | 6 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 6 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   | 4 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 2 +-
 .../hbase/master/balancer/package-tree.html | 2 +-
 .../hadoop/hbase/master/package-tree.html   | 4 +-
 .../hbase/master/procedure/package-tree.html| 2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |18 +-
 .../hadoop/hbase/procedure2/package-tree.html   | 6 +-
 .../hadoop/hbase/quotas/package-tree.html   | 6 +-
 .../HRegionServer.MovedRegionInfo.html  |16 +-
 .../HRegionServer.MovedRegionsCleaner.html  |16 +-
 .../hbase/regionserver/HRegionServer.html   |   255 +-
 .../hadoop/hbase/regionserver/package-tree.html |16 +-
 .../querymatcher/UserScanQueryMatcher.html  | 8 +-
 .../regionserver/querymatcher/package-tree.html | 2 +-
 ...Config.ReplicationPeerConfigBuilderImpl.html |   101 +-
 .../replication/ReplicationPeerConfig.html  | 4 +-
 .../ReplicationPeerConfigBuilder.html   |   160 +-
 .../class-use/ReplicationPeerConfig.html| 4 +-
 .../class-use/ReplicationPeerConfigBuilder.html |42 +-
 .../hadoop/hbase/rest/model/package-tree.html   | 2 +-
 .../hbase/security/access/package-tree.html | 2 +-
 .../hadoop/hbase/security/package-tree.html | 4 +-
 .../hadoop/hbase/thrift/package-tree.html   | 2 +-
 .../LoadIncrementalHFiles.BulkHFileVisitor.html | 6 +-
 .../LoadIncrementalHFiles.LoadQueueItem.html|14 +-
 .../hbase/tool/LoadIncrementalHFiles.html   |   116 +-
 .../hadoop/hbase/util/class-use/IdLock.html |22 -
 .../hadoop/hbase/util/class-use/KeyLocker.html  |22 +
 .../apache/hadoop/hbase/util/package-tree.html  |10 +-
 .../apache/hadoop/hbase/util/package-use.html   | 5 +-
 .../hbase/wal/AbstractFSWALProvider.Reader.html | 4 +-
 .../hadoop/hbase/wal/AbstractFSWALProvider.html |99 +-
 .../hadoop/hbase/wal/AsyncFSWALProvider.html| 2 +-
 .../hadoop/hbase/wal/DisabledWALProvider.html   |13 +-
 .../apache/hadoop/hbase/wal/FSHLogProvider.html | 2 +-
 ...oupingProvider.IdentityGroupingStrategy.html | 8 +-
 ...GroupingProvider.RegionGroupingStrategy.html | 8 +-
 .../wal/RegionGroupingProvider.Strategies.html  |16 +-
 .../hbase/wal/RegionGroupingProvider.html   |63 +-
 .../hadoop/hbase/wal/WALFactory.Providers.html  |16 +-
 .../org/apache/hadoop/hbase/wal/WALFactory.html |73 +-
 

[45/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index c9994ce..c21343b 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -704,19 +704,19 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
+org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
 org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
-org.apache.hadoop.hbase.regionserver.ScanType
+org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
+org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
 org.apache.hadoop.hbase.regionserver.FlushType
-org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
-org.apache.hadoop.hbase.regionserver.Region.Operation
-org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
 org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
-org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
-org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
-org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
+org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
 org.apache.hadoop.hbase.regionserver.BloomType
 org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
+org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
+org.apache.hadoop.hbase.regionserver.ScanType
+org.apache.hadoop.hbase.regionserver.Region.Operation
+org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.html
index eac86aa..b824c32 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.html
@@ -579,7 +579,7 @@ extends 
 
 isGet
-protected abstractbooleanisGet()
+protected abstractbooleanisGet()
 
 
 
@@ -588,7 +588,7 @@ extends 
 
 moreRowsMayExistsAfter
-protected abstractbooleanmoreRowsMayExistsAfter(intcmpToStopRow)
+protected abstractbooleanmoreRowsMayExistsAfter(intcmpToStopRow)
 
 
 
@@ -597,7 +597,7 @@ extends 
 
 moreRowsMayExistAfter
-publicbooleanmoreRowsMayExistAfter(Cellcell)
+publicbooleanmoreRowsMayExistAfter(Cellcell)
 
 Specified by:
 moreRowsMayExistAfterin
 classScanQueryMatcher
@@ -614,7 +614,7 @@ extends 
 
 create
-public staticUserScanQueryMatchercreate(Scanscan,
+public staticUserScanQueryMatchercreate(Scanscan,
   ScanInfoscanInfo,
   http://docs.oracle.com/javase/8/docs/api/java/util/NavigableSet.html?is-external=true;
 title="class or interface in 
java.util">NavigableSetbyte[]columns,
   longoldestUnexpiredTS,

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
index af72bf3..c2cf4f9 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
@@ -131,8 +131,8 @@
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or 

[35/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index e743560..163ade0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -2124,1654 +2124,1642 @@
 2116return healthy;
 2117  }
 2118
-2119  private static final byte[] 
UNSPECIFIED_REGION = new byte[]{};
-2120
-2121  @Override
-2122  public ListWAL getWALs() 
throws IOException {
-2123return walFactory.getWALs();
-2124  }
-2125
-2126  @Override
-2127  public WAL getWAL(RegionInfo 
regionInfo) throws IOException {
-2128WAL wal;
-2129// _ROOT_ and hbase:meta regions 
have separate WAL.
-2130if (regionInfo != null  
regionInfo.isMetaRegion()
-2131 
regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
-2132  wal = 
walFactory.getMetaWAL(regionInfo.getEncodedNameAsBytes());
-2133} else if (regionInfo == null) {
-2134  wal = 
walFactory.getWAL(UNSPECIFIED_REGION, null);
-2135} else {
-2136  byte[] namespace = 
regionInfo.getTable().getNamespace();
-2137  wal = 
walFactory.getWAL(regionInfo.getEncodedNameAsBytes(), namespace);
-2138}
-2139if (this.walRoller != null) {
-2140  this.walRoller.addWAL(wal);
-2141}
-2142return wal;
-2143  }
-2144
-2145  public LogRoller getWalRoller() {
-2146return walRoller;
-2147  }
-2148
-2149  @Override
-2150  public Connection getConnection() {
-2151return getClusterConnection();
-2152  }
-2153
-2154  @Override
-2155  public ClusterConnection 
getClusterConnection() {
-2156return this.clusterConnection;
-2157  }
-2158
-2159  @Override
-2160  public MetaTableLocator 
getMetaTableLocator() {
-2161return this.metaTableLocator;
-2162  }
-2163
-2164  @Override
-2165  public void stop(final String msg) {
-2166stop(msg, false, 
RpcServer.getRequestUser().orElse(null));
-2167  }
-2168
-2169  /**
-2170   * Stops the regionserver.
-2171   * @param msg Status message
-2172   * @param force True if this is a 
regionserver abort
-2173   * @param user The user executing the 
stop request, or null if no user is associated
-2174   */
-2175  public void stop(final String msg, 
final boolean force, final User user) {
-2176if (!this.stopped) {
-2177  LOG.info("* STOPPING region 
server '" + this + "' *");
-2178  if (this.rsHost != null) {
-2179// when forced via abort don't 
allow CPs to override
-2180try {
-2181  this.rsHost.preStop(msg, 
user);
-2182} catch (IOException ioe) {
-2183  if (!force) {
-2184LOG.warn("The region server 
did not stop", ioe);
-2185return;
-2186  }
-2187  LOG.warn("Skipping coprocessor 
exception on preStop() due to forced shutdown", ioe);
-2188}
-2189  }
-2190  this.stopped = true;
-2191  LOG.info("STOPPED: " + msg);
-2192  // Wakes run() if it is sleeping
-2193  sleeper.skipSleepCycle();
-2194}
-2195  }
-2196
-2197  public void waitForServerOnline(){
-2198while (!isStopped()  
!isOnline()) {
-2199  synchronized (online) {
-2200try {
-2201  online.wait(msgInterval);
-2202} catch (InterruptedException 
ie) {
-2203  
Thread.currentThread().interrupt();
-2204  break;
-2205}
-2206  }
-2207}
-2208  }
-2209
-2210  @Override
-2211  public void postOpenDeployTasks(final 
PostOpenDeployContext context)
-2212  throws KeeperException, 
IOException {
-2213HRegion r = context.getRegion();
-2214long masterSystemTime = 
context.getMasterSystemTime();
-2215rpcServices.checkOpen();
-2216LOG.info("Post open deploy tasks for 
" + r.getRegionInfo().getRegionNameAsString());
-2217// Do checks to see if we need to 
compact (references or too many files)
-2218for (HStore s : r.stores.values()) 
{
-2219  if (s.hasReferences() || 
s.needsCompaction()) {
-2220
this.compactSplitThread.requestSystemCompaction(r, s, "Opening Region");
-2221  }
-}
-2223long openSeqNum = 
r.getOpenSeqNum();
-2224if (openSeqNum == 
HConstants.NO_SEQNUM) {
-2225  // If we opened a region, we 
should have read some sequence number from it.
-2226  LOG.error("No sequence number 
found when opening " +
-2227
r.getRegionInfo().getRegionNameAsString());
-2228  openSeqNum = 0;
-2229}
+2119  @Override
+2120  public ListWAL getWALs() 
throws IOException {
+2121return walFactory.getWALs();
+2122  }
+2123
+2124  @Override
+2125  public WAL getWAL(RegionInfo 
regionInfo) throws IOException {
+2126WAL wal = 
walFactory.getWAL(regionInfo);
+2127if 

[08/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.AlwaysIncludeFilter.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.AlwaysIncludeFilter.html
 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.AlwaysIncludeFilter.html
new file mode 100644
index 000..b2f85ca
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.AlwaysIncludeFilter.html
@@ -0,0 +1,339 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+TestUserScanQueryMatcher.AlwaysIncludeFilter (Apache HBase 
3.0.0-SNAPSHOT Test API)
+
+
+
+
+
+var methods = {"i0":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.regionserver.querymatcher
+Class TestUserScanQueryMatcher.AlwaysIncludeFilter
+
+
+
+http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.filter.Filter
+
+
+org.apache.hadoop.hbase.filter.FilterBase
+
+
+org.apache.hadoop.hbase.regionserver.querymatcher.TestUserScanQueryMatcher.AlwaysIncludeFilter
+
+
+
+
+
+
+
+
+
+
+
+Enclosing class:
+TestUserScanQueryMatcher
+
+
+
+private class TestUserScanQueryMatcher.AlwaysIncludeFilter
+extends org.apache.hadoop.hbase.filter.FilterBase
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from 
classorg.apache.hadoop.hbase.filter.Filter
+org.apache.hadoop.hbase.filter.Filter.ReturnCode
+
+
+
+
+
+
+
+
+Field Summary
+
+
+
+
+Fields inherited from classorg.apache.hadoop.hbase.filter.Filter
+reversed
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Modifier
+Constructor and Description
+
+
+private 
+AlwaysIncludeFilter()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+org.apache.hadoop.hbase.filter.Filter.ReturnCode
+filterKeyValue(org.apache.hadoop.hbase.Cellc)
+
+
+
+
+
+
+Methods inherited from 
classorg.apache.hadoop.hbase.filter.FilterBase
+createFilterFromArguments, filterAllRemaining, filterRow, 
filterRowCells, filterRowKey, filterRowKey, getNextCellHint, hasFilterRow, 
isFamilyEssential, reset, toByteArray, toString, transformCell
+
+
+
+
+
+Methods inherited from 
classorg.apache.hadoop.hbase.filter.Filter
+filterCell, isReversed, parseFrom, setReversed
+
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--;
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--;
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--;
 title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--;
 title="class or interface in java.lang">notify, http://docs.oracle.com/javase/8/docs/api/java/lang
 /Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--;
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-;
 

[41/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html 
b/devapidocs/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html
index 02ca11f..97a2ac5 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class RegionGroupingProvider
+public class RegionGroupingProvider
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements WALProvider
 A WAL Provider that returns a WAL per group of regions.
@@ -194,7 +194,7 @@ implements 
 
 
-private IdLock
+private KeyLockerhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 createLock
 
 
@@ -307,8 +307,7 @@ implements 
 
 WAL
-getWAL(byte[]identifier,
-  byte[]namespace)
+getWAL(RegionInforegion)
 
 
 private WAL
@@ -361,7 +360,7 @@ implements 
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -370,7 +369,7 @@ implements 
 
 REGION_GROUPING_STRATEGY
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String REGION_GROUPING_STRATEGY
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String REGION_GROUPING_STRATEGY
 
 See Also:
 Constant
 Field Values
@@ -383,7 +382,7 @@ implements 
 
 DEFAULT_REGION_GROUPING_STRATEGY
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String DEFAULT_REGION_GROUPING_STRATEGY
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String DEFAULT_REGION_GROUPING_STRATEGY
 
 
 
@@ -392,7 +391,7 @@ implements 
 
 DELEGATE_PROVIDER
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String DELEGATE_PROVIDER
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String DELEGATE_PROVIDER
 delegate provider for WAL creation/roll/close
 
 See Also:
@@ -406,7 +405,7 @@ implements 
 
 DEFAULT_DELEGATE_PROVIDER
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String DEFAULT_DELEGATE_PROVIDER
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String DEFAULT_DELEGATE_PROVIDER
 
 
 
@@ -415,7 +414,7 @@ implements 
 
 META_WAL_GROUP_NAME
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String META_WAL_GROUP_NAME
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String META_WAL_GROUP_NAME
 
 See Also:
 Constant
 Field Values
@@ -428,7 +427,7 @@ implements 
 
 cached
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentMap.html?is-external=true;
 title="class or interface in java.util.concurrent">ConcurrentMaphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,WALProvider cached
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentMap.html?is-external=true;
 title="class or interface in java.util.concurrent">ConcurrentMaphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,WALProvider cached
 A group-provider mapping, make sure one-one rather than 
many-one mapping
 
 
@@ -438,7 +437,7 @@ implements 
 
 createLock
-private finalIdLock createLock
+private finalKeyLockerhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String createLock
 
 
 
@@ -447,7 +446,7 @@ implements 
 
 strategy
-privateRegionGroupingProvider.RegionGroupingStrategy 
strategy
+privateRegionGroupingProvider.RegionGroupingStrategy 
strategy
 
 
 
@@ -456,7 +455,7 @@ implements 
 
 factory
-privateWALFactory factory
+privateWALFactory factory
 
 
 
@@ -465,7 +464,7 @@ implements 
 
 listeners

[43/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.html 
b/devapidocs/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.html
index 2ddc94a..fd2c290 100644
--- a/devapidocs/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.html
+++ b/devapidocs/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class LoadIncrementalHFiles
+public class LoadIncrementalHFiles
 extends org.apache.hadoop.conf.Configured
 implements org.apache.hadoop.util.Tool
 Tool to load the output of HFileOutputFormat into an 
existing table.
@@ -605,7 +605,7 @@ implements org.apache.hadoop.util.Tool
 
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -614,7 +614,7 @@ implements org.apache.hadoop.util.Tool
 
 
 NAME
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String NAME
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String NAME
 
 See Also:
 Constant
 Field Values
@@ -627,7 +627,7 @@ implements org.apache.hadoop.util.Tool
 
 
 RETRY_ON_IO_EXCEPTION
-static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String RETRY_ON_IO_EXCEPTION
+static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String RETRY_ON_IO_EXCEPTION
 
 See Also:
 Constant
 Field Values
@@ -640,7 +640,7 @@ implements org.apache.hadoop.util.Tool
 
 
 MAX_FILES_PER_REGION_PER_FAMILY
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MAX_FILES_PER_REGION_PER_FAMILY
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MAX_FILES_PER_REGION_PER_FAMILY
 
 See Also:
 Constant
 Field Values
@@ -653,7 +653,7 @@ implements org.apache.hadoop.util.Tool
 
 
 ASSIGN_SEQ_IDS
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String ASSIGN_SEQ_IDS
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String ASSIGN_SEQ_IDS
 
 See Also:
 Constant
 Field Values
@@ -666,7 +666,7 @@ implements org.apache.hadoop.util.Tool
 
 
 CREATE_TABLE_CONF_KEY
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String CREATE_TABLE_CONF_KEY
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String CREATE_TABLE_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -679,7 +679,7 @@ implements org.apache.hadoop.util.Tool
 
 
 IGNORE_UNMATCHED_CF_CONF_KEY
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String IGNORE_UNMATCHED_CF_CONF_KEY
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String IGNORE_UNMATCHED_CF_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -692,7 +692,7 @@ implements org.apache.hadoop.util.Tool
 
 
 ALWAYS_COPY_FILES
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String ALWAYS_COPY_FILES
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String ALWAYS_COPY_FILES
 
 See Also:
 Constant
 Field Values
@@ -705,7 +705,7 @@ implements org.apache.hadoop.util.Tool
 
 
 TMP_DIR
-static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String TMP_DIR
+static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String TMP_DIR
 
 See Also:
 Constant
 Field Values
@@ -718,7 +718,7 @@ implements org.apache.hadoop.util.Tool
 
 
 maxFilesPerRegionPerFamily
-private finalint maxFilesPerRegionPerFamily
+private finalint maxFilesPerRegionPerFamily
 
 
 
@@ -727,7 +727,7 @@ implements org.apache.hadoop.util.Tool
 
 
 assignSeqIds
-private finalboolean assignSeqIds
+private finalboolean assignSeqIds
 
 
 
@@ -736,7 +736,7 @@ implements 

[44/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerConfigBuilder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerConfigBuilder.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerConfigBuilder.html
index a0b53d3..5bfb6b7 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerConfigBuilder.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationPeerConfigBuilder.html
@@ -127,16 +127,22 @@
 
 
 default ReplicationPeerConfigBuilder
-ReplicationPeerConfigBuilder.putAllConfiguration(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringconfiguration)
+ReplicationPeerConfigBuilder.putAllConfiguration(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringconfiguration)
+Adds all of the provided "raw" configuration entries to 
this.
+
 
 
 default ReplicationPeerConfigBuilder
-ReplicationPeerConfigBuilder.putAllPeerData(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in 
java.util">Mapbyte[],byte[]peerData)
+ReplicationPeerConfigBuilder.putAllPeerData(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in 
java.util">Mapbyte[],byte[]peerData)
+Sets all of the provided serialized peer configuration 
data.
+
 
 
 ReplicationPeerConfigBuilder
 ReplicationPeerConfigBuilder.putConfiguration(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringkey,
-http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringvalue)
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringvalue)
+Sets a "raw" configuration property for this replication 
peer.
+
 
 
 ReplicationPeerConfigBuilder
@@ -146,7 +152,9 @@
 
 ReplicationPeerConfigBuilder
 ReplicationPeerConfigBuilder.putPeerData(byte[]key,
-   byte[]value)
+   byte[]value)
+Sets the serialized peer configuration data
+
 
 
 ReplicationPeerConfigBuilder
@@ -155,7 +163,9 @@
 
 
 ReplicationPeerConfigBuilder
-ReplicationPeerConfigBuilder.setBandwidth(longbandwidth)
+ReplicationPeerConfigBuilder.setBandwidth(longbandwidth)
+Sets the speed, in bytes per second, for any one 
RegionServer to replicate data to the peer.
+
 
 
 ReplicationPeerConfigBuilder
@@ -174,7 +184,10 @@
 
 
 ReplicationPeerConfigBuilder
-ReplicationPeerConfigBuilder.setExcludeNamespaces(http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">Sethttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringnamespaces)
+ReplicationPeerConfigBuilder.setExcludeNamespaces(http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">Sethttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringnamespaces)
+Sets the collection of namespaces which should not be 
replicated when all user tables are
+ configured to be replicated.
+
 
 
 ReplicationPeerConfigBuilder
@@ -182,7 +195,9 @@
 
 
 ReplicationPeerConfigBuilder
-ReplicationPeerConfigBuilder.setExcludeTableCFsMap(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapTableName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringtableCFsMap)
+ReplicationPeerConfigBuilder.setExcludeTableCFsMap(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapTableName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 

[12/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHMobStore.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHMobStore.html 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHMobStore.html
index a4307f9..9a73216 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHMobStore.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHMobStore.html
@@ -748,7 +748,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 init
-privatevoidinit(org.apache.hadoop.conf.Configurationconf,
+privatevoidinit(org.apache.hadoop.conf.Configurationconf,
   org.apache.hadoop.hbase.HColumnDescriptorhcd)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
@@ -763,7 +763,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testGetFromMemStore
-publicvoidtestGetFromMemStore()
+publicvoidtestGetFromMemStore()
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Getting data from memstore
 
@@ -778,7 +778,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testGetFromFiles
-publicvoidtestGetFromFiles()
+publicvoidtestGetFromFiles()
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Getting MOB data from files
 
@@ -793,7 +793,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testGetReferencesFromFiles
-publicvoidtestGetReferencesFromFiles()
+publicvoidtestGetReferencesFromFiles()
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Getting the reference data from files
 
@@ -808,7 +808,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testGetFromMemStoreAndFiles
-publicvoidtestGetFromMemStoreAndFiles()
+publicvoidtestGetFromMemStoreAndFiles()
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Getting data from memstore and files
 
@@ -823,7 +823,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testMobCellSizeThreshold
-publicvoidtestMobCellSizeThreshold()
+publicvoidtestMobCellSizeThreshold()
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Getting data from memstore and files
 
@@ -838,7 +838,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testCommitFile
-publicvoidtestCommitFile()
+publicvoidtestCommitFile()
 throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -852,7 +852,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testResolve
-publicvoidtestResolve()
+publicvoidtestResolve()
  throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -866,7 +866,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 flush
-privatevoidflush(intstoreFilesSize)
+privatevoidflush(intstoreFilesSize)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Flush the memstore
 
@@ -883,7 +883,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 flushStore
-private staticvoidflushStore(org.apache.hadoop.hbase.regionserver.HMobStorestore,
+private staticvoidflushStore(org.apache.hadoop.hbase.regionserver.HMobStorestore,
longid)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Flush the memstore
@@ -902,7 +902,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testMOBStoreEncryption
-publicvoidtestMOBStoreEncryption()
+publicvoidtestMOBStoreEncryption()
 throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -916,7 +916,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 

[14/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html
index dc2a8e8..3630ba9 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html
@@ -143,8 +143,8 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.backup.TestBackupDeleteWithFailures.Failure
 org.apache.hadoop.hbase.backup.TestIncrementalBackupMergeWithFailures.FailurePhase
+org.apache.hadoop.hbase.backup.TestBackupDeleteWithFailures.Failure
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html 
b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
index f11329e..5c8c746 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
@@ -4062,6 +4062,23 @@
   intinitRowCount,
   intfactor)
 
+
+static int
+TestLoadIncrementalHFiles.loadHFiles(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtestName,
+  org.apache.hadoop.hbase.client.TableDescriptorhtd,
+  HBaseTestingUtilityutil,
+  byte[]fam,
+  byte[]qual,
+  booleanpreCreateTable,
+  byte[][]tableSplitKeys,
+  byte[][][]hfileRanges,
+  booleanuseMap,
+  booleandeleteFile,
+  booleancopyFiles,
+  intinitRowCount,
+  intfactor,
+  intdepth)
+
 
 
 
@@ -4256,7 +4273,7 @@
 TestBoundedRegionGroupingStrategy.TEST_UTIL
 
 
-protected static HBaseTestingUtility
+private static HBaseTestingUtility
 TestFSHLogProvider.TEST_UTIL
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestWALObserver.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestWALObserver.html 
b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestWALObserver.html
index 231e17c..f64f91d 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestWALObserver.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestWALObserver.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestWALObserver
+public class TestWALObserver
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Tests invocation of the
  MasterObserver interface hooks at
@@ -189,10 +189,6 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 TEST_VALUE
 
 
-private static byte[]
-UNSPECIFIED_REGION
-
-
 private 
org.apache.hadoop.hbase.wal.WALFactory
 wals
 
@@ -237,8 +233,8 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 private void
-addWALEdits(org.apache.hadoop.hbase.TableNametableName,
-   org.apache.hadoop.hbase.HRegionInfohri,
+addWALEdits(org.apache.hadoop.hbase.TableNametableName,
+   org.apache.hadoop.hbase.client.RegionInfohri,
byte[]rowName,
byte[]family,
intcount,
@@ -248,12 +244,14 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?

org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControlmvcc)
 
 
-private 
org.apache.hadoop.hbase.HRegionInfo
-createBasic3FamilyHRegionInfo(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringtableName)
+private 
org.apache.hadoop.hbase.client.TableDescriptor
+createBasic3FamilyHTD(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringtableName)
 
 
-private 
org.apache.hadoop.hbase.HTableDescriptor
-createBasic3FamilyHTD(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringtableName)
+private 

[28/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.html
index 972d795..d4f4a3d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.html
@@ -39,489 +39,490 @@
 031import org.apache.hadoop.fs.Path;
 032import 
org.apache.hadoop.hbase.HConstants;
 033import 
org.apache.hadoop.hbase.ServerName;
-034import 
org.apache.yetus.audience.InterfaceAudience;
-035import 
org.apache.yetus.audience.InterfaceStability;
-036import org.slf4j.Logger;
-037import org.slf4j.LoggerFactory;
-038import 
org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL;
-039import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-040import 
org.apache.hadoop.hbase.util.CancelableProgressable;
-041import 
org.apache.hadoop.hbase.util.FSUtils;
-042import 
org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
-043import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-044
-045/**
-046 * Base class of a WAL Provider that 
returns a single thread safe WAL that writes to Hadoop FS. By
-047 * default, this implementation picks a 
directory in Hadoop FS based on a combination of
-048 * ul
-049 * lithe HBase root directory
-050 * 
liHConstants.HREGION_LOGDIR_NAME
-051 * lithe given factory's 
factoryId (usually identifying the regionserver by host:port)
-052 * /ul
-053 * It also uses the providerId to 
differentiate among files.
-054 */
-055@InterfaceAudience.Private
-056@InterfaceStability.Evolving
-057public abstract class 
AbstractFSWALProviderT extends AbstractFSWAL? implements 
WALProvider {
-058
-059  private static final Logger LOG = 
LoggerFactory.getLogger(AbstractFSWALProvider.class);
-060
-061  /** Separate old log into different dir 
by regionserver name **/
-062  public static final String 
SEPARATE_OLDLOGDIR = "hbase.separate.oldlogdir.by.regionserver";
-063  public static final boolean 
DEFAULT_SEPARATE_OLDLOGDIR = false;
-064
-065  // Only public so classes back in 
regionserver.wal can access
-066  public interface Reader extends 
WAL.Reader {
-067/**
-068 * @param fs File system.
-069 * @param path Path.
-070 * @param c Configuration.
-071 * @param s Input stream that may 
have been pre-opened by the caller; may be null.
-072 */
-073void init(FileSystem fs, Path path, 
Configuration c, FSDataInputStream s) throws IOException;
-074  }
-075
-076  protected volatile T wal;
-077  protected WALFactory factory = null;
-078  protected Configuration conf = null;
-079  protected 
ListWALActionsListener listeners = null;
-080  protected String providerId = null;
-081  protected AtomicBoolean initialized = 
new AtomicBoolean(false);
-082  // for default wal provider, logPrefix 
won't change
-083  protected String logPrefix = null;
-084
-085  /**
-086   * we synchronized on walCreateLock to 
prevent wal recreation in different threads
-087   */
-088  private final Object walCreateLock = 
new Object();
-089
-090  /**
-091   * @param factory factory that made us, 
identity used for FS layout. may not be null
-092   * @param conf may not be null
-093   * @param listeners may be null
-094   * @param providerId differentiate 
between providers from one factory, used for FS layout. may be
-095   *  null
-096   */
-097  @Override
-098  public void init(WALFactory factory, 
Configuration conf, ListWALActionsListener listeners,
-099  String providerId) throws 
IOException {
-100if (!initialized.compareAndSet(false, 
true)) {
-101  throw new 
IllegalStateException("WALProvider.init should only be called once.");
-102}
-103this.factory = factory;
-104this.conf = conf;
-105this.listeners = listeners;
-106this.providerId = providerId;
-107// get log prefix
-108StringBuilder sb = new 
StringBuilder().append(factory.factoryId);
-109if (providerId != null) {
-110  if 
(providerId.startsWith(WAL_FILE_NAME_DELIMITER)) {
-111sb.append(providerId);
-112  } else {
-113
sb.append(WAL_FILE_NAME_DELIMITER).append(providerId);
-114  }
-115}
-116logPrefix = sb.toString();
-117doInit(conf);
-118  }
-119
-120  @Override
-121  public ListWAL getWALs() {
-122if (wal == null) {
-123  return Collections.emptyList();
-124}
-125ListWAL wals = new 
ArrayList(1);
-126wals.add(wal);
-127return wals;
-128  }
-129
-130  @Override
-131  public T getWAL(byte[] identifier, 
byte[] namespace) throws IOException {
-132T walCopy = wal;
-133if (walCopy == null) {
-134  // only lock when need to create 
wal, and need to lock since
-135  // creating hlog on fs is 

[36/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
index e743560..163ade0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
@@ -2124,1654 +2124,1642 @@
 2116return healthy;
 2117  }
 2118
-2119  private static final byte[] 
UNSPECIFIED_REGION = new byte[]{};
-2120
-2121  @Override
-2122  public ListWAL getWALs() 
throws IOException {
-2123return walFactory.getWALs();
-2124  }
-2125
-2126  @Override
-2127  public WAL getWAL(RegionInfo 
regionInfo) throws IOException {
-2128WAL wal;
-2129// _ROOT_ and hbase:meta regions 
have separate WAL.
-2130if (regionInfo != null  
regionInfo.isMetaRegion()
-2131 
regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
-2132  wal = 
walFactory.getMetaWAL(regionInfo.getEncodedNameAsBytes());
-2133} else if (regionInfo == null) {
-2134  wal = 
walFactory.getWAL(UNSPECIFIED_REGION, null);
-2135} else {
-2136  byte[] namespace = 
regionInfo.getTable().getNamespace();
-2137  wal = 
walFactory.getWAL(regionInfo.getEncodedNameAsBytes(), namespace);
-2138}
-2139if (this.walRoller != null) {
-2140  this.walRoller.addWAL(wal);
-2141}
-2142return wal;
-2143  }
-2144
-2145  public LogRoller getWalRoller() {
-2146return walRoller;
-2147  }
-2148
-2149  @Override
-2150  public Connection getConnection() {
-2151return getClusterConnection();
-2152  }
-2153
-2154  @Override
-2155  public ClusterConnection 
getClusterConnection() {
-2156return this.clusterConnection;
-2157  }
-2158
-2159  @Override
-2160  public MetaTableLocator 
getMetaTableLocator() {
-2161return this.metaTableLocator;
-2162  }
-2163
-2164  @Override
-2165  public void stop(final String msg) {
-2166stop(msg, false, 
RpcServer.getRequestUser().orElse(null));
-2167  }
-2168
-2169  /**
-2170   * Stops the regionserver.
-2171   * @param msg Status message
-2172   * @param force True if this is a 
regionserver abort
-2173   * @param user The user executing the 
stop request, or null if no user is associated
-2174   */
-2175  public void stop(final String msg, 
final boolean force, final User user) {
-2176if (!this.stopped) {
-2177  LOG.info("* STOPPING region 
server '" + this + "' *");
-2178  if (this.rsHost != null) {
-2179// when forced via abort don't 
allow CPs to override
-2180try {
-2181  this.rsHost.preStop(msg, 
user);
-2182} catch (IOException ioe) {
-2183  if (!force) {
-2184LOG.warn("The region server 
did not stop", ioe);
-2185return;
-2186  }
-2187  LOG.warn("Skipping coprocessor 
exception on preStop() due to forced shutdown", ioe);
-2188}
-2189  }
-2190  this.stopped = true;
-2191  LOG.info("STOPPED: " + msg);
-2192  // Wakes run() if it is sleeping
-2193  sleeper.skipSleepCycle();
-2194}
-2195  }
-2196
-2197  public void waitForServerOnline(){
-2198while (!isStopped()  
!isOnline()) {
-2199  synchronized (online) {
-2200try {
-2201  online.wait(msgInterval);
-2202} catch (InterruptedException 
ie) {
-2203  
Thread.currentThread().interrupt();
-2204  break;
-2205}
-2206  }
-2207}
-2208  }
-2209
-2210  @Override
-2211  public void postOpenDeployTasks(final 
PostOpenDeployContext context)
-2212  throws KeeperException, 
IOException {
-2213HRegion r = context.getRegion();
-2214long masterSystemTime = 
context.getMasterSystemTime();
-2215rpcServices.checkOpen();
-2216LOG.info("Post open deploy tasks for 
" + r.getRegionInfo().getRegionNameAsString());
-2217// Do checks to see if we need to 
compact (references or too many files)
-2218for (HStore s : r.stores.values()) 
{
-2219  if (s.hasReferences() || 
s.needsCompaction()) {
-2220
this.compactSplitThread.requestSystemCompaction(r, s, "Opening Region");
-2221  }
-}
-2223long openSeqNum = 
r.getOpenSeqNum();
-2224if (openSeqNum == 
HConstants.NO_SEQNUM) {
-2225  // If we opened a region, we 
should have read some sequence number from it.
-2226  LOG.error("No sequence number 
found when opening " +
-2227
r.getRegionInfo().getRegionNameAsString());
-2228  openSeqNum = 0;
-2229}
+2119  @Override
+2120  public ListWAL getWALs() 
throws IOException {
+2121return walFactory.getWALs();
+2122  }
+2123
+2124  @Override
+2125  public 

[24/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html
index 4fec5eb..63c9ca7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html
@@ -35,254 +35,262 @@
 027import java.util.List;
 028import 
java.util.concurrent.ConcurrentHashMap;
 029import 
java.util.concurrent.ConcurrentMap;
-030
+030import java.util.concurrent.locks.Lock;
 031import 
org.apache.hadoop.conf.Configuration;
-032import 
org.apache.yetus.audience.InterfaceAudience;
-033import org.slf4j.Logger;
-034import org.slf4j.LoggerFactory;
-035// imports for classes still in 
regionserver.wal
-036import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-037import 
org.apache.hadoop.hbase.util.Bytes;
-038import 
org.apache.hadoop.hbase.util.IdLock;
-039
-040/**
-041 * A WAL Provider that returns a WAL per 
group of regions.
-042 *
-043 * This provider follows the decorator 
pattern and mainly holds the logic for WAL grouping.
-044 * WAL creation/roll/close is delegated 
to {@link #DELEGATE_PROVIDER}
-045 *
-046 * Region grouping is handled via {@link 
RegionGroupingStrategy} and can be configured via the
-047 * property 
"hbase.wal.regiongrouping.strategy". Current strategy choices are
-048 * ul
-049 *   
liemdefaultStrategy/em : Whatever strategy this version 
of HBase picks. currently
-050 *  
"bounded"./li
-051 *   
liemidentity/em : each region belongs to its own 
group./li
-052 *   
liembounded/em : bounded number of groups and region 
evenly assigned to each group./li
-053 * /ul
-054 * Optionally, a FQCN to a custom 
implementation may be given.
-055 */
-056@InterfaceAudience.Private
-057public class RegionGroupingProvider 
implements WALProvider {
-058  private static final Logger LOG = 
LoggerFactory.getLogger(RegionGroupingProvider.class);
-059
-060  /**
-061   * Map identifiers to a group number.
-062   */
-063  public static interface 
RegionGroupingStrategy {
-064String GROUP_NAME_DELIMITER = ".";
-065
-066/**
-067 * Given an identifier and a 
namespace, pick a group.
-068 */
-069String group(final byte[] identifier, 
byte[] namespace);
-070void init(Configuration config, 
String providerId);
-071  }
-072
-073  /**
-074   * Maps between configuration names for 
strategies and implementation classes.
-075   */
-076  static enum Strategies {
-077
defaultStrategy(BoundedGroupingStrategy.class),
-078
identity(IdentityGroupingStrategy.class),
-079
bounded(BoundedGroupingStrategy.class),
-080
namespace(NamespaceGroupingStrategy.class);
-081
-082final Class? extends 
RegionGroupingStrategy clazz;
-083Strategies(Class? extends 
RegionGroupingStrategy clazz) {
-084  this.clazz = clazz;
-085}
-086  }
-087
-088  /**
-089   * instantiate a strategy from a config 
property.
-090   * requires conf to have already been 
set (as well as anything the provider might need to read).
-091   */
-092  RegionGroupingStrategy 
getStrategy(final Configuration conf, final String key,
-093  final String defaultValue) throws 
IOException {
-094Class? extends 
RegionGroupingStrategy clazz;
-095try {
-096  clazz = 
Strategies.valueOf(conf.get(key, defaultValue)).clazz;
-097} catch (IllegalArgumentException 
exception) {
-098  // Fall back to them specifying a 
class name
-099  // Note that the passed default 
class shouldn't actually be used, since the above only fails
-100  // when there is a config value 
present.
-101  clazz = conf.getClass(key, 
IdentityGroupingStrategy.class, RegionGroupingStrategy.class);
-102}
-103LOG.info("Instantiating 
RegionGroupingStrategy of type " + clazz);
-104try {
-105  final RegionGroupingStrategy result 
= clazz.newInstance();
-106  result.init(conf, providerId);
-107  return result;
-108} catch (InstantiationException 
exception) {
-109  LOG.error("couldn't set up region 
grouping strategy, check config key " +
-110  REGION_GROUPING_STRATEGY);
-111  LOG.debug("Exception details for 
failure to load region grouping strategy.", exception);
-112  throw new IOException("couldn't set 
up region grouping strategy", exception);
-113} catch (IllegalAccessException 
exception) {
-114  LOG.error("couldn't set up region 
grouping strategy, check config key " +
-115  REGION_GROUPING_STRATEGY);
-116  LOG.debug("Exception details for 
failure to load region grouping strategy.", exception);
-117  throw new IOException("couldn't set 
up region grouping strategy", exception);
-118}
-119  }
-120
-121  

[39/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
index e743560..163ade0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
@@ -2124,1654 +2124,1642 @@
 2116return healthy;
 2117  }
 2118
-2119  private static final byte[] 
UNSPECIFIED_REGION = new byte[]{};
-2120
-2121  @Override
-2122  public ListWAL getWALs() 
throws IOException {
-2123return walFactory.getWALs();
-2124  }
-2125
-2126  @Override
-2127  public WAL getWAL(RegionInfo 
regionInfo) throws IOException {
-2128WAL wal;
-2129// _ROOT_ and hbase:meta regions 
have separate WAL.
-2130if (regionInfo != null  
regionInfo.isMetaRegion()
-2131 
regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
-2132  wal = 
walFactory.getMetaWAL(regionInfo.getEncodedNameAsBytes());
-2133} else if (regionInfo == null) {
-2134  wal = 
walFactory.getWAL(UNSPECIFIED_REGION, null);
-2135} else {
-2136  byte[] namespace = 
regionInfo.getTable().getNamespace();
-2137  wal = 
walFactory.getWAL(regionInfo.getEncodedNameAsBytes(), namespace);
-2138}
-2139if (this.walRoller != null) {
-2140  this.walRoller.addWAL(wal);
-2141}
-2142return wal;
-2143  }
-2144
-2145  public LogRoller getWalRoller() {
-2146return walRoller;
-2147  }
-2148
-2149  @Override
-2150  public Connection getConnection() {
-2151return getClusterConnection();
-2152  }
-2153
-2154  @Override
-2155  public ClusterConnection 
getClusterConnection() {
-2156return this.clusterConnection;
-2157  }
-2158
-2159  @Override
-2160  public MetaTableLocator 
getMetaTableLocator() {
-2161return this.metaTableLocator;
-2162  }
-2163
-2164  @Override
-2165  public void stop(final String msg) {
-2166stop(msg, false, 
RpcServer.getRequestUser().orElse(null));
-2167  }
-2168
-2169  /**
-2170   * Stops the regionserver.
-2171   * @param msg Status message
-2172   * @param force True if this is a 
regionserver abort
-2173   * @param user The user executing the 
stop request, or null if no user is associated
-2174   */
-2175  public void stop(final String msg, 
final boolean force, final User user) {
-2176if (!this.stopped) {
-2177  LOG.info("* STOPPING region 
server '" + this + "' *");
-2178  if (this.rsHost != null) {
-2179// when forced via abort don't 
allow CPs to override
-2180try {
-2181  this.rsHost.preStop(msg, 
user);
-2182} catch (IOException ioe) {
-2183  if (!force) {
-2184LOG.warn("The region server 
did not stop", ioe);
-2185return;
-2186  }
-2187  LOG.warn("Skipping coprocessor 
exception on preStop() due to forced shutdown", ioe);
-2188}
-2189  }
-2190  this.stopped = true;
-2191  LOG.info("STOPPED: " + msg);
-2192  // Wakes run() if it is sleeping
-2193  sleeper.skipSleepCycle();
-2194}
-2195  }
-2196
-2197  public void waitForServerOnline(){
-2198while (!isStopped()  
!isOnline()) {
-2199  synchronized (online) {
-2200try {
-2201  online.wait(msgInterval);
-2202} catch (InterruptedException 
ie) {
-2203  
Thread.currentThread().interrupt();
-2204  break;
-2205}
-2206  }
-2207}
-2208  }
-2209
-2210  @Override
-2211  public void postOpenDeployTasks(final 
PostOpenDeployContext context)
-2212  throws KeeperException, 
IOException {
-2213HRegion r = context.getRegion();
-2214long masterSystemTime = 
context.getMasterSystemTime();
-2215rpcServices.checkOpen();
-2216LOG.info("Post open deploy tasks for 
" + r.getRegionInfo().getRegionNameAsString());
-2217// Do checks to see if we need to 
compact (references or too many files)
-2218for (HStore s : r.stores.values()) 
{
-2219  if (s.hasReferences() || 
s.needsCompaction()) {
-2220
this.compactSplitThread.requestSystemCompaction(r, s, "Opening Region");
-2221  }
-}
-2223long openSeqNum = 
r.getOpenSeqNum();
-2224if (openSeqNum == 
HConstants.NO_SEQNUM) {
-2225  // If we opened a region, we 
should have read some sequence number from it.
-2226  LOG.error("No sequence number 
found when opening " +
-2227
r.getRegionInfo().getRegionNameAsString());
-2228  openSeqNum = 0;
-2229}
+2119  @Override
+2120  public ListWAL getWALs() 
throws IOException {
+2121return walFactory.getWALs();
+2122  }
+2123
+2124  @Override
+2125  public WAL getWAL(RegionInfo 

[33/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.BulkHFileVisitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.BulkHFileVisitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.BulkHFileVisitor.html
index 1f114e0..01e19b2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.BulkHFileVisitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.BulkHFileVisitor.html
@@ -25,1239 +25,1263 @@
 017 */
 018package org.apache.hadoop.hbase.tool;
 019
-020import static java.lang.String.format;
-021
-022import java.io.FileNotFoundException;
-023import java.io.IOException;
-024import java.io.InterruptedIOException;
-025import java.nio.ByteBuffer;
-026import java.util.ArrayDeque;
-027import java.util.ArrayList;
-028import java.util.Arrays;
-029import java.util.Collection;
-030import java.util.Collections;
-031import java.util.Deque;
-032import java.util.HashMap;
-033import java.util.HashSet;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Map.Entry;
-037import java.util.Optional;
-038import java.util.Set;
-039import java.util.SortedMap;
-040import java.util.TreeMap;
-041import java.util.UUID;
-042import java.util.concurrent.Callable;
-043import 
java.util.concurrent.ExecutionException;
-044import 
java.util.concurrent.ExecutorService;
-045import java.util.concurrent.Future;
-046import 
java.util.concurrent.LinkedBlockingQueue;
-047import 
java.util.concurrent.ThreadPoolExecutor;
-048import java.util.concurrent.TimeUnit;
-049import 
java.util.concurrent.atomic.AtomicInteger;
-050import java.util.stream.Collectors;
-051
-052import 
org.apache.commons.lang3.mutable.MutableInt;
-053import 
org.apache.hadoop.conf.Configuration;
-054import 
org.apache.hadoop.conf.Configured;
-055import org.apache.hadoop.fs.FileStatus;
-056import org.apache.hadoop.fs.FileSystem;
-057import org.apache.hadoop.fs.Path;
-058import 
org.apache.hadoop.fs.permission.FsPermission;
-059import 
org.apache.hadoop.hbase.HBaseConfiguration;
-060import 
org.apache.hadoop.hbase.HConstants;
-061import 
org.apache.hadoop.hbase.TableName;
-062import 
org.apache.hadoop.hbase.TableNotFoundException;
-063import 
org.apache.yetus.audience.InterfaceAudience;
-064import org.slf4j.Logger;
-065import org.slf4j.LoggerFactory;
-066import 
org.apache.hadoop.hbase.client.Admin;
-067import 
org.apache.hadoop.hbase.client.ClientServiceCallable;
-068import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-069import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-070import 
org.apache.hadoop.hbase.client.Connection;
-071import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-072import 
org.apache.hadoop.hbase.client.RegionLocator;
-073import 
org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
-074import 
org.apache.hadoop.hbase.client.SecureBulkLoadClient;
-075import 
org.apache.hadoop.hbase.client.Table;
-076import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-077import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-078import 
org.apache.hadoop.hbase.io.HFileLink;
-079import 
org.apache.hadoop.hbase.io.HalfStoreFileReader;
-080import 
org.apache.hadoop.hbase.io.Reference;
-081import 
org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-082import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-083import 
org.apache.hadoop.hbase.io.hfile.HFile;
-084import 
org.apache.hadoop.hbase.io.hfile.HFileContext;
-085import 
org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-086import 
org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
-087import 
org.apache.hadoop.hbase.io.hfile.HFileScanner;
-088import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-089import 
org.apache.hadoop.hbase.regionserver.BloomType;
-090import 
org.apache.hadoop.hbase.regionserver.HStore;
-091import 
org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-092import 
org.apache.hadoop.hbase.regionserver.StoreFileWriter;
-093import 
org.apache.hadoop.hbase.security.UserProvider;
-094import 
org.apache.hadoop.hbase.security.token.FsDelegationToken;
-095import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-096import 
org.apache.hbase.thirdparty.com.google.common.collect.HashMultimap;
-097import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-098import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimaps;
-099import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-100import 
org.apache.hadoop.hbase.util.Bytes;
-101import 
org.apache.hadoop.hbase.util.FSHDFSUtils;
-102import 
org.apache.hadoop.hbase.util.Pair;
-103import org.apache.hadoop.util.Tool;
-104import 
org.apache.hadoop.util.ToolRunner;
-105
-106/**
-107 * Tool to load the 

[25/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.Strategies.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.Strategies.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.Strategies.html
index 4fec5eb..63c9ca7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.Strategies.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.Strategies.html
@@ -35,254 +35,262 @@
 027import java.util.List;
 028import 
java.util.concurrent.ConcurrentHashMap;
 029import 
java.util.concurrent.ConcurrentMap;
-030
+030import java.util.concurrent.locks.Lock;
 031import 
org.apache.hadoop.conf.Configuration;
-032import 
org.apache.yetus.audience.InterfaceAudience;
-033import org.slf4j.Logger;
-034import org.slf4j.LoggerFactory;
-035// imports for classes still in 
regionserver.wal
-036import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-037import 
org.apache.hadoop.hbase.util.Bytes;
-038import 
org.apache.hadoop.hbase.util.IdLock;
-039
-040/**
-041 * A WAL Provider that returns a WAL per 
group of regions.
-042 *
-043 * This provider follows the decorator 
pattern and mainly holds the logic for WAL grouping.
-044 * WAL creation/roll/close is delegated 
to {@link #DELEGATE_PROVIDER}
-045 *
-046 * Region grouping is handled via {@link 
RegionGroupingStrategy} and can be configured via the
-047 * property 
"hbase.wal.regiongrouping.strategy". Current strategy choices are
-048 * ul
-049 *   
liemdefaultStrategy/em : Whatever strategy this version 
of HBase picks. currently
-050 *  
"bounded"./li
-051 *   
liemidentity/em : each region belongs to its own 
group./li
-052 *   
liembounded/em : bounded number of groups and region 
evenly assigned to each group./li
-053 * /ul
-054 * Optionally, a FQCN to a custom 
implementation may be given.
-055 */
-056@InterfaceAudience.Private
-057public class RegionGroupingProvider 
implements WALProvider {
-058  private static final Logger LOG = 
LoggerFactory.getLogger(RegionGroupingProvider.class);
-059
-060  /**
-061   * Map identifiers to a group number.
-062   */
-063  public static interface 
RegionGroupingStrategy {
-064String GROUP_NAME_DELIMITER = ".";
-065
-066/**
-067 * Given an identifier and a 
namespace, pick a group.
-068 */
-069String group(final byte[] identifier, 
byte[] namespace);
-070void init(Configuration config, 
String providerId);
-071  }
-072
-073  /**
-074   * Maps between configuration names for 
strategies and implementation classes.
-075   */
-076  static enum Strategies {
-077
defaultStrategy(BoundedGroupingStrategy.class),
-078
identity(IdentityGroupingStrategy.class),
-079
bounded(BoundedGroupingStrategy.class),
-080
namespace(NamespaceGroupingStrategy.class);
-081
-082final Class? extends 
RegionGroupingStrategy clazz;
-083Strategies(Class? extends 
RegionGroupingStrategy clazz) {
-084  this.clazz = clazz;
-085}
-086  }
-087
-088  /**
-089   * instantiate a strategy from a config 
property.
-090   * requires conf to have already been 
set (as well as anything the provider might need to read).
-091   */
-092  RegionGroupingStrategy 
getStrategy(final Configuration conf, final String key,
-093  final String defaultValue) throws 
IOException {
-094Class? extends 
RegionGroupingStrategy clazz;
-095try {
-096  clazz = 
Strategies.valueOf(conf.get(key, defaultValue)).clazz;
-097} catch (IllegalArgumentException 
exception) {
-098  // Fall back to them specifying a 
class name
-099  // Note that the passed default 
class shouldn't actually be used, since the above only fails
-100  // when there is a config value 
present.
-101  clazz = conf.getClass(key, 
IdentityGroupingStrategy.class, RegionGroupingStrategy.class);
-102}
-103LOG.info("Instantiating 
RegionGroupingStrategy of type " + clazz);
-104try {
-105  final RegionGroupingStrategy result 
= clazz.newInstance();
-106  result.init(conf, providerId);
-107  return result;
-108} catch (InstantiationException 
exception) {
-109  LOG.error("couldn't set up region 
grouping strategy, check config key " +
-110  REGION_GROUPING_STRATEGY);
-111  LOG.debug("Exception details for 
failure to load region grouping strategy.", exception);
-112  throw new IOException("couldn't set 
up region grouping strategy", exception);
-113} catch (IllegalAccessException 
exception) {
-114  LOG.error("couldn't set up region 
grouping strategy, check config key " +
-115  REGION_GROUPING_STRATEGY);
-116  LOG.debug("Exception details for 
failure to load region grouping strategy.", exception);
-117  throw new IOException("couldn't set 
up region grouping 

[23/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html
index fadf667..14b2b69 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html
@@ -7,269 +7,269 @@
 
 
 001/**
-002 *
-003 * Licensed to the Apache Software 
Foundation (ASF) under one
-004 * or more contributor license 
agreements.  See the NOTICE file
-005 * distributed with this work for 
additional information
-006 * regarding copyright ownership.  The 
ASF licenses this file
-007 * to you under the Apache License, 
Version 2.0 (the
-008 * "License"); you may not use this file 
except in compliance
-009 * with the License.  You may obtain a 
copy of the License at
-010 *
-011 * 
http://www.apache.org/licenses/LICENSE-2.0
-012 *
-013 * Unless required by applicable law or 
agreed to in writing, software
-014 * distributed under the License is 
distributed on an "AS IS" BASIS,
-015 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
-016 * See the License for the specific 
language governing permissions and
-017 * limitations under the License.
-018 */
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing, software
+013 * distributed under the License is 
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+015 * See the License for the specific 
language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase.wal;
 019
-020
-021package org.apache.hadoop.hbase.wal;
-022
-023import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-024
-025import java.io.IOException;
-026import java.io.InterruptedIOException;
-027import java.util.Collections;
-028import java.util.List;
-029import java.util.OptionalLong;
-030import 
java.util.concurrent.atomic.AtomicReference;
-031
-032import 
org.apache.hadoop.conf.Configuration;
-033import org.apache.hadoop.fs.FileSystem;
-034import org.apache.hadoop.fs.Path;
-035import 
org.apache.yetus.audience.InterfaceAudience;
-036import org.slf4j.Logger;
-037import org.slf4j.LoggerFactory;
-038// imports for things that haven't moved 
from regionserver.wal yet.
-039import 
org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
-040import 
org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader;
-041import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-042import 
org.apache.hadoop.hbase.replication.regionserver.WALFileLengthProvider;
-043import 
org.apache.hadoop.hbase.util.CancelableProgressable;
-044import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-045import 
org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
-046import 
org.apache.hadoop.hbase.wal.WAL.Reader;
-047import 
org.apache.hadoop.hbase.wal.WALProvider.Writer;
-048
-049/**
-050 * Entry point for users of the Write 
Ahead Log.
-051 * Acts as the shim between internal use 
and the particular WALProvider we use to handle wal
-052 * requests.
-053 *
-054 * Configure which provider gets used 
with the configuration setting "hbase.wal.provider". Available
-055 * implementations:
-056 * ul
-057 *   
liemdefaultProvider/em : whatever provider is standard 
for the hbase version. Currently
-058 *  
"filesystem"/li
-059 *   
liemfilesystem/em : a provider that will run on top of 
an implementation of the Hadoop
-060 * FileSystem 
interface, normally HDFS./li
-061 *   
liemmultiwal/em : a provider that will use multiple 
"filesystem" wal instances per region
-062 *   
server./li
-063 * /ul
-064 *
-065 * Alternatively, you may provide a 
custom implementation of {@link WALProvider} by class name.
-066 */
-067@InterfaceAudience.Private
-068public class WALFactory implements 
WALFileLengthProvider {
-069
-070  private static final Logger LOG = 
LoggerFactory.getLogger(WALFactory.class);
-071
-072  /**
-073   * Maps between configuration names for 
providers and implementation classes.
-074   */
-075  static enum Providers {
-076

[40/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
index bfa7220..a29ad5c 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
@@ -189,8 +189,8 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.wal.RegionGroupingProvider.Strategies
 org.apache.hadoop.hbase.wal.WALFactory.Providers
+org.apache.hadoop.hbase.wal.RegionGroupingProvider.Strategies
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/NamespaceExistException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/NamespaceExistException.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/NamespaceExistException.html
index 47baf13..7b9c0db 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/NamespaceExistException.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/NamespaceExistException.html
@@ -39,8 +39,8 @@
 031  public NamespaceExistException() {
 032  }
 033
-034  public NamespaceExistException(String 
namespace) {
-035super("Namespace " + namespace + " 
already exists");
+034  public NamespaceExistException(String 
msg) {
+035super(msg);
 036  }
 037}
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index c8cf4de..cd2e99a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 008@InterfaceAudience.Private
 009public class Version {
 010  public static final String version = 
"3.0.0-SNAPSHOT";
-011  public static final String revision = 
"f0d0501d556efb8796138d4aa92ce4f276c93c3b";
+011  public static final String revision = 
"71a1192d671a93cc17b82e4355f2ace97c41dae5";
 012  public static final String user = 
"jenkins";
-013  public static final String date = "Wed 
Jan 10 14:42:56 UTC 2018";
+013  public static final String date = "Thu 
Jan 11 14:42:50 UTC 2018";
 014  public static final String url = 
"git://asf920.gq1.ygridcore.net/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015  public static final String srcChecksum 
= "1d36de1c42253feef08a258d994ee96c";
+015  public static final String srcChecksum 
= "07936aec0f0e3c30af710d08a44d3d3a";
 016}
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.html
index 65c3a26..6adba42 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.html
@@ -200,7 +200,7 @@
 192   */
 193  private void prepareCreate(final 
MasterProcedureEnv env) throws IOException {
 194if 
(getTableNamespaceManager(env).doesNamespaceExist(nsDescriptor.getName())) {
-195  throw new 
NamespaceExistException(nsDescriptor.getName());
+195  throw new 
NamespaceExistException("Namespace " + nsDescriptor.getName() + " already 
exists");
 196}
 197
getTableNamespaceManager(env).validateTableAndRegionCount(nsDescriptor);
 198  }



[32/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.LoadQueueItem.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.LoadQueueItem.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.LoadQueueItem.html
index 1f114e0..01e19b2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.LoadQueueItem.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.LoadQueueItem.html
@@ -25,1239 +25,1263 @@
 017 */
 018package org.apache.hadoop.hbase.tool;
 019
-020import static java.lang.String.format;
-021
-022import java.io.FileNotFoundException;
-023import java.io.IOException;
-024import java.io.InterruptedIOException;
-025import java.nio.ByteBuffer;
-026import java.util.ArrayDeque;
-027import java.util.ArrayList;
-028import java.util.Arrays;
-029import java.util.Collection;
-030import java.util.Collections;
-031import java.util.Deque;
-032import java.util.HashMap;
-033import java.util.HashSet;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Map.Entry;
-037import java.util.Optional;
-038import java.util.Set;
-039import java.util.SortedMap;
-040import java.util.TreeMap;
-041import java.util.UUID;
-042import java.util.concurrent.Callable;
-043import 
java.util.concurrent.ExecutionException;
-044import 
java.util.concurrent.ExecutorService;
-045import java.util.concurrent.Future;
-046import 
java.util.concurrent.LinkedBlockingQueue;
-047import 
java.util.concurrent.ThreadPoolExecutor;
-048import java.util.concurrent.TimeUnit;
-049import 
java.util.concurrent.atomic.AtomicInteger;
-050import java.util.stream.Collectors;
-051
-052import 
org.apache.commons.lang3.mutable.MutableInt;
-053import 
org.apache.hadoop.conf.Configuration;
-054import 
org.apache.hadoop.conf.Configured;
-055import org.apache.hadoop.fs.FileStatus;
-056import org.apache.hadoop.fs.FileSystem;
-057import org.apache.hadoop.fs.Path;
-058import 
org.apache.hadoop.fs.permission.FsPermission;
-059import 
org.apache.hadoop.hbase.HBaseConfiguration;
-060import 
org.apache.hadoop.hbase.HConstants;
-061import 
org.apache.hadoop.hbase.TableName;
-062import 
org.apache.hadoop.hbase.TableNotFoundException;
-063import 
org.apache.yetus.audience.InterfaceAudience;
-064import org.slf4j.Logger;
-065import org.slf4j.LoggerFactory;
-066import 
org.apache.hadoop.hbase.client.Admin;
-067import 
org.apache.hadoop.hbase.client.ClientServiceCallable;
-068import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-069import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-070import 
org.apache.hadoop.hbase.client.Connection;
-071import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-072import 
org.apache.hadoop.hbase.client.RegionLocator;
-073import 
org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
-074import 
org.apache.hadoop.hbase.client.SecureBulkLoadClient;
-075import 
org.apache.hadoop.hbase.client.Table;
-076import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-077import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-078import 
org.apache.hadoop.hbase.io.HFileLink;
-079import 
org.apache.hadoop.hbase.io.HalfStoreFileReader;
-080import 
org.apache.hadoop.hbase.io.Reference;
-081import 
org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-082import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-083import 
org.apache.hadoop.hbase.io.hfile.HFile;
-084import 
org.apache.hadoop.hbase.io.hfile.HFileContext;
-085import 
org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-086import 
org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
-087import 
org.apache.hadoop.hbase.io.hfile.HFileScanner;
-088import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-089import 
org.apache.hadoop.hbase.regionserver.BloomType;
-090import 
org.apache.hadoop.hbase.regionserver.HStore;
-091import 
org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-092import 
org.apache.hadoop.hbase.regionserver.StoreFileWriter;
-093import 
org.apache.hadoop.hbase.security.UserProvider;
-094import 
org.apache.hadoop.hbase.security.token.FsDelegationToken;
-095import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-096import 
org.apache.hbase.thirdparty.com.google.common.collect.HashMultimap;
-097import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-098import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimaps;
-099import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-100import 
org.apache.hadoop.hbase.util.Bytes;
-101import 
org.apache.hadoop.hbase.util.FSHDFSUtils;
-102import 
org.apache.hadoop.hbase.util.Pair;
-103import org.apache.hadoop.util.Tool;
-104import 
org.apache.hadoop.util.ToolRunner;
-105
-106/**
-107 * Tool to load the output of 

[06/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.html
 
b/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.html
index 2cef7d6..1ec37a5 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestWALEntryStream
+public class TestWALEntryStream
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 
 
@@ -165,7 +165,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 fs
 
 
-private static 
org.apache.hadoop.hbase.HRegionInfo
+private static 
org.apache.hadoop.hbase.client.RegionInfo
 info
 
 
@@ -362,7 +362,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TEST_UTIL
-private staticHBaseTestingUtility TEST_UTIL
+private staticHBaseTestingUtility TEST_UTIL
 
 
 
@@ -371,7 +371,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 conf
-private staticorg.apache.hadoop.conf.Configuration conf
+private staticorg.apache.hadoop.conf.Configuration conf
 
 
 
@@ -380,7 +380,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 fs
-private staticorg.apache.hadoop.fs.FileSystem fs
+private staticorg.apache.hadoop.fs.FileSystem fs
 
 
 
@@ -389,7 +389,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 cluster
-private staticorg.apache.hadoop.hdfs.MiniDFSCluster cluster
+private staticorg.apache.hadoop.hdfs.MiniDFSCluster cluster
 
 
 
@@ -398,7 +398,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 tableName
-private static finalorg.apache.hadoop.hbase.TableName tableName
+private static finalorg.apache.hadoop.hbase.TableName tableName
 
 
 
@@ -407,7 +407,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 family
-private static finalbyte[] family
+private static finalbyte[] family
 
 
 
@@ -416,7 +416,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 qualifier
-private static finalbyte[] qualifier
+private static finalbyte[] qualifier
 
 
 
@@ -425,7 +425,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 info
-private static finalorg.apache.hadoop.hbase.HRegionInfo info
+private static finalorg.apache.hadoop.hbase.client.RegionInfo info
 
 
 
@@ -434,7 +434,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 scopes
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapbyte[],http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integer scopes
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapbyte[],http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integer scopes
 
 
 
@@ -443,7 +443,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 log
-privateorg.apache.hadoop.hbase.wal.WAL log
+privateorg.apache.hadoop.hbase.wal.WAL log
 
 
 
@@ -452,7 +452,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 walQueue
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/PriorityBlockingQueue.html?is-external=true;
 title="class or interface in 
java.util.concurrent">PriorityBlockingQueueorg.apache.hadoop.fs.Path
 walQueue
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/PriorityBlockingQueue.html?is-external=true;
 title="class or interface in 
java.util.concurrent">PriorityBlockingQueueorg.apache.hadoop.fs.Path
 walQueue
 
 
 
@@ -461,7 +461,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 pathWatcher
-privateTestWALEntryStream.PathWatcher
 pathWatcher
+privateTestWALEntryStream.PathWatcher
 pathWatcher
 
 
 
@@ -470,7 +470,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 tn
-publicorg.junit.rules.TestName tn
+publicorg.junit.rules.TestName tn
 
 
 
@@ -479,7 +479,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 mvcc
-private 
finalorg.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl 
mvcc
+private 
finalorg.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl 
mvcc
 
 
 
@@ 

[15/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html 
b/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
index d446a14..97f60fb 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -2218,7 +2218,7 @@ private static finalhttp://docs.oracle.com/javase/8/docs/api/java
 
 
 hbaseAdmin
-privateorg.apache.hadoop.hbase.client.HBaseAdmin hbaseAdmin
+privateorg.apache.hadoop.hbase.client.HBaseAdmin hbaseAdmin
 
 
 
@@ -2227,7 +2227,7 @@ private static finalhttp://docs.oracle.com/javase/8/docs/api/java
 
 
 random
-private statichttp://docs.oracle.com/javase/8/docs/api/java/util/Random.html?is-external=true;
 title="class or interface in java.util">Random random
+private statichttp://docs.oracle.com/javase/8/docs/api/java/util/Random.html?is-external=true;
 title="class or interface in java.util">Random random
 
 
 
@@ -2236,7 +2236,7 @@ private static finalhttp://docs.oracle.com/javase/8/docs/api/java
 
 
 portAllocator
-private static finalHBaseTestingUtility.PortAllocator 
portAllocator
+private static finalHBaseTestingUtility.PortAllocator 
portAllocator
 
 
 
@@ -4746,7 +4746,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 createRegionAndWAL
-public staticorg.apache.hadoop.hbase.regionserver.HRegioncreateRegionAndWAL(org.apache.hadoop.hbase.client.RegionInfoinfo,
+public staticorg.apache.hadoop.hbase.regionserver.HRegioncreateRegionAndWAL(org.apache.hadoop.hbase.client.RegionInfoinfo,
   
org.apache.hadoop.fs.PathrootDir,
   
org.apache.hadoop.conf.Configurationconf,
   
org.apache.hadoop.hbase.client.TableDescriptorhtd)
@@ -4765,7 +4765,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 createRegionAndWAL
-public staticorg.apache.hadoop.hbase.regionserver.HRegioncreateRegionAndWAL(org.apache.hadoop.hbase.client.RegionInfoinfo,
+public staticorg.apache.hadoop.hbase.regionserver.HRegioncreateRegionAndWAL(org.apache.hadoop.hbase.client.RegionInfoinfo,
   
org.apache.hadoop.fs.PathrootDir,
   
org.apache.hadoop.conf.Configurationconf,
   
org.apache.hadoop.hbase.client.TableDescriptorhtd,
@@ -4785,7 +4785,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 getMetaTableRows
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listbyte[]getMetaTableRows()
+publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listbyte[]getMetaTableRows()
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Returns all rows from the hbase:meta table.
 
@@ -4800,7 +4800,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 getMetaTableRows
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listbyte[]getMetaTableRows(org.apache.hadoop.hbase.TableNametableName)
+publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listbyte[]getMetaTableRows(org.apache.hadoop.hbase.TableNametableName)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Returns all rows from the hbase:meta table for a given user 
table
 
@@ -4815,7 +4815,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 getOtherRegionServer
-publicorg.apache.hadoop.hbase.regionserver.HRegionServergetOtherRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServerrs)
+publicorg.apache.hadoop.hbase.regionserver.HRegionServergetOtherRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServerrs)
 
 
 
@@ -4824,7 +4824,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 getRSForFirstRegionInTable
-publicorg.apache.hadoop.hbase.regionserver.HRegionServergetRSForFirstRegionInTable(org.apache.hadoop.hbase.TableNametableName)

[34/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.html
index 6096dcc..3cb24fc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.html
@@ -245,56 +245,62 @@
 237count += 1;
 238
 239if (count  versionsAfterFilter) 
{
-240  return MatchCode.SEEK_NEXT_COL;
-241} else {
-242  if (matchCode == 
MatchCode.INCLUDE_AND_SEEK_NEXT_COL) {
-243// Update column tracker to next 
column, As we use the column hint from the tracker to seek
-244// to next cell
-245columns.doneWithColumn(cell);
-246  }
-247  return matchCode;
+240  // when the number of cells exceed 
max version in scan, we should return SEEK_NEXT_COL match
+241  // code, but if current code is 
INCLUDE_AND_SEEK_NEXT_ROW, we can optimize to choose the max
+242  // step between SEEK_NEXT_COL and 
INCLUDE_AND_SEEK_NEXT_ROW, which is SEEK_NEXT_ROW.
+243  if (matchCode == 
MatchCode.INCLUDE_AND_SEEK_NEXT_ROW) {
+244matchCode = 
MatchCode.SEEK_NEXT_ROW;
+245  } else {
+246matchCode = 
MatchCode.SEEK_NEXT_COL;
+247  }
 248}
-249  }
-250
-251  protected abstract boolean isGet();
-252
-253  protected abstract boolean 
moreRowsMayExistsAfter(int cmpToStopRow);
-254
-255  @Override
-256  public boolean 
moreRowsMayExistAfter(Cell cell) {
-257// If a 'get' Scan -- we are doing a 
Get (every Get is a single-row Scan in implementation) --
-258// then we are looking at one row 
only, the one specified in the Get coordinate..so we know
-259// for sure that there are no more 
rows on this Scan
-260if (isGet()) {
-261  return false;
-262}
-263// If no stopRow, return that there 
may be more rows. The tests that follow depend on a
-264// non-empty, non-default stopRow so 
this little test below short-circuits out doing the
-265// following compares.
-266if (this.stopRow == null || 
this.stopRow.length == 0) {
-267  return true;
+249if (matchCode == 
MatchCode.INCLUDE_AND_SEEK_NEXT_COL || matchCode == MatchCode.SEEK_NEXT_COL) 
{
+250  // Update column tracker to next 
column, As we use the column hint from the tracker to seek
+251  // to next cell (HBASE-19749)
+252  columns.doneWithColumn(cell);
+253}
+254return matchCode;
+255  }
+256
+257  protected abstract boolean isGet();
+258
+259  protected abstract boolean 
moreRowsMayExistsAfter(int cmpToStopRow);
+260
+261  @Override
+262  public boolean 
moreRowsMayExistAfter(Cell cell) {
+263// If a 'get' Scan -- we are doing a 
Get (every Get is a single-row Scan in implementation) --
+264// then we are looking at one row 
only, the one specified in the Get coordinate..so we know
+265// for sure that there are no more 
rows on this Scan
+266if (isGet()) {
+267  return false;
 268}
-269return 
moreRowsMayExistsAfter(rowComparator.compareRows(cell, stopRow, 0, 
stopRow.length));
-270  }
-271
-272  public static UserScanQueryMatcher 
create(Scan scan, ScanInfo scanInfo,
-273  NavigableSetbyte[] columns, 
long oldestUnexpiredTS, long now,
-274  RegionCoprocessorHost 
regionCoprocessorHost) throws IOException {
-275boolean hasNullColumn =
-276!(columns != null  
columns.size() != 0  columns.first().length != 0);
-277PairDeleteTracker, 
ColumnTracker trackers = getTrackers(regionCoprocessorHost, columns,
-278scanInfo, oldestUnexpiredTS, 
scan);
-279DeleteTracker deleteTracker = 
trackers.getFirst();
-280ColumnTracker columnTracker = 
trackers.getSecond();
-281if (scan.isRaw()) {
-282  return 
RawScanQueryMatcher.create(scan, scanInfo, columnTracker, hasNullColumn,
-283oldestUnexpiredTS, now);
-284} else {
-285  return 
NormalUserScanQueryMatcher.create(scan, scanInfo, columnTracker, 
deleteTracker,
-286  hasNullColumn, 
oldestUnexpiredTS, now);
-287}
-288  }
-289}
+269// If no stopRow, return that there 
may be more rows. The tests that follow depend on a
+270// non-empty, non-default stopRow so 
this little test below short-circuits out doing the
+271// following compares.
+272if (this.stopRow == null || 
this.stopRow.length == 0) {
+273  return true;
+274}
+275return 
moreRowsMayExistsAfter(rowComparator.compareRows(cell, stopRow, 0, 
stopRow.length));
+276  }
+277
+278  public static UserScanQueryMatcher 
create(Scan scan, ScanInfo scanInfo,
+279  NavigableSetbyte[] columns, 
long oldestUnexpiredTS, long 

[30/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
index b14bbed..93cf760 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
@@ -1490,9 +1490,9 @@
 1482// unless I pass along via the 
conf.
 1483Configuration confForWAL = new 
Configuration(c);
 1484confForWAL.set(HConstants.HBASE_DIR, 
rootdir.toString());
-1485WAL wal = (new 
WALFactory(confForWAL,
-1486
Collections.WALActionsListener singletonList(new MetricsWAL()), 
walFactoryID))
-1487
.getWAL(metaHRI.getEncodedNameAsBytes(), metaHRI.getTable().getNamespace());
+1485WAL wal =
+1486  new WALFactory(confForWAL, 
Collections.WALActionsListener singletonList(new MetricsWAL()),
+1487  
walFactoryID).getWAL(metaHRI);
 1488HRegion meta = 
HRegion.createHRegion(metaHRI, rootdir, c, metaDescriptor, wal);
 1489
MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, true);
 1490return meta;

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
index b14bbed..93cf760 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
@@ -1490,9 +1490,9 @@
 1482// unless I pass along via the 
conf.
 1483Configuration confForWAL = new 
Configuration(c);
 1484confForWAL.set(HConstants.HBASE_DIR, 
rootdir.toString());
-1485WAL wal = (new 
WALFactory(confForWAL,
-1486
Collections.WALActionsListener singletonList(new MetricsWAL()), 
walFactoryID))
-1487
.getWAL(metaHRI.getEncodedNameAsBytes(), metaHRI.getTable().getNamespace());
+1485WAL wal =
+1486  new WALFactory(confForWAL, 
Collections.WALActionsListener singletonList(new MetricsWAL()),
+1487  
walFactoryID).getWAL(metaHRI);
 1488HRegion meta = 
HRegion.createHRegion(metaHRI, rootdir, c, metaDescriptor, wal);
 1489
MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, true);
 1490return meta;

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
index b14bbed..93cf760 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
@@ -1490,9 +1490,9 @@
 1482// unless I pass along via the 
conf.
 1483Configuration confForWAL = new 
Configuration(c);
 1484confForWAL.set(HConstants.HBASE_DIR, 
rootdir.toString());
-1485WAL wal = (new 
WALFactory(confForWAL,
-1486
Collections.WALActionsListener singletonList(new MetricsWAL()), 
walFactoryID))
-1487
.getWAL(metaHRI.getEncodedNameAsBytes(), metaHRI.getTable().getNamespace());
+1485WAL wal =
+1486  new WALFactory(confForWAL, 
Collections.WALActionsListener singletonList(new MetricsWAL()),
+1487  
walFactoryID).getWAL(metaHRI);
 1488HRegion meta = 
HRegion.createHRegion(metaHRI, rootdir, c, metaDescriptor, wal);
 1489
MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, true);
 1490return meta;

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
index b14bbed..93cf760 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
@@ -1490,9 +1490,9 @@
 1482// unless I pass along via the 
conf.
 1483Configuration confForWAL = new 

[37/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
index e743560..163ade0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
@@ -2124,1654 +2124,1642 @@
 2116return healthy;
 2117  }
 2118
-2119  private static final byte[] 
UNSPECIFIED_REGION = new byte[]{};
-2120
-2121  @Override
-2122  public ListWAL getWALs() 
throws IOException {
-2123return walFactory.getWALs();
-2124  }
-2125
-2126  @Override
-2127  public WAL getWAL(RegionInfo 
regionInfo) throws IOException {
-2128WAL wal;
-2129// _ROOT_ and hbase:meta regions 
have separate WAL.
-2130if (regionInfo != null  
regionInfo.isMetaRegion()
-2131 
regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
-2132  wal = 
walFactory.getMetaWAL(regionInfo.getEncodedNameAsBytes());
-2133} else if (regionInfo == null) {
-2134  wal = 
walFactory.getWAL(UNSPECIFIED_REGION, null);
-2135} else {
-2136  byte[] namespace = 
regionInfo.getTable().getNamespace();
-2137  wal = 
walFactory.getWAL(regionInfo.getEncodedNameAsBytes(), namespace);
-2138}
-2139if (this.walRoller != null) {
-2140  this.walRoller.addWAL(wal);
-2141}
-2142return wal;
-2143  }
-2144
-2145  public LogRoller getWalRoller() {
-2146return walRoller;
-2147  }
-2148
-2149  @Override
-2150  public Connection getConnection() {
-2151return getClusterConnection();
-2152  }
-2153
-2154  @Override
-2155  public ClusterConnection 
getClusterConnection() {
-2156return this.clusterConnection;
-2157  }
-2158
-2159  @Override
-2160  public MetaTableLocator 
getMetaTableLocator() {
-2161return this.metaTableLocator;
-2162  }
-2163
-2164  @Override
-2165  public void stop(final String msg) {
-2166stop(msg, false, 
RpcServer.getRequestUser().orElse(null));
-2167  }
-2168
-2169  /**
-2170   * Stops the regionserver.
-2171   * @param msg Status message
-2172   * @param force True if this is a 
regionserver abort
-2173   * @param user The user executing the 
stop request, or null if no user is associated
-2174   */
-2175  public void stop(final String msg, 
final boolean force, final User user) {
-2176if (!this.stopped) {
-2177  LOG.info("* STOPPING region 
server '" + this + "' *");
-2178  if (this.rsHost != null) {
-2179// when forced via abort don't 
allow CPs to override
-2180try {
-2181  this.rsHost.preStop(msg, 
user);
-2182} catch (IOException ioe) {
-2183  if (!force) {
-2184LOG.warn("The region server 
did not stop", ioe);
-2185return;
-2186  }
-2187  LOG.warn("Skipping coprocessor 
exception on preStop() due to forced shutdown", ioe);
-2188}
-2189  }
-2190  this.stopped = true;
-2191  LOG.info("STOPPED: " + msg);
-2192  // Wakes run() if it is sleeping
-2193  sleeper.skipSleepCycle();
-2194}
-2195  }
-2196
-2197  public void waitForServerOnline(){
-2198while (!isStopped()  
!isOnline()) {
-2199  synchronized (online) {
-2200try {
-2201  online.wait(msgInterval);
-2202} catch (InterruptedException 
ie) {
-2203  
Thread.currentThread().interrupt();
-2204  break;
-2205}
-2206  }
-2207}
-2208  }
-2209
-2210  @Override
-2211  public void postOpenDeployTasks(final 
PostOpenDeployContext context)
-2212  throws KeeperException, 
IOException {
-2213HRegion r = context.getRegion();
-2214long masterSystemTime = 
context.getMasterSystemTime();
-2215rpcServices.checkOpen();
-2216LOG.info("Post open deploy tasks for 
" + r.getRegionInfo().getRegionNameAsString());
-2217// Do checks to see if we need to 
compact (references or too many files)
-2218for (HStore s : r.stores.values()) 
{
-2219  if (s.hasReferences() || 
s.needsCompaction()) {
-2220
this.compactSplitThread.requestSystemCompaction(r, s, "Opening Region");
-2221  }
-}
-2223long openSeqNum = 
r.getOpenSeqNum();
-2224if (openSeqNum == 
HConstants.NO_SEQNUM) {
-2225  // If we opened a region, we 
should have read some sequence number from it.
-2226  LOG.error("No sequence number 
found when opening " +
-2227
r.getRegionInfo().getRegionNameAsString());
-2228  openSeqNum = 0;
-2229}
+2119  @Override
+2120  public ListWAL getWALs() 
throws IOException {
+2121return walFactory.getWALs();
+2122  }
+2123
+2124  @Override
+2125  public WAL getWAL(RegionInfo 

[21/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALProvider.WriterBase.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALProvider.WriterBase.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALProvider.WriterBase.html
index 59680d7..e118e2f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALProvider.WriterBase.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALProvider.WriterBase.html
@@ -30,87 +30,86 @@
 022import java.io.IOException;
 023import java.util.List;
 024import 
java.util.concurrent.CompletableFuture;
-025
-026import 
org.apache.hadoop.conf.Configuration;
-027import 
org.apache.yetus.audience.InterfaceAudience;
-028// imports for things that haven't moved 
from regionserver.wal yet.
-029import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-030
-031/**
-032 * The Write Ahead Log (WAL) stores all 
durable edits to the HRegion.
-033 * This interface provides the entry 
point for all WAL implementors.
-034 * p
-035 * See {@link FSHLogProvider} for an 
example implementation.
-036 *
-037 * A single WALProvider will be used for 
retrieving multiple WALs in a particular region server
-038 * and must be threadsafe.
-039 */
-040@InterfaceAudience.Private
-041public interface WALProvider {
-042
-043  /**
-044   * Set up the provider to create 
wals.
-045   * will only be called once per 
instance.
-046   * @param factory factory that made us 
may not be null
-047   * @param conf may not be null
-048   * @param listeners may be null
-049   * @param providerId differentiate 
between providers from one factory. may be null
-050   */
-051  void init(final WALFactory factory, 
final Configuration conf,
-052  final 
ListWALActionsListener listeners, final String providerId) throws 
IOException;
-053
-054  /**
-055   * @param identifier may not be null. 
contents will not be altered.
-056   * @param namespace could be null, and 
will use default namespace if null
-057   * @return a WAL for writing entries 
for the given region.
-058   */
-059  WAL getWAL(final byte[] identifier, 
byte[] namespace) throws IOException;
-060
-061  /** @return the List of WALs that are 
used by this server
-062   */
-063  ListWAL getWALs();
-064
-065  /**
-066   * persist outstanding WALs to storage 
and stop accepting new appends.
-067   * This method serves as shorthand for 
sending a sync to every WAL provided by a given
-068   * implementation. Those WALs will also 
stop accepting new writes.
-069   */
-070  void shutdown() throws IOException;
-071
-072  /**
-073   * shutdown utstanding WALs and clean 
up any persisted state.
-074   * Call this method only when you will 
not need to replay any of the edits to the WALs from
-075   * this provider. After this call 
completes, the underlying resources should have been reclaimed.
-076   */
-077  void close() throws IOException;
-078
-079  interface WriterBase extends Closeable 
{
-080long getLength();
-081  }
-082
-083  // Writers are used internally. Users 
outside of the WAL should be relying on the
-084  // interface provided by WAL.
-085  interface Writer extends WriterBase {
-086void sync() throws IOException;
-087void append(WAL.Entry entry) throws 
IOException;
-088  }
-089
-090  interface AsyncWriter extends 
WriterBase {
-091CompletableFutureLong 
sync();
-092void append(WAL.Entry entry);
-093  }
-094
-095  /**
-096   * Get number of the log files this 
provider is managing
-097   */
-098  long getNumLogFiles();
-099
-100  /**
-101   * Get size of the log files this 
provider is managing
-102   */
-103  long getLogFileSize();
-104
-105}
+025import 
org.apache.hadoop.conf.Configuration;
+026import 
org.apache.hadoop.hbase.client.RegionInfo;
+027import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
+028import 
org.apache.yetus.audience.InterfaceAudience;
+029
+030/**
+031 * The Write Ahead Log (WAL) stores all 
durable edits to the HRegion.
+032 * This interface provides the entry 
point for all WAL implementors.
+033 * p
+034 * See {@link FSHLogProvider} for an 
example implementation.
+035 *
+036 * A single WALProvider will be used for 
retrieving multiple WALs in a particular region server
+037 * and must be threadsafe.
+038 */
+039@InterfaceAudience.Private
+040public interface WALProvider {
+041
+042  /**
+043   * Set up the provider to create 
wals.
+044   * will only be called once per 
instance.
+045   * @param factory factory that made us 
may not be null
+046   * @param conf may not be null
+047   * @param listeners may be null
+048   * @param providerId differentiate 
between providers from one factory. may be null
+049   */
+050  void init(WALFactory factory, 
Configuration conf, ListWALActionsListener listeners,
+051  String providerId) throws 
IOException;
+052
+053  /**
+054   * @param region the region which we 

[01/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 096cff083 -> f183e80f4


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.html 
b/testdevapidocs/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.html
index 744f161..17918aa 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.html
@@ -275,7 +275,7 @@ implements org.apache.hadoop.util.Tool
 closeRegion(org.apache.hadoop.hbase.regionserver.HRegionregion)
 
 
-private static 
org.apache.hadoop.hbase.HTableDescriptor
+private static 
org.apache.hadoop.hbase.client.TableDescriptor
 createHTableDescriptor(intregionNum,
   intnumFamilies)
 
@@ -298,9 +298,9 @@ implements org.apache.hadoop.util.Tool
 
 
 private 
org.apache.hadoop.hbase.regionserver.HRegion
-openRegion(org.apache.hadoop.fs.FileSystemfs,
+openRegion(org.apache.hadoop.fs.FileSystemfs,
   org.apache.hadoop.fs.Pathdir,
-  org.apache.hadoop.hbase.HTableDescriptorhtd,
+  org.apache.hadoop.hbase.client.TableDescriptorhtd,
   org.apache.hadoop.hbase.wal.WALFactorywals,
   longwhenToRoll,
   
org.apache.hadoop.hbase.regionserver.LogRollerroller)
@@ -526,7 +526,7 @@ implements org.apache.hadoop.util.Tool
 
 
 walsListenedTo
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in 
java.util">Setorg.apache.hadoop.hbase.wal.WAL walsListenedTo
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in 
java.util">Setorg.apache.hadoop.hbase.wal.WAL walsListenedTo
 
 
 
@@ -575,7 +575,7 @@ implements org.apache.hadoop.util.Tool
 
 
 run
-publicintrun(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String[]args)
+publicintrun(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String[]args)
 throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 
 Specified by:
@@ -591,8 +591,8 @@ implements org.apache.hadoop.util.Tool
 
 
 createHTableDescriptor
-private staticorg.apache.hadoop.hbase.HTableDescriptorcreateHTableDescriptor(intregionNum,
-   
intnumFamilies)
+private 
staticorg.apache.hadoop.hbase.client.TableDescriptorcreateHTableDescriptor(intregionNum,
+   
  intnumFamilies)
 
 
 
@@ -601,7 +601,7 @@ implements org.apache.hadoop.util.Tool
 
 
 verify
-privatelongverify(org.apache.hadoop.hbase.wal.WALFactorywals,
+privatelongverify(org.apache.hadoop.hbase.wal.WALFactorywals,
 org.apache.hadoop.fs.Pathwal,
 booleanverbose)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
@@ -624,7 +624,7 @@ implements org.apache.hadoop.util.Tool
 
 
 logBenchmarkResult
-private staticvoidlogBenchmarkResult(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtestName,
+private staticvoidlogBenchmarkResult(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtestName,
longnumTests,
longtotalTime)
 
@@ -635,18 +635,18 @@ implements org.apache.hadoop.util.Tool
 
 
 printUsageAndExit
-privatevoidprintUsageAndExit()
+privatevoidprintUsageAndExit()
 
 
-
+
 
 
 
 
 openRegion
-privateorg.apache.hadoop.hbase.regionserver.HRegionopenRegion(org.apache.hadoop.fs.FileSystemfs,
+privateorg.apache.hadoop.hbase.regionserver.HRegionopenRegion(org.apache.hadoop.fs.FileSystemfs,
 
org.apache.hadoop.fs.Pathdir,
-
org.apache.hadoop.hbase.HTableDescriptorhtd,
+
org.apache.hadoop.hbase.client.TableDescriptorhtd,
 
org.apache.hadoop.hbase.wal.WALFactorywals,
 
longwhenToRoll,
 
org.apache.hadoop.hbase.regionserver.LogRollerroller)
@@ -663,7 +663,7 @@ implements 

[02/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html 
b/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html
index 75f9a0f..e18b27b 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestWALSplit
+public class TestWALSplit
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Testing WAL splitting code.
 
@@ -293,8 +293,8 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 private static void
-appendCompactionEvent(org.apache.hadoop.hbase.wal.WALProvider.Writerw,
- org.apache.hadoop.hbase.HRegionInfohri,
+appendCompactionEvent(org.apache.hadoop.hbase.wal.WALProvider.Writerw,
+ org.apache.hadoop.hbase.client.RegionInfohri,
  http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String[]inputs,
  http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringoutput)
 
@@ -601,7 +601,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -610,7 +610,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 conf
-private staticorg.apache.hadoop.conf.Configuration conf
+private staticorg.apache.hadoop.conf.Configuration conf
 
 
 
@@ -619,7 +619,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 fs
-privateorg.apache.hadoop.fs.FileSystem fs
+privateorg.apache.hadoop.fs.FileSystem fs
 
 
 
@@ -628,7 +628,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TEST_UTIL
-protected static finalHBaseTestingUtility TEST_UTIL
+protected static finalHBaseTestingUtility TEST_UTIL
 
 
 
@@ -637,7 +637,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 HBASEDIR
-privateorg.apache.hadoop.fs.Path HBASEDIR
+privateorg.apache.hadoop.fs.Path HBASEDIR
 
 
 
@@ -646,7 +646,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 HBASELOGDIR
-privateorg.apache.hadoop.fs.Path HBASELOGDIR
+privateorg.apache.hadoop.fs.Path HBASELOGDIR
 
 
 
@@ -655,7 +655,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 WALDIR
-privateorg.apache.hadoop.fs.Path WALDIR
+privateorg.apache.hadoop.fs.Path WALDIR
 
 
 
@@ -664,7 +664,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 OLDLOGDIR
-privateorg.apache.hadoop.fs.Path OLDLOGDIR
+privateorg.apache.hadoop.fs.Path OLDLOGDIR
 
 
 
@@ -673,7 +673,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 CORRUPTDIR
-privateorg.apache.hadoop.fs.Path CORRUPTDIR
+privateorg.apache.hadoop.fs.Path CORRUPTDIR
 
 
 
@@ -682,7 +682,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TABLEDIR
-privateorg.apache.hadoop.fs.Path TABLEDIR
+privateorg.apache.hadoop.fs.Path TABLEDIR
 
 
 
@@ -691,7 +691,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 NUM_WRITERS
-private static finalint NUM_WRITERS
+private static finalint NUM_WRITERS
 
 See Also:
 Constant
 Field Values
@@ -704,7 +704,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ENTRIES
-private static finalint ENTRIES
+private static finalint ENTRIES
 
 See Also:
 Constant
 Field Values
@@ -717,7 +717,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 FILENAME_BEING_SPLIT
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String FILENAME_BEING_SPLIT
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String FILENAME_BEING_SPLIT
 
 See Also:
 Constant
 Field Values
@@ -730,7 +730,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TABLE_NAME
-private static finalorg.apache.hadoop.hbase.TableName TABLE_NAME
+private static finalorg.apache.hadoop.hbase.TableName TABLE_NAME
 
 
 
@@ -739,7 +739,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 FAMILY
-private static finalbyte[] FAMILY
+private static finalbyte[] FAMILY
 
 
 
@@ -748,7 +748,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 QUALIFIER
-private static 

[26/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.RegionGroupingStrategy.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.RegionGroupingStrategy.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.RegionGroupingStrategy.html
index 4fec5eb..63c9ca7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.RegionGroupingStrategy.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.RegionGroupingStrategy.html
@@ -35,254 +35,262 @@
 027import java.util.List;
 028import 
java.util.concurrent.ConcurrentHashMap;
 029import 
java.util.concurrent.ConcurrentMap;
-030
+030import java.util.concurrent.locks.Lock;
 031import 
org.apache.hadoop.conf.Configuration;
-032import 
org.apache.yetus.audience.InterfaceAudience;
-033import org.slf4j.Logger;
-034import org.slf4j.LoggerFactory;
-035// imports for classes still in 
regionserver.wal
-036import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-037import 
org.apache.hadoop.hbase.util.Bytes;
-038import 
org.apache.hadoop.hbase.util.IdLock;
-039
-040/**
-041 * A WAL Provider that returns a WAL per 
group of regions.
-042 *
-043 * This provider follows the decorator 
pattern and mainly holds the logic for WAL grouping.
-044 * WAL creation/roll/close is delegated 
to {@link #DELEGATE_PROVIDER}
-045 *
-046 * Region grouping is handled via {@link 
RegionGroupingStrategy} and can be configured via the
-047 * property 
"hbase.wal.regiongrouping.strategy". Current strategy choices are
-048 * ul
-049 *   
liemdefaultStrategy/em : Whatever strategy this version 
of HBase picks. currently
-050 *  
"bounded"./li
-051 *   
liemidentity/em : each region belongs to its own 
group./li
-052 *   
liembounded/em : bounded number of groups and region 
evenly assigned to each group./li
-053 * /ul
-054 * Optionally, a FQCN to a custom 
implementation may be given.
-055 */
-056@InterfaceAudience.Private
-057public class RegionGroupingProvider 
implements WALProvider {
-058  private static final Logger LOG = 
LoggerFactory.getLogger(RegionGroupingProvider.class);
-059
-060  /**
-061   * Map identifiers to a group number.
-062   */
-063  public static interface 
RegionGroupingStrategy {
-064String GROUP_NAME_DELIMITER = ".";
-065
-066/**
-067 * Given an identifier and a 
namespace, pick a group.
-068 */
-069String group(final byte[] identifier, 
byte[] namespace);
-070void init(Configuration config, 
String providerId);
-071  }
-072
-073  /**
-074   * Maps between configuration names for 
strategies and implementation classes.
-075   */
-076  static enum Strategies {
-077
defaultStrategy(BoundedGroupingStrategy.class),
-078
identity(IdentityGroupingStrategy.class),
-079
bounded(BoundedGroupingStrategy.class),
-080
namespace(NamespaceGroupingStrategy.class);
-081
-082final Class? extends 
RegionGroupingStrategy clazz;
-083Strategies(Class? extends 
RegionGroupingStrategy clazz) {
-084  this.clazz = clazz;
-085}
-086  }
-087
-088  /**
-089   * instantiate a strategy from a config 
property.
-090   * requires conf to have already been 
set (as well as anything the provider might need to read).
-091   */
-092  RegionGroupingStrategy 
getStrategy(final Configuration conf, final String key,
-093  final String defaultValue) throws 
IOException {
-094Class? extends 
RegionGroupingStrategy clazz;
-095try {
-096  clazz = 
Strategies.valueOf(conf.get(key, defaultValue)).clazz;
-097} catch (IllegalArgumentException 
exception) {
-098  // Fall back to them specifying a 
class name
-099  // Note that the passed default 
class shouldn't actually be used, since the above only fails
-100  // when there is a config value 
present.
-101  clazz = conf.getClass(key, 
IdentityGroupingStrategy.class, RegionGroupingStrategy.class);
-102}
-103LOG.info("Instantiating 
RegionGroupingStrategy of type " + clazz);
-104try {
-105  final RegionGroupingStrategy result 
= clazz.newInstance();
-106  result.init(conf, providerId);
-107  return result;
-108} catch (InstantiationException 
exception) {
-109  LOG.error("couldn't set up region 
grouping strategy, check config key " +
-110  REGION_GROUPING_STRATEGY);
-111  LOG.debug("Exception details for 
failure to load region grouping strategy.", exception);
-112  throw new IOException("couldn't set 
up region grouping strategy", exception);
-113} catch (IllegalAccessException 
exception) {
-114  LOG.error("couldn't set up region 
grouping strategy, check config key " +
-115  REGION_GROUPING_STRATEGY);
-116  LOG.debug("Exception details for 
failure to load region grouping strategy.", exception);
-117   

[09/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.html 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.html
index 9694561..2bcceb6 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestHStore
+public class TestHStore
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Test class for the HStore
 
@@ -613,7 +613,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -622,7 +622,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 name
-publicorg.junit.rules.TestName name
+publicorg.junit.rules.TestName name
 
 
 
@@ -631,7 +631,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 region
-org.apache.hadoop.hbase.regionserver.HRegion region
+org.apache.hadoop.hbase.regionserver.HRegion region
 
 
 
@@ -640,7 +640,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 store
-org.apache.hadoop.hbase.regionserver.HStore store
+org.apache.hadoop.hbase.regionserver.HStore store
 
 
 
@@ -649,7 +649,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 table
-byte[] table
+byte[] table
 
 
 
@@ -658,7 +658,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 family
-byte[] family
+byte[] family
 
 
 
@@ -667,7 +667,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 row
-byte[] row
+byte[] row
 
 
 
@@ -676,7 +676,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 row2
-byte[] row2
+byte[] row2
 
 
 
@@ -685,7 +685,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 qf1
-byte[] qf1
+byte[] qf1
 
 
 
@@ -694,7 +694,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 qf2
-byte[] qf2
+byte[] qf2
 
 
 
@@ -703,7 +703,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 qf3
-byte[] qf3
+byte[] qf3
 
 
 
@@ -712,7 +712,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 qf4
-byte[] qf4
+byte[] qf4
 
 
 
@@ -721,7 +721,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 qf5
-byte[] qf5
+byte[] qf5
 
 
 
@@ -730,7 +730,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 qf6
-byte[] qf6
+byte[] qf6
 
 
 
@@ -739,7 +739,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 qualifiers
-http://docs.oracle.com/javase/8/docs/api/java/util/NavigableSet.html?is-external=true;
 title="class or interface in java.util">NavigableSetbyte[] qualifiers
+http://docs.oracle.com/javase/8/docs/api/java/util/NavigableSet.html?is-external=true;
 title="class or interface in java.util">NavigableSetbyte[] qualifiers
 
 
 
@@ -748,7 +748,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 expected
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.hbase.Cell expected
+http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.hbase.Cell expected
 
 
 
@@ -757,7 +757,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 result
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.hbase.Cell result
+http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.hbase.Cell result
 
 
 
@@ -766,7 +766,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 id
-long id
+long id
 
 
 
@@ -775,7 +775,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 get
-org.apache.hadoop.hbase.client.Get get
+org.apache.hadoop.hbase.client.Get get
 
 
 
@@ -784,7 +784,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TEST_UTIL
-private static finalHBaseTestingUtility TEST_UTIL
+private static finalHBaseTestingUtility TEST_UTIL
 
 
 
@@ -793,7 +793,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 DIR
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or 

hbase git commit: HBASE-19694 The initialization order for a fresh cluster is incorrect

2018-01-11 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 25e4bf8f3 -> 1a11fc92b


HBASE-19694 The initialization order for a fresh cluster is incorrect

Become active Master before calling the super class's run method. Have
the wait-on-becoming-active-Master be in-line rather than off in a
background thread (i.e. undo running thread in startActiveMasterManager)

Purge the fragile HBASE-16367 hackery that attempted to fix this issue
previously by adding a latch to try and hold up superclass RegionServer
until cluster id set by subclass Master.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1a11fc92
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1a11fc92
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1a11fc92

Branch: refs/heads/branch-2
Commit: 1a11fc92b16835808848b054d30b210d5572e492
Parents: 25e4bf8
Author: Michael Stack 
Authored: Tue Jan 9 12:49:39 2018 -0800
Committer: Michael Stack 
Committed: Thu Jan 11 14:25:25 2018 -0800

--
 .../org/apache/hadoop/hbase/master/HMaster.java | 122 ++-
 .../hbase/regionserver/HRegionServer.java   |   7 --
 .../hbase/master/TestTableStateManager.java |   2 +-
 .../regionserver/TestPerColumnFamilyFlush.java  |   1 -
 4 files changed, 64 insertions(+), 68 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1a11fc92/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 945f54d..ee7cd18 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -40,7 +40,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Objects;
 import java.util.Set;
-import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
@@ -524,12 +523,9 @@ public class HMaster extends HRegionServer implements 
MasterServices {
 
   // Some unit tests don't need a cluster, so no zookeeper at all
   if (!conf.getBoolean("hbase.testing.nocluster", false)) {
-setInitLatch(new CountDownLatch(1));
-activeMasterManager = new ActiveMasterManager(zooKeeper, 
this.serverName, this);
-int infoPort = putUpJettyServer();
-startActiveMasterManager(infoPort);
+this.activeMasterManager = new ActiveMasterManager(zooKeeper, 
this.serverName, this);
   } else {
-activeMasterManager = null;
+this.activeMasterManager = null;
   }
 } catch (Throwable t) {
   // Make sure we log the exception. HMaster is often started via 
reflection and the
@@ -539,10 +535,27 @@ public class HMaster extends HRegionServer implements 
MasterServices {
 }
   }
 
-  // Main run loop. Calls through to the regionserver run loop.
+  // Main run loop. Calls through to the regionserver run loop AFTER becoming 
active Master; will
+  // block in here until then.
   @Override
   public void run() {
 try {
+  if (!conf.getBoolean("hbase.testing.nocluster", false)) {
+try {
+  int infoPort = putUpJettyServer();
+  startActiveMasterManager(infoPort);
+} catch (Throwable t) {
+  // Make sure we log the exception.
+  String error = "Failed to become Active Master";
+  LOG.error(error, t);
+  // Abort should have been called already.
+  if (!isAborted()) {
+abort(error, t);
+  }
+}
+  }
+  // Fall in here even if we have been aborted. Need to run the shutdown 
services and
+  // the super run call will do this for us.
   super.run();
 } finally {
   if (this.clusterSchemaService != null) {
@@ -757,9 +770,9 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   private void finishActiveMasterInitialization(MonitoredTask status)
   throws IOException, InterruptedException, KeeperException, 
CoordinatedStateException {
 
-activeMaster = true;
 Thread zombieDetector = new Thread(new InitializationMonitor(this),
 "ActiveMasterInitializationMonitor-" + System.currentTimeMillis());
+zombieDetector.setDaemon(true);
 zombieDetector.start();
 
 /*
@@ -783,10 +796,9 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   this.tableDescriptors.getAll();
 }
 
-// publish cluster ID
+// Publish cluster ID
 status.setStatus("Publishing Cluster ID in ZooKeeper");
 

[2/2] hbase git commit: HBASE-19685 Fix TestFSErrorsExposed#testFullSystemBubblesFSErrors by increasing scanner timeout.

2018-01-11 Thread appy
HBASE-19685 Fix TestFSErrorsExposed#testFullSystemBubblesFSErrors by increasing 
scanner timeout.

Signed-off-by: Apekshit Sharma 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5d4140e0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5d4140e0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5d4140e0

Branch: refs/heads/branch-2
Commit: 5d4140e09d8ecf16a6e7d845129759c30e62f9af
Parents: 31fe5db
Author: Chia-Ping Tsai 
Authored: Thu Jan 11 10:27:18 2018 -0800
Committer: Apekshit Sharma 
Committed: Thu Jan 11 10:35:36 2018 -0800

--
 .../org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5d4140e0/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
index bd66bde..bbf06fd 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
@@ -188,6 +188,7 @@ public class TestFSErrorsExposed {
 try {
   // Make it fail faster.
   util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 
1);
+  
util.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 
9);
   util.getConfiguration().setInt("hbase.lease.recovery.timeout", 1);
   util.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000);
   util.startMiniCluster(1);



[1/2] hbase git commit: Revert due to missing author. "HBASE-19685 Fix TestFSErrorsExposed#testFullSystemBubblesFSErrors by increasing scanner timeout."

2018-01-11 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/master 0b9dc14db -> c88e0


Revert due to missing author. "HBASE-19685 Fix 
TestFSErrorsExposed#testFullSystemBubblesFSErrors by increasing scanner 
timeout."

This reverts commit 0b9dc14dbca98565d29bcd2be5100e29ebcf0e65.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/15bb49d8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/15bb49d8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/15bb49d8

Branch: refs/heads/master
Commit: 15bb49d803d873c07ebdd6bf77908481f008d07a
Parents: 0b9dc14
Author: Apekshit Sharma 
Authored: Thu Jan 11 10:33:15 2018 -0800
Committer: Apekshit Sharma 
Committed: Thu Jan 11 10:34:42 2018 -0800

--
 .../org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java   | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/15bb49d8/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
index bbf06fd..bd66bde 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
@@ -188,7 +188,6 @@ public class TestFSErrorsExposed {
 try {
   // Make it fail faster.
   util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 
1);
-  
util.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 
9);
   util.getConfiguration().setInt("hbase.lease.recovery.timeout", 1);
   util.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000);
   util.startMiniCluster(1);



[2/2] hbase git commit: HBASE-19685 Fix TestFSErrorsExposed#testFullSystemBubblesFSErrors by increasing scanner timeout.

2018-01-11 Thread appy
HBASE-19685 Fix TestFSErrorsExposed#testFullSystemBubblesFSErrors by increasing 
scanner timeout.

Signed-off-by: Apekshit Sharma 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c88e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c88e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c88e

Branch: refs/heads/master
Commit: c88e0826c0f700fd3cd70385a7b25b33521f
Parents: 15bb49d
Author: Chia-Ping Tsai 
Authored: Thu Jan 11 10:27:18 2018 -0800
Committer: Apekshit Sharma 
Committed: Thu Jan 11 10:34:54 2018 -0800

--
 .../org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c88e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
index bd66bde..bbf06fd 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
@@ -188,6 +188,7 @@ public class TestFSErrorsExposed {
 try {
   // Make it fail faster.
   util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 
1);
+  
util.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 
9);
   util.getConfiguration().setInt("hbase.lease.recovery.timeout", 1);
   util.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000);
   util.startMiniCluster(1);



[1/2] hbase git commit: Revert due to missing author. "HBASE-19685 Fix TestFSErrorsExposed#testFullSystemBubblesFSErrors by increasing scanner timeout."

2018-01-11 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/branch-2 bf3c20778 -> 5d4140e09


Revert due to missing author. "HBASE-19685 Fix 
TestFSErrorsExposed#testFullSystemBubblesFSErrors by increasing scanner 
timeout."

This reverts commit 0b9dc14dbca98565d29bcd2be5100e29ebcf0e65.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/31fe5dbf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/31fe5dbf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/31fe5dbf

Branch: refs/heads/branch-2
Commit: 31fe5dbf6b3a261f2c902d0fd6b82bf6c7ecf954
Parents: bf3c207
Author: Apekshit Sharma 
Authored: Thu Jan 11 10:33:15 2018 -0800
Committer: Apekshit Sharma 
Committed: Thu Jan 11 10:35:32 2018 -0800

--
 .../org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java   | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/31fe5dbf/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
index bbf06fd..bd66bde 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
@@ -188,7 +188,6 @@ public class TestFSErrorsExposed {
 try {
   // Make it fail faster.
   util.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 
1);
-  
util.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 
9);
   util.getConfiguration().setInt("hbase.lease.recovery.timeout", 1);
   util.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000);
   util.startMiniCluster(1);



hbase git commit: HBASE-19483 Add proper privilege check for rsgroup commands addendum

2018-01-11 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 0ef6530aa -> b29a138ec


HBASE-19483 Add proper privilege check for rsgroup commands addendum

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b29a138e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b29a138e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b29a138e

Branch: refs/heads/branch-1
Commit: b29a138ecc633b852f6a54b3f320bf873e88365d
Parents: 0ef6530
Author: Guangxu Cheng 
Authored: Thu Jan 11 12:05:13 2018 +0800
Committer: tedyu 
Committed: Thu Jan 11 11:00:07 2018 -0800

--
 .../hbase/security/access/AccessController.java | 232 ++-
 .../visibility/VisibilityController.java|   8 +-
 2 files changed, 174 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b29a138e/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 97b3456..fd0a704 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -215,6 +215,10 @@ public class AccessController extends 
BaseMasterAndRegionObserver
   /** if the ACL table is available, only relevant in the master */
   private volatile boolean aclTabAvailable = false;
 
+  public static boolean isAuthorizationSupported(Configuration conf) {
+return AccessChecker.isAuthorizationSupported(conf);
+  }
+
   public static boolean isCellAuthorizationSupported(Configuration conf) {
 return AccessChecker.isAuthorizationSupported(conf) &&
 (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS);
@@ -408,6 +412,106 @@ public class AccessController extends 
BaseMasterAndRegionObserver
   }
 
   /**
+   * Authorizes that the current user has any of the given permissions for the
+   * given table, column family and column qualifier.
+   * @param tableName Table requested
+   * @param family Column family requested
+   * @param qualifier Column qualifier requested
+   * @throws IOException if obtaining the current user fails
+   * @throws AccessDeniedException if user has no authorization
+   */
+  public void requirePermission(String request, TableName tableName, byte[] 
family,
+  byte[] qualifier, Action... permissions) throws IOException {
+accessChecker.requirePermission(getActiveUser(), request,
+tableName, family, qualifier, permissions);
+  }
+
+  /**
+   * Authorizes that the current user has any of the given permissions for the
+   * given table, column family and column qualifier.
+   * @param tableName Table requested
+   * @param family Column family param
+   * @param qualifier Column qualifier param
+   * @throws IOException if obtaining the current user fails
+   * @throws AccessDeniedException if user has no authorization
+   */
+  public void requireTablePermission(String request, TableName tableName, 
byte[] family,
+  byte[] qualifier, Action... permissions) throws IOException {
+accessChecker.requireTablePermission(getActiveUser(), request,
+tableName, family, qualifier, permissions);
+  }
+
+  /**
+   * Authorizes that the current user has any of the given permissions to 
access the table.
+   *
+   * @param tableName Table requested
+   * @param permissions Actions being requested
+   * @throws IOException if obtaining the current user fails
+   * @throws AccessDeniedException if user has no authorization
+   */
+  public void requireAccess(String request, TableName tableName,
+  Action... permissions) throws IOException {
+accessChecker.requireAccess(getActiveUser(), request, tableName, 
permissions);
+  }
+
+  /**
+   * Authorizes that the current user has global privileges for the given 
action.
+   * @param perm The action being requested
+   * @throws IOException if obtaining the current user fails
+   * @throws AccessDeniedException if authorization is denied
+   */
+  public void requirePermission(String request, Action perm) throws 
IOException {
+accessChecker.requirePermission(getActiveUser(), request, perm);
+  }
+
+  /**
+   * Checks that the user has the given global permission. The generated
+   * audit log message will contain context information for the operation
+   * being authorized, based on the given parameters.
+   * @param perm Action being requested
+   * @param tableName Affected table name.
+   * 

  1   2   >