hbase git commit: HBASE-19941 Flaky TestCreateTableProcedure times out in nightly, needs to LargeTests

2018-02-05 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 b9175680b -> e4aeb4617


HBASE-19941 Flaky TestCreateTableProcedure times out in nightly, needs to 
LargeTests


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e4aeb461
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e4aeb461
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e4aeb461

Branch: refs/heads/branch-2
Commit: e4aeb4617724b9b67c1f654a35dcfc5e496268e2
Parents: b917568
Author: Umesh Agashe 
Authored: Mon Feb 5 22:11:05 2018 -0800
Committer: Michael Stack 
Committed: Mon Feb 5 22:11:53 2018 -0800

--
 .../master/procedure/TestCreateTableProcedure.java| 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e4aeb461/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
index 056155f..3fa756b 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
@@ -57,14 +57,14 @@ public class TestCreateTableProcedure extends 
TestTableDDLProcedureBase {
 
   @Rule public TestName name = new TestName();
 
-  @Test(timeout=60000)
+  @Test
   public void testSimpleCreate() throws Exception {
 final TableName tableName = TableName.valueOf(name.getMethodName());
 final byte[][] splitKeys = null;
 testSimpleCreate(tableName, splitKeys);
   }
 
-  @Test(timeout=60000)
+  @Test
   public void testSimpleCreateWithSplits() throws Exception {
 final TableName tableName = TableName.valueOf(name.getMethodName());
 final byte[][] splitKeys = new byte[][] {
@@ -79,7 +79,7 @@ public class TestCreateTableProcedure extends 
TestTableDDLProcedureBase {
 MasterProcedureTestingUtility.validateTableCreation(getMaster(), 
tableName, regions, F1, F2);
   }
 
-  @Test(timeout=60000)
+  @Test
   public void testCreateWithoutColumnFamily() throws Exception {
final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
 final TableName tableName = TableName.valueOf(name.getMethodName());
@@ -101,7 +101,7 @@ public class TestCreateTableProcedure extends 
TestTableDDLProcedureBase {
 cause instanceof DoNotRetryIOException);
   }
 
-  @Test(timeout=60000, expected=TableExistsException.class)
+  @Test(expected=TableExistsException.class)
   public void testCreateExisting() throws Exception {
 final TableName tableName = TableName.valueOf(name.getMethodName());
final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
@@ -124,7 +124,7 @@ public class TestCreateTableProcedure extends 
TestTableDDLProcedureBase {
 latch2.await();
   }
 
-  @Test(timeout=60000)
+  @Test
   public void testRecoveryAndDoubleExecution() throws Exception {
 final TableName tableName = TableName.valueOf(name.getMethodName());
 
@@ -144,13 +144,13 @@ public class TestCreateTableProcedure extends 
TestTableDDLProcedureBase {
 MasterProcedureTestingUtility.validateTableCreation(getMaster(), 
tableName, regions, F1, F2);
   }
 
-  @Test(timeout=90000)
+  @Test
   public void testRollbackAndDoubleExecution() throws Exception {
 final TableName tableName = TableName.valueOf(name.getMethodName());
 
testRollbackAndDoubleExecution(TableDescriptorBuilder.newBuilder(MasterProcedureTestingUtility.createHTD(tableName,
 F1, F2)));
   }
 
-  @Test(timeout=90000)
+  @Test
   public void testRollbackAndDoubleExecutionOnMobTable() throws Exception {
 final TableName tableName = TableName.valueOf(name.getMethodName());
 TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, 
F1, F2);
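
For context: the per-method timeouts can go because the class-level rule
already supplies one. HBaseClassTestRule.forClass() derives a timeout from the
test's category, and LargeTests get a correspondingly large budget, so a
hard-coded timeout= on each method only fights with it. A minimal sketch of the
resulting shape (the exact category annotations are an assumption based on
other hbase-server tests):

    @Category({MasterTests.class, LargeTests.class})
    public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
      // The class rule enforces a category-based timeout for every test method.
      @ClassRule
      public static final HBaseClassTestRule CLASS_RULE =
          HBaseClassTestRule.forClass(TestCreateTableProcedure.class);

      @Test  // no per-method timeout needed any more
      public void testSimpleCreate() throws Exception {
        // ...
      }
    }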



hbase git commit: HBASE-19941 Flaky TestCreateTableProcedure times out in nightly, needs to LargeTests

2018-02-05 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 11e3a8387 -> 3fa27c238


HBASE-19941 Flaky TestCreateTableProcedure times out in nightly, needs to 
LargeTests


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3fa27c23
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3fa27c23
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3fa27c23

Branch: refs/heads/master
Commit: 3fa27c2382a70f692eb36d191228f9e796800ffb
Parents: 11e3a83
Author: Umesh Agashe 
Authored: Mon Feb 5 22:11:05 2018 -0800
Committer: Michael Stack 
Committed: Mon Feb 5 22:13:53 2018 -0800

--
 .../master/procedure/TestCreateTableProcedure.java| 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3fa27c23/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
index 056155f..3fa756b 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
@@ -57,14 +57,14 @@ public class TestCreateTableProcedure extends 
TestTableDDLProcedureBase {
 
   @Rule public TestName name = new TestName();
 
-  @Test(timeout=60000)
+  @Test
   public void testSimpleCreate() throws Exception {
 final TableName tableName = TableName.valueOf(name.getMethodName());
 final byte[][] splitKeys = null;
 testSimpleCreate(tableName, splitKeys);
   }
 
-  @Test(timeout=60000)
+  @Test
   public void testSimpleCreateWithSplits() throws Exception {
 final TableName tableName = TableName.valueOf(name.getMethodName());
 final byte[][] splitKeys = new byte[][] {
@@ -79,7 +79,7 @@ public class TestCreateTableProcedure extends 
TestTableDDLProcedureBase {
 MasterProcedureTestingUtility.validateTableCreation(getMaster(), 
tableName, regions, F1, F2);
   }
 
-  @Test(timeout=60000)
+  @Test
   public void testCreateWithoutColumnFamily() throws Exception {
final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
 final TableName tableName = TableName.valueOf(name.getMethodName());
@@ -101,7 +101,7 @@ public class TestCreateTableProcedure extends 
TestTableDDLProcedureBase {
 cause instanceof DoNotRetryIOException);
   }
 
-  @Test(timeout=60000, expected=TableExistsException.class)
+  @Test(expected=TableExistsException.class)
   public void testCreateExisting() throws Exception {
 final TableName tableName = TableName.valueOf(name.getMethodName());
final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
@@ -124,7 +124,7 @@ public class TestCreateTableProcedure extends 
TestTableDDLProcedureBase {
 latch2.await();
   }
 
-  @Test(timeout=60000)
+  @Test
   public void testRecoveryAndDoubleExecution() throws Exception {
 final TableName tableName = TableName.valueOf(name.getMethodName());
 
@@ -144,13 +144,13 @@ public class TestCreateTableProcedure extends 
TestTableDDLProcedureBase {
 MasterProcedureTestingUtility.validateTableCreation(getMaster(), 
tableName, regions, F1, F2);
   }
 
-  @Test(timeout=90000)
+  @Test
   public void testRollbackAndDoubleExecution() throws Exception {
 final TableName tableName = TableName.valueOf(name.getMethodName());
 
testRollbackAndDoubleExecution(TableDescriptorBuilder.newBuilder(MasterProcedureTestingUtility.createHTD(tableName,
 F1, F2)));
   }
 
-  @Test(timeout=90000)
+  @Test
   public void testRollbackAndDoubleExecutionOnMobTable() throws Exception {
 final TableName tableName = TableName.valueOf(name.getMethodName());
 TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, 
F1, F2);



hbase git commit: HBASE-19934 HBaseSnapshotException when read replicas is enabled and online snapshot is taken after region splitting (Toshihiro Suzuki)

2018-02-05 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-2 53e570722 -> b9175680b


HBASE-19934 HBaseSnapshotException when read replicas is enabled and online 
snapshot is taken after region splitting (Toshihiro Suzuki)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b9175680
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b9175680
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b9175680

Branch: refs/heads/branch-2
Commit: b9175680b6e958a5e39b0d16f696201dbcd9ee7c
Parents: 53e5707
Author: tedyu 
Authored: Mon Feb 5 21:06:20 2018 -0800
Committer: tedyu 
Committed: Mon Feb 5 21:06:20 2018 -0800

--
 .../snapshot/EnabledTableSnapshotHandler.java   |  4 +++-
 .../client/TestRestoreSnapshotFromClient.java   | 20 +++++++++++++-------
 ...oreSnapshotFromClientWithRegionReplicas.java | 25 +++++++++++++++++++++++++
 3 files changed, 41 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b9175680/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
index 6cf1200..0872443 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
@@ -24,6 +24,7 @@ import java.util.Set;
 
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.mob.MobUtils;
@@ -98,7 +99,8 @@ public class EnabledTableSnapshotHandler extends 
TakeSnapshotHandler {
   // Take the offline regions as disabled
  for (Pair<RegionInfo, ServerName> region : regions) {
 RegionInfo regionInfo = region.getFirst();
-if (regionInfo.isOffline() && (regionInfo.isSplit() || 
regionInfo.isSplitParent())) {
+if (regionInfo.isOffline() && (regionInfo.isSplit() || 
regionInfo.isSplitParent()) &&
+RegionReplicaUtil.isDefaultReplica(regionInfo)) {
   LOG.info("Take disabled snapshot of offline region=" + regionInfo);
   snapshotDisabledRegion(regionInfo);
 }
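
For context: all replicas of a region share the primary's boundaries and
differ only in replica id, and only the default replica (id 0) owns store
files, so trying to snapshot a non-default replica of an offline split parent
is what raised the HBaseSnapshotException in the issue title. The guard above,
annotated (a sketch; RegionReplicaUtil.isDefaultReplica(ri) is true exactly
when ri.getReplicaId() == 0):

    for (Pair<RegionInfo, ServerName> region : regions) {
      RegionInfo regionInfo = region.getFirst();
      boolean offlineSplitParent =
          regionInfo.isOffline() && (regionInfo.isSplit() || regionInfo.isSplitParent());
      // Only the default replica has store files to snapshot; skip the others.
      if (offlineSplitParent && RegionReplicaUtil.isDefaultReplica(regionInfo)) {
        snapshotDisabledRegion(regionInfo);
      }
    }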

http://git-wip-us.apache.org/repos/asf/hbase/blob/b9175680/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
index 3eb304d..2556bec 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.util.HashSet;
 import java.util.Set;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -67,13 +68,13 @@ public class TestRestoreSnapshotFromClient {
   protected final byte[] TEST_FAMILY2 = Bytes.toBytes("cf2");
 
   protected TableName tableName;
-  private byte[] emptySnapshot;
-  private byte[] snapshotName0;
-  private byte[] snapshotName1;
-  private byte[] snapshotName2;
-  private int snapshot0Rows;
-  private int snapshot1Rows;
-  private Admin admin;
+  protected byte[] emptySnapshot;
+  protected byte[] snapshotName0;
+  protected byte[] snapshotName1;
+  protected byte[] snapshotName2;
+  protected int snapshot0Rows;
+  protected int snapshot1Rows;
+  protected Admin admin;
 
   @Rule
   public TestName name = new TestName();
@@ -321,4 +322,9 @@ public class TestRestoreSnapshotFromClient {
   protected int countRows(final Table table, final byte[]... families) throws 
IOException {
 return TEST_UTIL.countRows(table, families);
   }
+
+  protected void splitRegion(final RegionInfo regionInfo) throws IOException {
+byte[][] splitPoints = Bytes.split(regionInfo.getStartKey(), 
regionInfo.getEndKey(), 1);
+admin.split(regionInfo.getTable(), splitPoints[1]);
+  }
 }
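
A note on the new splitRegion() helper: Bytes.split(start, end, 1) returns the
two endpoints plus one interpolated key in between, so splitPoints[1] is a row
roughly in the middle of the region, a deterministic split point for the test.
Roughly:

    byte[][] splitPoints = Bytes.split(regionInfo.getStartKey(), regionInfo.getEndKey(), 1);
    // splitPoints == { startKey, ~midpoint, endKey }
    admin.split(regionInfo.getTable(), splitPoints[1]);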


hbase git commit: HBASE-19934 HBaseSnapshotException when read replicas is enabled and online snapshot is taken after region splitting (Toshihiro Suzuki)

2018-02-05 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 3bb8daa60 -> 11e3a8387


HBASE-19934 HBaseSnapshotException when read replicas is enabled and online 
snapshot is taken after region splitting (Toshihiro Suzuki)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/11e3a838
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/11e3a838
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/11e3a838

Branch: refs/heads/master
Commit: 11e3a8387006a5dd348fa91a336bce8d25455f4d
Parents: 3bb8daa
Author: tedyu 
Authored: Mon Feb 5 21:03:56 2018 -0800
Committer: tedyu 
Committed: Mon Feb 5 21:03:56 2018 -0800

--
 .../snapshot/EnabledTableSnapshotHandler.java   |  4 +++-
 .../client/TestRestoreSnapshotFromClient.java   | 20 +++++++++++++-------
 ...oreSnapshotFromClientWithRegionReplicas.java | 25 +++++++++++++++++++++++++
 3 files changed, 41 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/11e3a838/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
index 6cf1200..0872443 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
@@ -24,6 +24,7 @@ import java.util.Set;
 
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.mob.MobUtils;
@@ -98,7 +99,8 @@ public class EnabledTableSnapshotHandler extends 
TakeSnapshotHandler {
   // Take the offline regions as disabled
  for (Pair<RegionInfo, ServerName> region : regions) {
 RegionInfo regionInfo = region.getFirst();
-if (regionInfo.isOffline() && (regionInfo.isSplit() || 
regionInfo.isSplitParent())) {
+if (regionInfo.isOffline() && (regionInfo.isSplit() || 
regionInfo.isSplitParent()) &&
+RegionReplicaUtil.isDefaultReplica(regionInfo)) {
   LOG.info("Take disabled snapshot of offline region=" + regionInfo);
   snapshotDisabledRegion(regionInfo);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/11e3a838/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
index 3eb304d..2556bec 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.util.HashSet;
 import java.util.Set;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -67,13 +68,13 @@ public class TestRestoreSnapshotFromClient {
   protected final byte[] TEST_FAMILY2 = Bytes.toBytes("cf2");
 
   protected TableName tableName;
-  private byte[] emptySnapshot;
-  private byte[] snapshotName0;
-  private byte[] snapshotName1;
-  private byte[] snapshotName2;
-  private int snapshot0Rows;
-  private int snapshot1Rows;
-  private Admin admin;
+  protected byte[] emptySnapshot;
+  protected byte[] snapshotName0;
+  protected byte[] snapshotName1;
+  protected byte[] snapshotName2;
+  protected int snapshot0Rows;
+  protected int snapshot1Rows;
+  protected Admin admin;
 
   @Rule
   public TestName name = new TestName();
@@ -321,4 +322,9 @@ public class TestRestoreSnapshotFromClient {
   protected int countRows(final Table table, final byte[]... families) throws 
IOException {
 return TEST_UTIL.countRows(table, families);
   }
+
+  protected void splitRegion(final RegionInfo regionInfo) throws IOException {
+byte[][] splitPoints = Bytes.split(regionInfo.getStartKey(), 
regionInfo.getEndKey(), 1);
+admin.split(regionInfo.getTable(), splitPoints[1]);
+  }
 }


hbase git commit: HBASE-19939 Fixed NPE in tests TestSplitTableRegionProcedure#testSplitWithoutPONR() and testRecoveryAndDoubleExecution()

2018-02-05 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master f5197979a -> 3bb8daa60


HBASE-19939 Fixed NPE in tests 
TestSplitTableRegionProcedure#testSplitWithoutPONR() and 
testRecoveryAndDoubleExecution()

Value of 'htd' was null: it is initialized in the constructor, but when the 
object is deserialized it is null. Got rid of the member variable htd and made 
it a local variable in the methods that use it.
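
For context: a Procedure is persisted to the procedure store and, on recovery,
rebuilt through its no-arg constructor followed by deserializeStateData(), so
any field that is only assigned in the working constructor and never serialized
comes back null. A minimal sketch of the failure mode (names abbreviated; the
fix shown is the one in the diff below):

    public class SplitTableRegionProcedure /* extends the procedure base class */ {
      private TableDescriptor htd;  // assigned in the working constructor only

      public SplitTableRegionProcedure() {
        // recovery path: no-arg constructor, so htd stays null here
      }

      void someStep(MasterProcedureEnv env) throws IOException {
        // After recovery the field is null, so htd.xxx() would NPE.
        // Fix: drop the field and look the descriptor up where needed.
        TableDescriptor htd =
            env.getMasterServices().getTableDescriptors().get(getTableName());
        // ... use the local htd
      }
    }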


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3bb8daa6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3bb8daa6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3bb8daa6

Branch: refs/heads/master
Commit: 3bb8daa60565ec2f7955352e52c2f6379176d8c6
Parents: f519797
Author: Umesh Agashe 
Authored: Mon Feb 5 12:08:49 2018 -0800
Committer: Michael Stack 
Committed: Mon Feb 5 20:48:56 2018 -0800

--
 .../hbase/master/assignment/SplitTableRegionProcedure.java   | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3bb8daa6/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 1828340..be0741d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -94,7 +94,6 @@ public class SplitTableRegionProcedure
   private RegionInfo daughter_1_RI;
   private RegionInfo daughter_2_RI;
   private byte[] bestSplitRow;
-  private TableDescriptor htd;
   private RegionSplitPolicy splitPolicy;
 
   public SplitTableRegionProcedure() {
@@ -120,14 +119,14 @@ public class SplitTableRegionProcedure
 .setSplit(false)
 .setRegionId(rid)
 .build();
-this.htd = 
env.getMasterServices().getTableDescriptors().get(getTableName());
-if(this.htd.getRegionSplitPolicyClassName() != null) {
+TableDescriptor htd = 
env.getMasterServices().getTableDescriptors().get(getTableName());
+if(htd.getRegionSplitPolicyClassName() != null) {
   // Since we don't have region reference here, creating the split policy 
instance without it.
   // This can be used to invoke methods which don't require Region 
reference. This instantiation
   // of a class on Master-side though it only makes sense on the 
RegionServer-side is
   // for Phoenix Local Indexing. Refer HBASE-12583 for more information.
  Class<? extends RegionSplitPolicy> clazz =
-  RegionSplitPolicy.getSplitPolicyClass(this.htd, 
env.getMasterConfiguration());
+  RegionSplitPolicy.getSplitPolicyClass(htd, 
env.getMasterConfiguration());
   this.splitPolicy = ReflectionUtils.newInstance(clazz, 
env.getMasterConfiguration());
 }
   }
@@ -611,6 +610,7 @@ public class SplitTableRegionProcedure
   maxThreads, Threads.getNamedThreadFactory("StoreFileSplitter-%1$d"));
final List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);
 
+TableDescriptor htd = 
env.getMasterServices().getTableDescriptors().get(getTableName());
 // Split each store file.
for (Map.Entry<String, Collection<StoreFileInfo>> e: files.entrySet()) {
   byte [] familyName = Bytes.toBytes(e.getKey());



hbase git commit: HBASE-19939 Fixed NPE in tests TestSplitTableRegionProcedure#testSplitWithoutPONR() and testRecoveryAndDoubleExecution()

2018-02-05 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 8b6b3326a -> 53e570722


HBASE-19939 Fixed NPE in tests 
TestSplitTableRegionProcedure#testSplitWithoutPONR() and 
testRecoveryAndDoubleExecution()

Value of 'htd' was null: it is initialized in the constructor, but when the 
object is deserialized it is null. Got rid of the member variable htd and made 
it a local variable in the methods that use it.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/53e57072
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/53e57072
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/53e57072

Branch: refs/heads/branch-2
Commit: 53e57072240b25051f568e77c9f70344ea80e7cf
Parents: 8b6b332
Author: Umesh Agashe 
Authored: Mon Feb 5 12:08:49 2018 -0800
Committer: Michael Stack 
Committed: Mon Feb 5 20:48:20 2018 -0800

--
 .../hbase/master/assignment/SplitTableRegionProcedure.java   | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/53e57072/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 1828340..be0741d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -94,7 +94,6 @@ public class SplitTableRegionProcedure
   private RegionInfo daughter_1_RI;
   private RegionInfo daughter_2_RI;
   private byte[] bestSplitRow;
-  private TableDescriptor htd;
   private RegionSplitPolicy splitPolicy;
 
   public SplitTableRegionProcedure() {
@@ -120,14 +119,14 @@ public class SplitTableRegionProcedure
 .setSplit(false)
 .setRegionId(rid)
 .build();
-this.htd = 
env.getMasterServices().getTableDescriptors().get(getTableName());
-if(this.htd.getRegionSplitPolicyClassName() != null) {
+TableDescriptor htd = 
env.getMasterServices().getTableDescriptors().get(getTableName());
+if(htd.getRegionSplitPolicyClassName() != null) {
   // Since we don't have region reference here, creating the split policy 
instance without it.
   // This can be used to invoke methods which don't require Region 
reference. This instantiation
   // of a class on Master-side though it only makes sense on the 
RegionServer-side is
   // for Phoenix Local Indexing. Refer HBASE-12583 for more information.
  Class<? extends RegionSplitPolicy> clazz =
-  RegionSplitPolicy.getSplitPolicyClass(this.htd, 
env.getMasterConfiguration());
+  RegionSplitPolicy.getSplitPolicyClass(htd, 
env.getMasterConfiguration());
   this.splitPolicy = ReflectionUtils.newInstance(clazz, 
env.getMasterConfiguration());
 }
   }
@@ -611,6 +610,7 @@ public class SplitTableRegionProcedure
   maxThreads, Threads.getNamedThreadFactory("StoreFileSplitter-%1$d"));
final List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);
 
+TableDescriptor htd = 
env.getMasterServices().getTableDescriptors().get(getTableName());
 // Split each store file.
for (Map.Entry<String, Collection<StoreFileInfo>> e: files.entrySet()) {
   byte [] familyName = Bytes.toBytes(e.getKey());



hbase git commit: HBASE-19927 Addendum join on RegionServerThread instead of HRegionServer

2018-02-05 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 5905415f9 -> 8b6b3326a


HBASE-19927 Addendum join on RegionServerThread instead of HRegionServer
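
For context: the addendum waits on the RegionServerThread that the mini-cluster
actually runs, whose Thread.isAlive() is true exactly until shutdown completes,
rather than on the HRegionServer instance. The pattern, roughly (assuming
waitFor accepts a Waiter.Predicate lambda, as elsewhere in these tests):

    RegionServerThread rsThread =
        TEST_UTIL.getHBaseCluster().getRegionServerThreads().get(0);
    TEST_UTIL.expireRegionServerSession(0);
    // Block until the RS thread has fully exited, so subsequent reads are
    // served by the surviving region servers.
    TEST_UTIL.waitFor(30000, () -> !rsThread.isAlive());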


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8b6b3326
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8b6b3326
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8b6b3326

Branch: refs/heads/branch-2
Commit: 8b6b3326a6e7f3bae66dad6fb727b2cd983ab4a3
Parents: 5905415
Author: zhangduo 
Authored: Tue Feb 6 10:01:23 2018 +0800
Committer: zhangduo 
Committed: Tue Feb 6 10:01:41 2018 +0800

--
 .../java/org/apache/hadoop/hbase/HBaseTestingUtility.java   | 3 +--
 .../org/apache/hadoop/hbase/TestFullLogReconstruction.java  | 9 +++++----
 2 files changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8b6b3326/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index ecd2fa5..4f55199 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -2635,11 +2635,10 @@ public class HBaseTestingUtility extends 
HBaseZKTestingUtility {
* Expire a region server's session
* @param index which RS
*/
-  public HRegionServer expireRegionServerSession(int index) throws Exception {
+  public void expireRegionServerSession(int index) throws Exception {
 HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
 expireSession(rs.getZooKeeper(), false);
 decrementMinRegionServerCount();
-return rs;
   }
 
   private void decrementMinRegionServerCount() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b6b3326/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java
index 13c616f..87152fc 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java
@@ -22,10 +22,10 @@ import static org.junit.Assert.assertEquals;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -81,19 +81,20 @@ public class TestFullLogReconstruction {
 for (int i = 0; i < 4; i++) {
   TEST_UTIL.loadTable(table, FAMILY);
 }
-HRegionServer rs = TEST_UTIL.expireRegionServerSession(0);
+RegionServerThread rsThread = 
TEST_UTIL.getHBaseCluster().getRegionServerThreads().get(0);
+TEST_UTIL.expireRegionServerSession(0);
 // make sure that the RS is fully down before reading, so that we will 
read the data from other
 // RSes.
TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {
 
   @Override
   public boolean evaluate() throws Exception {
-return !rs.isAlive();
+return !rsThread.isAlive();
   }
 
   @Override
   public String explainFailure() throws Exception {
-return rs + " is still alive";
+return rsThread.getRegionServer() + " is still alive";
   }
 });
 



hbase git commit: HBASE-19927 Addendum join on RegionServerThread instead of HRegionServer

2018-02-05 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 6d04aa179 -> f5197979a


HBASE-19927 Addendum join on RegionServerThread instead of HRegionServer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f5197979
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f5197979
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f5197979

Branch: refs/heads/master
Commit: f5197979aaac7e36b6af36b86ea8dc8d7774fabe
Parents: 6d04aa1
Author: zhangduo 
Authored: Tue Feb 6 10:01:23 2018 +0800
Committer: zhangduo 
Committed: Tue Feb 6 10:01:23 2018 +0800

--
 .../java/org/apache/hadoop/hbase/HBaseTestingUtility.java   | 3 +--
 .../org/apache/hadoop/hbase/TestFullLogReconstruction.java  | 9 +++++----
 2 files changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f5197979/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index ecd2fa5..4f55199 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -2635,11 +2635,10 @@ public class HBaseTestingUtility extends 
HBaseZKTestingUtility {
* Expire a region server's session
* @param index which RS
*/
-  public HRegionServer expireRegionServerSession(int index) throws Exception {
+  public void expireRegionServerSession(int index) throws Exception {
 HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
 expireSession(rs.getZooKeeper(), false);
 decrementMinRegionServerCount();
-return rs;
   }
 
   private void decrementMinRegionServerCount() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f5197979/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java
index 13c616f..87152fc 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFullLogReconstruction.java
@@ -22,10 +22,10 @@ import static org.junit.Assert.assertEquals;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -81,19 +81,20 @@ public class TestFullLogReconstruction {
 for (int i = 0; i < 4; i++) {
   TEST_UTIL.loadTable(table, FAMILY);
 }
-HRegionServer rs = TEST_UTIL.expireRegionServerSession(0);
+RegionServerThread rsThread = 
TEST_UTIL.getHBaseCluster().getRegionServerThreads().get(0);
+TEST_UTIL.expireRegionServerSession(0);
 // make sure that the RS is fully down before reading, so that we will 
read the data from other
 // RSes.
TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {
 
   @Override
   public boolean evaluate() throws Exception {
-return !rs.isAlive();
+return !rsThread.isAlive();
   }
 
   @Override
   public String explainFailure() throws Exception {
-return rs + " is still alive";
+return rsThread.getRegionServer() + " is still alive";
   }
 });
 



hbase git commit: HBASE-19915 (addendum): Fixed a typo because of which only daughterA was getting stored with a CLOSED state and not daughterB

2018-02-05 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/branch-2 e69c1fd7a -> 5905415f9


HBASE-19915 (addendum): Fixed a typo because of which only daughterA was 
getting stored with a CLOSED state and not daughterB
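
In other words, the second call wrote the CLOSED state into putA twice and putB
never received one, so only daughterA was persisted to hbase:meta with an
explicit CLOSED state. The one-character fix below routes the second call to
putB.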


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5905415f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5905415f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5905415f

Branch: refs/heads/branch-2
Commit: 5905415f9c909f7693ec37f08cadc3071e132dc9
Parents: e69c1fd
Author: Umesh Agashe 
Authored: Mon Feb 5 14:53:29 2018 -0800
Committer: Apekshit Sharma 
Committed: Mon Feb 5 15:41:28 2018 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5905415f/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 5dc0565..dad9aef 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -1759,7 +1759,7 @@ public class MetaTableAccessor {
   // master tries to assign these offline regions. This is followed by 
re-assignments of the
   // daughter regions from resumed {@link SplitTableRegionProcedure}
   addRegionStateToPut(putA, RegionState.State.CLOSED);
-  addRegionStateToPut(putA, RegionState.State.CLOSED);
+  addRegionStateToPut(putB, RegionState.State.CLOSED);
 
   addSequenceNum(putA, 1, -1, splitA.getReplicaId()); //new regions, 
openSeqNum = 1 is fine.
   addSequenceNum(putB, 1, -1, splitB.getReplicaId());



hbase git commit: HBASE-19915 (addendum): Fixed a typo because of which only daughterA was getting stored with a CLOSED state and not daughterB

2018-02-05 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/master 8de820786 -> 6d04aa179


HBASE-19915 (addendum): Fixed a typo because of which only daughterA was 
getting stored with a CLOSED state and not daughterB


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6d04aa17
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6d04aa17
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6d04aa17

Branch: refs/heads/master
Commit: 6d04aa179485f331deff328f511f9b494bdb4d43
Parents: 8de8207
Author: Umesh Agashe 
Authored: Mon Feb 5 14:53:29 2018 -0800
Committer: Apekshit Sharma 
Committed: Mon Feb 5 15:41:03 2018 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6d04aa17/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 5dc0565..dad9aef 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -1759,7 +1759,7 @@ public class MetaTableAccessor {
   // master tries to assign these offline regions. This is followed by 
re-assignments of the
   // daughter regions from resumed {@link SplitTableRegionProcedure}
   addRegionStateToPut(putA, RegionState.State.CLOSED);
-  addRegionStateToPut(putA, RegionState.State.CLOSED);
+  addRegionStateToPut(putB, RegionState.State.CLOSED);
 
   addSequenceNum(putA, 1, -1, splitA.getReplicaId()); //new regions, 
openSeqNum = 1 is fine.
   addSequenceNum(putB, 1, -1, splitB.getReplicaId());



[1/2] hbase git commit: HBASE-19922 remove ProtobufUtil::PRIMITIVES

2018-02-05 Thread mdrob
Repository: hbase
Updated Branches:
  refs/heads/branch-2 7723a3d60 -> e69c1fd7a
  refs/heads/master 6307689a6 -> 8de820786


HBASE-19922 remove ProtobufUtil::PRIMITIVES
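
For context: the removed map implemented a stock reflection idiom.
Class.forName cannot resolve primitive type names such as "int" or "boolean",
so code that loads classes by name keeps a name-to-class table for the
primitives; in ProtobufUtil the table was populated but apparently never read,
hence the removal. The idiom itself, as a generic sketch (not HBase API):

    import java.util.HashMap;
    import java.util.Map;

    final class Primitives {
      private static final Map<String, Class<?>> PRIMITIVES = new HashMap<>();
      static {
        for (Class<?> c : new Class<?>[] { boolean.class, byte.class, char.class,
            short.class, int.class, long.class, float.class, double.class, void.class }) {
          PRIMITIVES.put(c.getName(), c);  // e.g. "int" -> int.class
        }
      }

      static Class<?> resolve(String name) throws ClassNotFoundException {
        Class<?> p = PRIMITIVES.get(name);
        return p != null ? p : Class.forName(name);
      }
    }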


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8de82078
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8de82078
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8de82078

Branch: refs/heads/master
Commit: 8de820786ce8d708a6d2fd32b36ab9128bc6fc0f
Parents: 6307689
Author: Mike Drob 
Authored: Fri Feb 2 15:59:52 2018 -0600
Committer: Mike Drob 
Committed: Mon Feb 5 15:33:46 2018 -0600

--
 .../apache/hadoop/hbase/protobuf/ProtobufUtil.java  | 16 ----------------
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java  | 16 ----------------
 2 files changed, 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8de82078/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index eed911a..29ff2a2 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -34,7 +34,6 @@ import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.Method;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -123,11 +122,6 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Primitive type to class mapping.
-   */
-  private final static Map<String, Class<?>> PRIMITIVES = new HashMap<>();
-
-  /**
* Many results are simple: no cell, exists true or false. To save on object 
creations,
*  we reuse them across calls.
*/
@@ -183,16 +177,6 @@ public final class ProtobufUtil {
 ClassLoader parent = ProtobufUtil.class.getClassLoader();
 Configuration conf = HBaseConfiguration.create();
 CLASS_LOADER = new DynamicClassLoader(conf, parent);
-
-PRIMITIVES.put(Boolean.TYPE.getName(), Boolean.TYPE);
-PRIMITIVES.put(Byte.TYPE.getName(), Byte.TYPE);
-PRIMITIVES.put(Character.TYPE.getName(), Character.TYPE);
-PRIMITIVES.put(Short.TYPE.getName(), Short.TYPE);
-PRIMITIVES.put(Integer.TYPE.getName(), Integer.TYPE);
-PRIMITIVES.put(Long.TYPE.getName(), Long.TYPE);
-PRIMITIVES.put(Float.TYPE.getName(), Float.TYPE);
-PRIMITIVES.put(Double.TYPE.getName(), Double.TYPE);
-PRIMITIVES.put(Void.TYPE.getName(), Void.TYPE);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/8de82078/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 06d9a3c..5bb3b4b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -27,7 +27,6 @@ import java.lang.reflect.Method;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -201,11 +200,6 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Primitive type to class mapping.
-   */
-  private final static Map<String, Class<?>> PRIMITIVES = new HashMap<>();
-
-  /**
* Many results are simple: no cell, exists true or false. To save on object 
creations,
*  we reuse them across calls.
*/
@@ -260,16 +254,6 @@ public final class ProtobufUtil {
 ClassLoader parent = ProtobufUtil.class.getClassLoader();
 Configuration conf = HBaseConfiguration.create();
 CLASS_LOADER = new DynamicClassLoader(conf, parent);
-
-PRIMITIVES.put(Boolean.TYPE.getName(), Boolean.TYPE);
-PRIMITIVES.put(Byte.TYPE.getName(), Byte.TYPE);
-PRIMITIVES.put(Character.TYPE.getName(), Character.TYPE);
-PRIMITIVES.put(Short.TYPE.getName(), Short.TYPE);
-PRIMITIVES.put(Integer.TYPE.getName(), Integer.TYPE);
-PRIMITIVES.put(Long.TYPE.getName(), Long.TYPE);
-PRIMITIVES.put(Float.TYPE.getName(), Float.TYPE);
-PRIMITIVES.put(Double.TYPE.getName(), Double.TYPE);
-PRIMITIVES.put(Void.TYPE.getName(), Void.TYPE);
   }
 
   /**



[2/2] hbase git commit: HBASE-19922 remove ProtobufUtil::PRIMITIVES

2018-02-05 Thread mdrob
HBASE-19922 remove ProtobufUtil::PRIMITIVES


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e69c1fd7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e69c1fd7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e69c1fd7

Branch: refs/heads/branch-2
Commit: e69c1fd7a004c5f1322734170084c2cb1de2299b
Parents: 7723a3d
Author: Mike Drob 
Authored: Fri Feb 2 15:59:52 2018 -0600
Committer: Mike Drob 
Committed: Mon Feb 5 16:55:45 2018 -0600

--
 .../apache/hadoop/hbase/protobuf/ProtobufUtil.java  | 16 ----------------
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java  | 16 ----------------
 2 files changed, 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e69c1fd7/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 9739254..54837dc 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -32,7 +32,6 @@ import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.Method;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -122,11 +121,6 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Primitive type to class mapping.
-   */
-  private final static Map<String, Class<?>> PRIMITIVES = new HashMap<>();
-
-  /**
* Many results are simple: no cell, exists true or false. To save on object 
creations,
*  we reuse them across calls.
*/
@@ -182,16 +176,6 @@ public final class ProtobufUtil {
 ClassLoader parent = ProtobufUtil.class.getClassLoader();
 Configuration conf = HBaseConfiguration.create();
 CLASS_LOADER = new DynamicClassLoader(conf, parent);
-
-PRIMITIVES.put(Boolean.TYPE.getName(), Boolean.TYPE);
-PRIMITIVES.put(Byte.TYPE.getName(), Byte.TYPE);
-PRIMITIVES.put(Character.TYPE.getName(), Character.TYPE);
-PRIMITIVES.put(Short.TYPE.getName(), Short.TYPE);
-PRIMITIVES.put(Integer.TYPE.getName(), Integer.TYPE);
-PRIMITIVES.put(Long.TYPE.getName(), Long.TYPE);
-PRIMITIVES.put(Float.TYPE.getName(), Float.TYPE);
-PRIMITIVES.put(Double.TYPE.getName(), Double.TYPE);
-PRIMITIVES.put(Void.TYPE.getName(), Void.TYPE);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/e69c1fd7/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index b26802f..3a59492 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -25,7 +25,6 @@ import java.lang.reflect.Method;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -200,11 +199,6 @@ public final class ProtobufUtil {
   }
 
   /**
-   * Primitive type to class mapping.
-   */
-  private final static Map<String, Class<?>> PRIMITIVES = new HashMap<>();
-
-  /**
* Many results are simple: no cell, exists true or false. To save on object 
creations,
*  we reuse them across calls.
*/
@@ -259,16 +253,6 @@ public final class ProtobufUtil {
 ClassLoader parent = ProtobufUtil.class.getClassLoader();
 Configuration conf = HBaseConfiguration.create();
 CLASS_LOADER = new DynamicClassLoader(conf, parent);
-
-PRIMITIVES.put(Boolean.TYPE.getName(), Boolean.TYPE);
-PRIMITIVES.put(Byte.TYPE.getName(), Byte.TYPE);
-PRIMITIVES.put(Character.TYPE.getName(), Character.TYPE);
-PRIMITIVES.put(Short.TYPE.getName(), Short.TYPE);
-PRIMITIVES.put(Integer.TYPE.getName(), Integer.TYPE);
-PRIMITIVES.put(Long.TYPE.getName(), Long.TYPE);
-PRIMITIVES.put(Float.TYPE.getName(), Float.TYPE);
-PRIMITIVES.put(Double.TYPE.getName(), Double.TYPE);
-PRIMITIVES.put(Void.TYPE.getName(), Void.TYPE);
   }
 
   /**



hbase git commit: HBASE-19940 TestMetaShutdownHandler flakey

2018-02-05 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 a1600c149 -> 7723a3d60


HBASE-19940 TestMetaShutdownHandler flakey


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7723a3d6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7723a3d6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7723a3d6

Branch: refs/heads/branch-2
Commit: 7723a3d60d9c13a3f0919cf7b435e1bae73d54a8
Parents: a1600c1
Author: Michael Stack 
Authored: Mon Feb 5 12:37:48 2018 -0800
Committer: Michael Stack 
Committed: Mon Feb 5 12:38:51 2018 -0800

--
 .../java/org/apache/hadoop/hbase/util/JVMClusterUtil.java | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7723a3d6/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
index a85e89e..172c170 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
@@ -296,7 +296,15 @@ public class JVMClusterUtil {
 if (!atLeastOneLiveServer) break;
 for (RegionServerThread t : regionservers) {
   if (t.isAlive()) {
-LOG.warn("RegionServerThreads taking too long to stop, 
interrupting");
+LOG.warn("RegionServerThreads taking too long to stop, 
interrupting; thread dump "  +
+  "if > three attempts");
+if (i > 3) {
+  try {
+Threads.threadDumpingIsAlive(t.getRegionServer().getThread());
+  } catch (InterruptedException e) {
+e.printStackTrace();
+  }
+}
 t.interrupt();
   }
 }
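
The change keeps the interrupt loop but escalates diagnostics: once more than
three stop attempts have passed, it dumps the stuck region server thread's
stacks via Threads.threadDumpingIsAlive before interrupting again, so a hang
leaves thread dumps in the test log instead of just a timeout.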



hbase git commit: HBASE-19940 TestMetaShutdownHandler flakey

2018-02-05 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 9f2149f17 -> 6307689a6


HBASE-19940 TestMetaShutdownHandler flakey


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6307689a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6307689a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6307689a

Branch: refs/heads/master
Commit: 6307689a6e03f30d7d1490fbf9fe9dff9138a7ff
Parents: 9f2149f
Author: Michael Stack 
Authored: Mon Feb 5 12:37:48 2018 -0800
Committer: Michael Stack 
Committed: Mon Feb 5 12:37:48 2018 -0800

--
 .../java/org/apache/hadoop/hbase/util/JVMClusterUtil.java | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6307689a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
index a85e89e..172c170 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
@@ -296,7 +296,15 @@ public class JVMClusterUtil {
 if (!atLeastOneLiveServer) break;
 for (RegionServerThread t : regionservers) {
   if (t.isAlive()) {
-LOG.warn("RegionServerThreads taking too long to stop, 
interrupting");
+LOG.warn("RegionServerThreads taking too long to stop, 
interrupting; thread dump "  +
+  "if > three attempts");
+if (i > 3) {
+  try {
+Threads.threadDumpingIsAlive(t.getRegionServer().getThread());
+  } catch (InterruptedException e) {
+e.printStackTrace();
+  }
+}
 t.interrupt();
   }
 }



hbase git commit: HBASE-19840 Flakey TestMetaWithReplicas; ADDENDUM Adding debug

2018-02-05 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master c245bd5c0 -> 9f2149f17


HBASE-19840 Flakey TestMetaWithReplicas; ADDENDUM Adding debug


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9f2149f1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9f2149f1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9f2149f1

Branch: refs/heads/master
Commit: 9f2149f171e5bcd4e0160458f818fa192c62c082
Parents: c245bd5
Author: Michael Stack 
Authored: Mon Feb 5 11:00:46 2018 -0800
Committer: Michael Stack 
Committed: Mon Feb 5 11:01:09 2018 -0800

--
 .../java/org/apache/hadoop/hbase/TestServerName.java   | 13 +++++++++++++
 .../hadoop/hbase/client/TestMetaWithReplicas.java  | 11 ++++++++---
 2 files changed, 21 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9f2149f1/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerName.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerName.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerName.java
index a6e1401..2fdf542 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerName.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerName.java
@@ -22,6 +22,8 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotSame;
 import static org.junit.Assert.assertTrue;
 
+import java.util.HashSet;
+import java.util.Set;
 import java.util.regex.Pattern;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -39,6 +41,17 @@ public class TestServerName {
   HBaseClassTestRule.forClass(TestServerName.class);
 
   @Test
+  public void testHash() {
+ServerName sn1 = 
ServerName.parseServerName("asf903.gq1.ygridcore.net,52690,1517835491385");
+ServerName sn2 = 
ServerName.parseServerName("asf903.gq1.ygridcore.net,42231,1517835491329");
+Set<ServerName> sns = new HashSet<>();
+sns.add(sn2);
+sns.add(sn1);
+sns.add(sn1);
+assertEquals(2, sns.size());
+  }
+
+  @Test
   public void testGetHostNameMinusDomain() {
 assertEquals("2607:f0d0:1002:51::4",
   ServerName.getHostNameMinusDomain("2607:f0d0:1002:51::4"));
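
The new testHash pins down the ServerName equals/hashCode contract for the case
the flaky test trips over: two names on the same host that differ in port and
startcode must stay two distinct entries in a HashSet, which is exactly how
TestMetaWithReplicas (below) counts the servers carrying hbase:meta replicas.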

http://git-wip-us.apache.org/repos/asf/hbase/blob/9f2149f1/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
index 06f8698..2da9886 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
@@ -98,14 +98,19 @@ public class TestMetaWithReplicas {
 TEST_UTIL.startMiniCluster(REGIONSERVERS_COUNT);
 AssignmentManager am = 
TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager();
Set<ServerName> sns = new HashSet<>();
-for (int replicaId = 0; replicaId < 3; replicaId ++) {
+ServerName hbaseMetaServerName =
+TEST_UTIL.getMiniHBaseCluster().getMaster().getMetaTableLocator().
+getMetaRegionLocation(TEST_UTIL.getZooKeeperWatcher());
+LOG.info("HBASE:META DEPLOY: on " + hbaseMetaServerName);
+sns.add(hbaseMetaServerName);
+for (int replicaId = 1; replicaId < 3; replicaId ++) {
   RegionInfo h =
   
RegionReplicaUtil.getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO,
   replicaId);
   try {
 am.waitForAssignment(h);
 ServerName sn = am.getRegionStates().getRegionServerOfRegion(h);
-LOG.info(h.getRegionNameAsString() + " on " + sn);
+LOG.info("HBASE:META DEPLOY: " + h.getRegionNameAsString() + " on " + 
sn);
 sns.add(sn);
   } catch (NoSuchProcedureException e) {
 LOG.info("Presume the procedure has been cleaned up so just proceed: " 
+ e.toString());
@@ -116,7 +121,7 @@ public class TestMetaWithReplicas {
 if (sns.size() == 1) {
   int count = 
TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size();
   assertTrue("count=" + count, count == REGIONSERVERS_COUNT);
-  LOG.warn("All hbase:meta replicas are on the one server; moving 
hbase:meta");
+  LOG.warn("All hbase:meta replicas are on the one server; moving 
hbase:meta: " + sns);
   int metaServerIndex = TEST_UTIL.getHBaseCluster().getServerWithMeta();
   int newServerIndex = metaServerIndex;
   while (newServerIndex == metaServerIndex) {



hbase git commit: HBASE-19837 Flakey TestRegionLoad; ADDENDUM Report more often and wait less time on change (also add some debug on TestMetaShutdown test)

2018-02-05 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master c5f86f2ce -> c245bd5c0


HBASE-19837 Flakey TestRegionLoad; ADDENDUM Report more often and wait less 
time on change (also add some debug on TestMetaShutdown test)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c245bd5c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c245bd5c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c245bd5c

Branch: refs/heads/master
Commit: c245bd5c036dbe84deada8ff94f12a984576ffe4
Parents: c5f86f2
Author: Michael Stack 
Authored: Mon Feb 5 08:39:46 2018 -0800
Committer: Michael Stack 
Committed: Mon Feb 5 08:40:26 2018 -0800

--
 .../org/apache/hadoop/hbase/TestRegionLoad.java | 10 +++++++---
 .../hbase/master/TestMetaShutdownHandler.java   | 27 ++++++++++++++-------------
 2 files changed, 21 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c245bd5c/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java
index 801d2d8..d0484d6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionLoad.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -64,6 +64,9 @@ public class TestRegionLoad {
 
   @BeforeClass
   public static void beforeClass() throws Exception {
+// Make servers report eagerly. This test is about looking at the cluster 
status reported.
+// Make it so we don't have to wait around too long to see change.
+UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 500);
 UTIL.startMiniCluster(4);
 admin = UTIL.getAdmin();
 admin.setBalancerRunning(false, true);
@@ -114,10 +117,11 @@ public class TestRegionLoad {
   }
   checkRegionsAndRegionLoads(tableRegions, regionLoads);
 }
+int pause = 
UTIL.getConfiguration().getInt("hbase.regionserver.msginterval", 3000);
 
 // Just wait here. If this fixes the test, come back and do a better job.
-// Thought is that cluster status is stale.
-Threads.sleep(10000);
+// Would have to redo the below so can wait on cluster status changing.
+Threads.sleep(2 * pause);
 
 // Check RegionLoad matches the regionLoad from ClusterStatus
 ClusterStatus clusterStatus
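
The inline comment points at the better fix: instead of sleeping for twice the
report interval and hoping a fresh report landed, wait on the condition itself.
A sketch of that shape (expectedRegionCount is a hypothetical stand-in for the
count the test already knows):

    UTIL.waitFor(30000, () -> {
      ClusterStatus status = admin.getClusterStatus();
      int seen = 0;
      for (ServerName sn : status.getServers()) {
        seen += status.getLoad(sn).getRegionsLoad().size();
      }
      return seen >= expectedRegionCount;
    });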

http://git-wip-us.apache.org/repos/asf/hbase/blob/c245bd5c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java
index d063f0a..7e730ae 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -23,7 +23,6 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -43,13 +42,15 @@ import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests handling of meta-carrying region server failover.
  */
 @Category(MediumTests.class)
 public class TestMetaShutdownHandler {
-
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestMetaShutdownHandler.class);
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestMetaShutdownHandler.class);
@@ -80,7 +81,6 @@ public class TestMetaShutdownHandler {
   @Test (timeout=180000)
   public void testExpireMetaRegionServer() throws Exception {
     MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
-
     HMaster master = cluster.getMaster();
     RegionStates regionStates = master.getAssignmentManager().getRegionStates();
 
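The addendum's pattern generalizes: shrink the regionserver report interval before the mini-cluster starts, then derive test waits from the configured value instead of hard-coding a sleep. A minimal sketch, assuming JUnit 4 and the HBase test classpath; the class name and single-node cluster are illustrative, not part of the commit:

import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

public class TestClusterStatusFreshness {
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUp() throws Exception {
    // Shrink the report interval before the cluster starts so ClusterStatus
    // reflects change quickly; the shipped default is 3000 ms.
    UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 500);
    UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    UTIL.shutdownMiniCluster();
  }

  @Test
  public void testStatusArrivesWithinTwoReportPeriods() throws Exception {
    // Derive the wait from the configured interval instead of hard-coding a
    // sleep; two report periods allows for at least one fresh report.
    int pause = UTIL.getConfiguration().getInt("hbase.regionserver.msginterval", 3000);
    Threads.sleep(2 * pause);
    ClusterStatus status = UTIL.getAdmin().getClusterStatus();
    Assert.assertFalse(status.getServers().isEmpty());
  }
}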

hbase git commit: HBASE-19837 Flakey TestRegionLoad; ADDENDUM Report more often and wait less time on change (also add some debug on TestMetaShutdown test)

2018-02-05 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 f0a5f12d9 -> 29016bc10


HBASE-19837 Flakey TestRegionLoad; ADDENDUM Report more often and wait less time on change (also add some debug on TestMetaShutdown test)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/29016bc1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/29016bc1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/29016bc1

Branch: refs/heads/branch-2
Commit: 29016bc100ff3f7f266222bbdda2f66b474f3f7f
Parents: f0a5f12
Author: Michael Stack 
Authored: Mon Feb 5 08:39:46 2018 -0800
Committer: Michael Stack 
Committed: Mon Feb 5 08:39:52 2018 -0800

--
 .../org/apache/hadoop/hbase/TestRegionLoad.java | 10 +---
 .../hbase/master/TestMetaShutdownHandler.java   | 27 ++--
 2 files changed, 21 insertions(+), 16 deletions(-)
--


 

hbase git commit: HBASE-19703 Functionality added as part of HBASE-12583 is not working after moving the split code to master

2018-02-05 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/master ad3a1ba49 -> c5f86f2ce


HBASE-19703 Functionality added as part of HBASE-12583 is not working after moving the split code to master

Co-authored-by: Michael Stack 

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c5f86f2c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c5f86f2c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c5f86f2c

Branch: refs/heads/master
Commit: c5f86f2ce42eafc5d4b4f83f4471cacd372abae3
Parents: ad3a1ba
Author: Rajeshbabu Chintaguntla 
Authored: Mon Feb 5 23:35:32 2018 +0800
Committer: Chia-Ping Tsai 
Committed: Mon Feb 5 23:56:18 2018 +0800

--
 .../assignment/SplitTableRegionProcedure.java   | 28 +++-
 .../hbase/regionserver/HRegionFileSystem.java   |  4 ++-
 .../hbase/regionserver/RegionSplitPolicy.java   | 24 -
 .../TestSplitTransactionOnCluster.java  |  2 +-
 4 files changed, 42 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c5f86f2c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 88e6012..1828340 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -32,7 +32,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -63,16 +62,20 @@ import org.apache.hadoop.hbase.quotas.QuotaExceededException;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
+import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
@@ -91,6 +94,8 @@ public class SplitTableRegionProcedure
   private RegionInfo daughter_1_RI;
   private RegionInfo daughter_2_RI;
   private byte[] bestSplitRow;
+  private TableDescriptor htd;
+  private RegionSplitPolicy splitPolicy;
 
   public SplitTableRegionProcedure() {
     // Required by the Procedure framework to create the procedure on replay
@@ -115,6 +120,16 @@ public class SplitTableRegionProcedure
         .setSplit(false)
         .setRegionId(rid)
         .build();
+    this.htd = env.getMasterServices().getTableDescriptors().get(getTableName());
+    if(this.htd.getRegionSplitPolicyClassName() != null) {
+      // Since we don't have region reference here, creating the split policy instance without it.
+      // This can be used to invoke methods which don't require Region reference. This instantiation
+      // of a class on Master-side though it only makes sense on the RegionServer-side is
+      // for Phoenix Local Indexing. Refer HBASE-12583 for more information.
+      Class<? extends RegionSplitPolicy> clazz =
+          RegionSplitPolicy.getSplitPolicyClass(this.htd, env.getMasterConfiguration());
+      this.splitPolicy = ReflectionUtils.newInstance(clazz, env.getMasterConfiguration());
+    }
   }
 
   /**
@@ -597,7 +612,6 @@ public class SplitTableRegionProcedure
     final List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);
 
     // Split each store file.
-    final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
     for (Map.Entry<String, Collection<StoreFileInfo>> e: files.entrySet()) {
       byte [] 
hbase git commit: HBASE-19703 Functionality added as part of HBASE-12583 is not working after moving the split code to master

2018-02-05 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-2 514eadbe9 -> f0a5f12d9


HBASE-19703 Functionality added as part of HBASE-12583 is not working after moving the split code to master

Co-authored-by: Michael Stack 

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f0a5f12d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f0a5f12d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f0a5f12d

Branch: refs/heads/branch-2
Commit: f0a5f12d97784f609ccd15e1228d424bcab59c41
Parents: 514eadb
Author: Rajeshbabu Chintaguntla 
Authored: Mon Feb 5 23:35:32 2018 +0800
Committer: Chia-Ping Tsai 
Committed: Mon Feb 5 23:41:32 2018 +0800

--
 .../assignment/SplitTableRegionProcedure.java   | 28 +++-
 .../hbase/regionserver/HRegionFileSystem.java   |  4 ++-
 .../hbase/regionserver/RegionSplitPolicy.java   | 24 -
 .../TestSplitTransactionOnCluster.java  |  2 +-
 4 files changed, 42 insertions(+), 16 deletions(-)
--



[25/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.html b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.html
index 1451196..c472f61 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.html
@@ -28,15 +28,15 @@
 020
 021  import java.io.IOException;
 022  import java.util.ArrayList;
-023  import java.util.Collection;
-024  import java.util.Collections;
-025  import java.util.HashMap;
-026  import java.util.HashSet;
-027  import java.util.LinkedList;
-028  import java.util.List;
-029  import java.util.Map;
-030  import java.util.Set;
-031  import java.util.TreeMap;
+023  import java.util.Collections;
+024  import java.util.HashMap;
+025  import java.util.HashSet;
+026  import java.util.LinkedList;
+027  import java.util.List;
+028  import java.util.Map;
+029  import java.util.Set;
+030  import java.util.TreeMap;
+031
 032  import org.apache.hadoop.conf.Configuration;
 033  import org.apache.hadoop.hbase.ClusterMetrics;
 034  import org.apache.hadoop.hbase.HBaseIOException;
@@ -296,154 +296,158 @@
 288   *  List of servers which are online.
 289   * @return the list
 290   */
-291  private List<ServerName> filterServers(Collection<Address> servers,
-292      Collection<ServerName> onlineServers) {
-293    ArrayList<ServerName> finalList = new ArrayList<ServerName>();
-294    for (Address server : servers) {
-295      for(ServerName curr: onlineServers) {
-296        if(curr.getAddress().equals(server)) {
-297          finalList.add(curr);
-298        }
-299      }
-300    }
-301    return finalList;
-302  }
-303
-304  @VisibleForTesting
-305  public Set<RegionInfo> getMisplacedRegions(
-306      Map<RegionInfo, ServerName> regions) throws IOException {
-307    Set<RegionInfo> misplacedRegions = new HashSet<>();
-308    for(Map.Entry<RegionInfo, ServerName> region : regions.entrySet()) {
-309      RegionInfo regionInfo = region.getKey();
-310      ServerName assignedServer = region.getValue();
-311      RSGroupInfo info = rsGroupInfoManager.getRSGroup(rsGroupInfoManager.
-312          getRSGroupOfTable(regionInfo.getTable()));
-313      if (assignedServer == null) {
-314        LOG.debug("There is no assigned server for {}", region);
-315        continue;
-316      }
-317      RSGroupInfo otherInfo = rsGroupInfoManager.getRSGroupOfServer(assignedServer.getAddress());
-318      if (info == null && otherInfo == null) {
-319        LOG.warn("Couldn't obtain rs group information for {} on {}", region, assignedServer);
-320        continue;
-321      }
-322      if ((info == null || !info.containsServer(assignedServer.getAddress()))) {
-323        LOG.debug("Found misplaced region: " + regionInfo.getRegionNameAsString() +
-324            " on server: " + assignedServer +
-325            " found in group: " +  otherInfo +
-326            " outside of group: " + (info == null ? "UNKNOWN" : info.getName()));
-327        misplacedRegions.add(regionInfo);
-328      }
-329    }
-330    return misplacedRegions;
-331  }
-332
-333  private ServerName findServerForRegion(
-334      Map<ServerName, List<RegionInfo>> existingAssignments, RegionInfo region) {
-335    for (Map.Entry<ServerName, List<RegionInfo>> entry : existingAssignments.entrySet()) {
-336      if (entry.getValue().contains(region)) {
-337        return entry.getKey();
-338      }
-339    }
-340
-341    throw new IllegalStateException("Could not find server for region "
-342        + region.getShortNameToLog());
-343  }
+291  private List<ServerName> filterServers(Set<Address> servers,
+292      List<ServerName> onlineServers) {
+293    /**
+294     * servers is actually a TreeSet (see {@link org.apache.hadoop.hbase.rsgroup.RSGroupInfo}),
+295     * having its contains()'s time complexity as O(logn), which is good enough.
+296     * TODO: consider using HashSet to pursue O(1) for contains() throughout the calling chain
+297     * if needed. */
+298    ArrayList<ServerName> finalList = new ArrayList<>();
+299    for (ServerName onlineServer : onlineServers) {
+300      if (servers.contains(onlineServer.getAddress())) {
+301        finalList.add(onlineServer);
+302      }
+303    }
+304
+305    return finalList;
+306  }
+307
+308  @VisibleForTesting
+309  public Set<RegionInfo> getMisplacedRegions(
+310      Map<RegionInfo, ServerName> regions) throws IOException {
+311    Set<RegionInfo> misplacedRegions = new HashSet<>();
+312    for(Map.Entry<RegionInfo, ServerName> region : regions.entrySet()) {
+313      RegionInfo regionInfo = region.getKey();
+314      ServerName assignedServer = region.getValue();
+315      RSGroupInfo info = 

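The rewrite above replaces a nested scan over both collections with one contains() lookup per online server against the group's sorted address set. A minimal sketch of the same filtering shape, assuming plain Java 8; group membership is modeled with host:port strings instead of the Address/ServerName types:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;

public class FilterServersSketch {
  /** Keep the online servers whose address is in the group's server set. */
  static List<String> filterServers(Set<String> groupAddresses, List<String> onlineServers) {
    List<String> finalList = new ArrayList<>();
    for (String online : onlineServers) {
      // One O(log n) contains() per online server against the sorted set,
      // instead of the old O(n * m) nested scan over both collections.
      if (groupAddresses.contains(online)) {
        finalList.add(online);
      }
    }
    return finalList;
  }

  public static void main(String[] args) {
    Set<String> group = new TreeSet<>(Arrays.asList("rs1:16020", "rs3:16020"));
    List<String> online = Arrays.asList("rs1:16020", "rs2:16020", "rs3:16020");
    System.out.println(filterServers(group, online)); // [rs1:16020, rs3:16020]
  }
}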
[35/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
Published site at .


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/04d647a7
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/04d647a7
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/04d647a7

Branch: refs/heads/asf-site
Commit: 04d647a7e994c37492d052b5cc0c6bba3cd2ce67
Parents: 03d2c36
Author: jenkins 
Authored: Mon Feb 5 15:13:34 2018 +
Committer: jenkins 
Committed: Mon Feb 5 15:13:34 2018 +

--
 acid-semantics.html |4 +-
 apache_hbase_reference_guide.pdf|6 +-
 book.html   |2 +-
 bulk-loads.html |4 +-
 checkstyle-aggregate.html   | 4984 +++---
 checkstyle.rss  |   22 +-
 coc.html|4 +-
 cygwin.html |4 +-
 dependencies.html   |4 +-
 dependency-convergence.html |4 +-
 dependency-info.html|4 +-
 dependency-management.html  |4 +-
 devapidocs/allclasses-frame.html|1 +
 devapidocs/allclasses-noframe.html  |1 +
 devapidocs/constant-values.html |6 +-
 devapidocs/index-all.html   |   39 +-
 .../hadoop/hbase/backup/package-tree.html   |2 +-
 .../hadoop/hbase/class-use/ServerName.html  |8 +-
 .../hadoop/hbase/client/package-tree.html   |   22 +-
 .../hadoop/hbase/executor/package-tree.html |2 +-
 .../hadoop/hbase/filter/package-tree.html   |8 +-
 .../hadoop/hbase/io/hfile/package-tree.html |6 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   |4 +-
 .../hadoop/hbase/mapreduce/package-tree.html|2 +-
 .../hadoop/hbase/master/package-tree.html   |4 +-
 .../procedure/PeerProcedureInterface.html   |2 +-
 .../procedure/class-use/MasterProcedureEnv.html |8 +-
 .../class-use/PeerProcedureInterface.html   |   21 +-
 .../class-use/ProcedurePrepareLatch.html|4 +-
 .../hbase/master/procedure/package-tree.html|2 +-
 .../replication/AbstractPeerProcedure.html  |  620 ++
 .../master/replication/AddPeerProcedure.html|   30 +-
 .../replication/DisablePeerProcedure.html   |   22 +-
 .../master/replication/EnablePeerProcedure.html |   22 +-
 .../master/replication/ModifyPeerProcedure.html |  320 +-
 .../master/replication/RemovePeerProcedure.html |   22 +-
 .../replication/UpdatePeerConfigProcedure.html  |   26 +-
 .../class-use/AbstractPeerProcedure.html|  198 +
 .../hbase/master/replication/package-frame.html |1 +
 .../master/replication/package-summary.html |   23 +-
 .../hbase/master/replication/package-tree.html  |6 +-
 .../hbase/master/replication/package-use.html   |   11 +-
 .../hadoop/hbase/net/class-use/Address.html |4 +-
 .../org/apache/hadoop/hbase/package-tree.html   |   18 +-
 .../hbase/procedure2/StateMachineProcedure.html |2 +-
 .../class-use/Procedure.LockState.html  |2 +-
 .../hbase/procedure2/class-use/Procedure.html   |   21 +-
 .../class-use/ProcedureStateSerializer.html |4 +-
 .../class-use/StateMachineProcedure.html|   19 +-
 .../hadoop/hbase/procedure2/package-tree.html   |4 +-
 .../hadoop/hbase/quotas/package-tree.html   |8 +-
 .../hadoop/hbase/regionserver/package-tree.html |   12 +-
 .../regionserver/querymatcher/package-tree.html |2 +-
 .../hbase/regionserver/wal/package-tree.html|2 +-
 .../replication/regionserver/package-tree.html  |2 +-
 .../hbase/rsgroup/RSGroupBasedLoadBalancer.html |   34 +-
 .../hbase/security/access/package-tree.html |2 +-
 .../hadoop/hbase/security/package-tree.html |2 +-
 ...BaseFsck.CheckRegionConsistencyWorkItem.html |   10 +-
 .../HBaseFsck.ErrorReporter.ERROR_CODE.html |   76 +-
 .../hbase/util/HBaseFsck.ErrorReporter.html |   30 +-
 .../hbase/util/HBaseFsck.HBaseFsckTool.html |6 +-
 .../hadoop/hbase/util/HBaseFsck.HbckInfo.html   |   56 +-
 .../hadoop/hbase/util/HBaseFsck.HdfsEntry.html  |   14 +-
 .../hadoop/hbase/util/HBaseFsck.MetaEntry.html  |   18 +-
 .../hbase/util/HBaseFsck.OnlineEntry.html   |   10 +-
 .../util/HBaseFsck.PrintingErrorReporter.html   |   42 +-
 .../HBaseFsck.TableInfo.HDFSIntegrityFixer.html |   22 +-
 ...aseFsck.TableInfo.IntegrityFixSuggester.html |   20 +-
 .../hadoop/hbase/util/HBaseFsck.TableInfo.html  |   38 +-
 .../hbase/util/HBaseFsck.WorkItemHdfsDir.html   |   12 +-
 .../util/HBaseFsck.WorkItemHdfsRegionInfo.html  |   12 +-
 .../util/HBaseFsck.WorkItemOverlapMerge.html|   10 +-
 .../hbase/util/HBaseFsck.WorkItemRegion.html|   16 +-
 

[22/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
index bf1a2cc..89317aa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
@@ -1813,3325 +1813,3330 @@
 1805  private void loadTableStates()
 1806      throws IOException {
 1807    tableStates = MetaTableAccessor.getTableStates(connection);
-1808  }
-1809
-1810  /**
-1811   * Check if the specified region's table is disabled.
-1812   * @param tableName table to check status of
-1813   */
-1814  private boolean isTableDisabled(TableName tableName) {
-1815    return tableStates.containsKey(tableName)
-1816        && tableStates.get(tableName)
-1817        .inStates(TableState.State.DISABLED, TableState.State.DISABLING);
-1818  }
-1819
-1820  /**
-1821   * Scan HDFS for all regions, recording their information into
-1822   * regionInfoMap
-1823   */
-1824  public void loadHdfsRegionDirs() throws IOException, InterruptedException {
-1825    Path rootDir = FSUtils.getRootDir(getConf());
-1826    FileSystem fs = rootDir.getFileSystem(getConf());
-1827
-1828    // list all tables from HDFS
-1829    List<FileStatus> tableDirs = Lists.newArrayList();
-1830
-1831    boolean foundVersionFile = fs.exists(new Path(rootDir, HConstants.VERSION_FILE_NAME));
+1808    // Add hbase:meta so this tool keeps working. In hbase2, meta is always enabled though it
+1809    // has no entry in the table states. HBCK doesn't work right w/ hbase2 but just do this in
+1810    // meantime.
+1811    this.tableStates.put(TableName.META_TABLE_NAME,
+1812        new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED));
+1813  }
+1814
+1815  /**
+1816   * Check if the specified region's table is disabled.
+1817   * @param tableName table to check status of
+1818   */
+1819  private boolean isTableDisabled(TableName tableName) {
+1820    return tableStates.containsKey(tableName)
+1821        && tableStates.get(tableName)
+1822        .inStates(TableState.State.DISABLED, TableState.State.DISABLING);
+1823  }
+1824
+1825  /**
+1826   * Scan HDFS for all regions, recording their information into
+1827   * regionInfoMap
+1828   */
+1829  public void loadHdfsRegionDirs() throws IOException, InterruptedException {
+1830    Path rootDir = FSUtils.getRootDir(getConf());
+1831    FileSystem fs = rootDir.getFileSystem(getConf());
 1832
-1833    List<Path> paths = FSUtils.getTableDirs(fs, rootDir);
-1834    for (Path path : paths) {
-1835      TableName tableName = FSUtils.getTableName(path);
-1836      if ((!checkMetaOnly &&
-1837          isTableIncluded(tableName)) ||
-1838          tableName.equals(TableName.META_TABLE_NAME)) {
-1839        tableDirs.add(fs.getFileStatus(path));
-1840      }
-1841    }
-1842
-1843    // verify that version file exists
-1844    if (!foundVersionFile) {
-1845      errors.reportError(ERROR_CODE.NO_VERSION_FILE,
-1846          "Version file does not exist in root dir " + rootDir);
-1847      if (shouldFixVersionFile()) {
-1848        LOG.info("Trying to create a new " + HConstants.VERSION_FILE_NAME
-1849            + " file.");
-1850        setShouldRerun();
-1851        FSUtils.setVersion(fs, rootDir, getConf().getInt(
-1852            HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000), getConf().getInt(
-1853            HConstants.VERSION_FILE_WRITE_ATTEMPTS,
-1854            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
-1855      }
-1856    }
-1857
-1858    // Avoid multithreading at table-level because already multithreaded internally at
-1859    // region-level.  Additionally multithreading at table-level can lead to deadlock
-1860    // if there are many tables in the cluster.  Since there are a limited # of threads
-1861    // in the executor's thread pool and if we multithread at the table-level by putting
-1862    // WorkItemHdfsDir callables into the executor, then we will have some threads in the
-1863    // executor tied up solely in waiting for the tables' region-level calls to complete.
-1864    // If there are enough tables then there will be no actual threads in the pool left
-1865    // for the region-level callables to be serviced.
-1866    for (FileStatus tableDir : tableDirs) {
-1867      LOG.debug("Loading region dirs from " +tableDir.getPath());
-1868      WorkItemHdfsDir item = new WorkItemHdfsDir(fs, errors, tableDir);
-1869      try {
-1870        item.call();
-1871      } catch (ExecutionException e) {
-1872        LOG.warn("Could not completely load table dir " +
-1873            tableDir.getPath(), e.getCause());
-1874      }
-1875    }
[14/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
[08/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
[17/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
[12/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
[28/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
[05/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
[26/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
index 38622f6..73fcc01 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
@@ -535,14 +535,14 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.util.ChecksumType
-org.apache.hadoop.hbase.util.Order
-org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType
-org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.PureJavaComparer (implements org.apache.hadoop.hbase.util.Bytes.Comparer<T>)
-org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.UnsafeComparer (implements org.apache.hadoop.hbase.util.Bytes.Comparer<T>)
 org.apache.hadoop.hbase.util.PoolMap.PoolType
+org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.UnsafeComparer (implements org.apache.hadoop.hbase.util.Bytes.Comparer<T>)
 org.apache.hadoop.hbase.util.PrettyPrinter.Unit
+org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.PureJavaComparer (implements org.apache.hadoop.hbase.util.Bytes.Comparer<T>)
 org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE
+org.apache.hadoop.hbase.util.Order
+org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType
+org.apache.hadoop.hbase.util.ChecksumType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
index 419f44c..bb5b957 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
@@ -189,8 +189,8 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.wal.WALFactory.Providers
 org.apache.hadoop.hbase.wal.RegionGroupingProvider.Strategies
+org.apache.hadoop.hbase.wal.WALFactory.Providers
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/overview-tree.html
--
diff --git a/devapidocs/overview-tree.html b/devapidocs/overview-tree.html
index e6a9c83..afba09e 100644
--- a/devapidocs/overview-tree.html
+++ b/devapidocs/overview-tree.html
@@ -2645,6 +2645,19 @@
 org.apache.hadoop.hbase.procedure2.SequentialProcedure<TEnvironment>
 org.apache.hadoop.hbase.procedure2.StateMachineProcedure<TEnvironment,TState>
 
+org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure<TState> (implements org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface)
+
+org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure
+
+org.apache.hadoop.hbase.master.replication.AddPeerProcedure
+org.apache.hadoop.hbase.master.replication.DisablePeerProcedure
+org.apache.hadoop.hbase.master.replication.EnablePeerProcedure
+org.apache.hadoop.hbase.master.replication.RemovePeerProcedure
+org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure
+
+
+
+
 org.apache.hadoop.hbase.master.procedure.AbstractStateMachineNamespaceProcedure<TState> (implements org.apache.hadoop.hbase.master.procedure.TableProcedureInterface)
 
 org.apache.hadoop.hbase.master.procedure.CreateNamespaceProcedure
@@ -2673,15 +2686,6 @@
 org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure
 
 
-org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure (implements org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface)
-
-org.apache.hadoop.hbase.master.replication.AddPeerProcedure
-org.apache.hadoop.hbase.master.replication.DisablePeerProcedure
-org.apache.hadoop.hbase.master.replication.EnablePeerProcedure
-org.apache.hadoop.hbase.master.replication.RemovePeerProcedure
-org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure
-
-
 

[24/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
index bf1a2cc..89317aa 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
@@ -1813,3325 +1813,3330 @@
 1805  private void loadTableStates()
 1806      throws IOException {
 1807    tableStates = MetaTableAccessor.getTableStates(connection);
-1808  }
-1809
-1810  /**
-1811   * Check if the specified region's table is disabled.
-1812   * @param tableName table to check status of
-1813   */
-1814  private boolean isTableDisabled(TableName tableName) {
-1815    return tableStates.containsKey(tableName)
-1816        && tableStates.get(tableName)
-1817        .inStates(TableState.State.DISABLED, TableState.State.DISABLING);
-1818  }
-1819
-1820  /**
-1821   * Scan HDFS for all regions, recording their information into
-1822   * regionInfoMap
-1823   */
-1824  public void loadHdfsRegionDirs() throws IOException, InterruptedException {
-1825    Path rootDir = FSUtils.getRootDir(getConf());
-1826    FileSystem fs = rootDir.getFileSystem(getConf());
-1827
-1828    // list all tables from HDFS
-1829    List<FileStatus> tableDirs = Lists.newArrayList();
-1830
-1831    boolean foundVersionFile = fs.exists(new Path(rootDir, HConstants.VERSION_FILE_NAME));
+1808    // Add hbase:meta so this tool keeps working. In hbase2, meta is always enabled though it
+1809    // has no entry in the table states. HBCK doesn't work right w/ hbase2 but just do this in
+1810    // meantime.
+1811    this.tableStates.put(TableName.META_TABLE_NAME,
+1812        new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED));
+1813  }
+1814
+1815  /**
+1816   * Check if the specified region's table is disabled.
+1817   * @param tableName table to check status of
+1818   */
+1819  private boolean isTableDisabled(TableName tableName) {
+1820    return tableStates.containsKey(tableName)
+1821        && tableStates.get(tableName)
+1822        .inStates(TableState.State.DISABLED, TableState.State.DISABLING);
+1823  }
+1824
+1825  /**
+1826   * Scan HDFS for all regions, recording their information into
+1827   * regionInfoMap
+1828   */
+1829  public void loadHdfsRegionDirs() throws IOException, InterruptedException {
+1830    Path rootDir = FSUtils.getRootDir(getConf());
+1831    FileSystem fs = rootDir.getFileSystem(getConf());
 1832
-1833    List<Path> paths = FSUtils.getTableDirs(fs, rootDir);
-1834    for (Path path : paths) {
-1835      TableName tableName = FSUtils.getTableName(path);
-1836      if ((!checkMetaOnly &&
-1837          isTableIncluded(tableName)) ||
-1838          tableName.equals(TableName.META_TABLE_NAME)) {
-1839        tableDirs.add(fs.getFileStatus(path));
-1840      }
-1841    }
-1842
-1843    // verify that version file exists
-1844    if (!foundVersionFile) {
-1845      errors.reportError(ERROR_CODE.NO_VERSION_FILE,
-1846          "Version file does not exist in root dir " + rootDir);
-1847      if (shouldFixVersionFile()) {
-1848        LOG.info("Trying to create a new " + HConstants.VERSION_FILE_NAME
-1849            + " file.");
-1850        setShouldRerun();
-1851        FSUtils.setVersion(fs, rootDir, getConf().getInt(
-1852            HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000), getConf().getInt(
-1853            HConstants.VERSION_FILE_WRITE_ATTEMPTS,
-1854            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
-1855      }
-1856    }
-1857
-1858    // Avoid multithreading at table-level because already multithreaded internally at
-1859    // region-level.  Additionally multithreading at table-level can lead to deadlock
-1860    // if there are many tables in the cluster.  Since there are a limited # of threads
-1861    // in the executor's thread pool and if we multithread at the table-level by putting
-1862    // WorkItemHdfsDir callables into the executor, then we will have some threads in the
-1863    // executor tied up solely in waiting for the tables' region-level calls to complete.
-1864    // If there are enough tables then there will be no actual threads in the pool left
-1865    // for the region-level callables to be serviced.
-1866    for (FileStatus tableDir : tableDirs) {
-1867      LOG.debug("Loading region dirs from " + tableDir.getPath());
-1868      WorkItemHdfsDir item = new WorkItemHdfsDir(fs, errors, tableDir);
-1869      try {
-1870        item.call();
-1871      } catch (ExecutionException e) {
-1872        LOG.warn("Could not completely load table dir " +
-1873            tableDir.getPath(), e.getCause());
-1874      }
-1875    }
-1876    errors.print("");
-1877  }
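
The comment kept at lines 1858-1865 above is the reason loadHdfsRegionDirs() runs each WorkItemHdfsDir inline via item.call() instead of submitting it to the shared executor. A minimal, self-contained sketch of the failure mode it describes, using only java.util.concurrent and hypothetical task names (this is not HBase code, just an illustration of pool starvation):

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class PoolStarvationDemo {
  public static void main(String[] args) throws Exception {
    // A limited number of threads, like the hbck executor's pool.
    ExecutorService pool = Executors.newFixedThreadPool(2);

    // A "table-level" task that submits a "region-level" task to the SAME
    // pool and then blocks waiting for it.
    Callable<Void> tableLevel = () -> {
      Future<String> regionLevel = pool.submit(() -> "region scanned");
      regionLevel.get(); // parks this pool thread until the child runs
      return null;
    };

    // With two pool threads and two parked table-level tasks, the queued
    // region-level tasks can never be serviced, so these futures would
    // never complete:
    Future<Void> t1 = pool.submit(tableLevel);
    Future<Void> t2 = pool.submit(tableLevel);
    // t1.get(); t2.get();  // would hang forever

    pool.shutdownNow(); // interrupt the parked tasks so the demo exits
  }
}

Calling the table-level work item directly, as the hunk above does, keeps every pool thread available for the region-level callables.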

[27/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html 
b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
index 9488fef..89fb8f4 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class HBaseFsck.WorkItemRegion
+static class HBaseFsck.WorkItemRegion
 extends java.lang.Object
 implements java.util.concurrent.Callable<java.lang.Void>
 Contact a region server and get all information from it
@@ -226,7 +226,7 @@
 
 
 hbck
-private final HBaseFsck hbck
+private final HBaseFsck hbck
 
 
 
@@ -235,7 +235,7 @@
 
 
 rsinfo
-private final ServerName rsinfo
+private final ServerName rsinfo
 
 
 
@@ -244,7 +244,7 @@
 
 
 errors
-private final HBaseFsck.ErrorReporter errors
+private final HBaseFsck.ErrorReporter errors
 
 
 
@@ -253,7 +253,7 @@
 
 
 connection
-private final ClusterConnection connection
+private final ClusterConnection connection
 
 
 
@@ -270,7 +270,7 @@
 
 
 WorkItemRegion
-WorkItemRegion(HBaseFsck hbck,
+WorkItemRegion(HBaseFsck hbck,
                ServerName info,
                HBaseFsck.ErrorReporter errors,
                ClusterConnection connection)
@@ -290,7 +290,7 @@
 
 
 call
-public java.lang.Void call()
+public java.lang.Void call()
                     throws java.io.IOException
 
 Specified by:
@@ -306,7 +306,7 @@
 
 
 filterRegions
-private java.util.List<RegionInfo> filterRegions(java.util.List<RegionInfo> regions)
+private java.util.List<RegionInfo> filterRegions(java.util.List<RegionInfo> regions)
 
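
Per the class summary above, WorkItemRegion is a Callable<Void> that contacts one region server and records everything it reports. A hedged sketch of that work-item shape; RegionClient and the string-based region listing are stand-ins invented for this sketch, not HBase APIs:

import java.util.List;
import java.util.concurrent.Callable;

final class RegionServerWorkItem implements Callable<Void> {
  // Hypothetical client abstraction standing in for the real ClusterConnection.
  interface RegionClient {
    List<String> listRegions(String serverName);
  }

  private final String serverName;
  private final RegionClient client;
  private final List<String> sink; // shared collection the caller aggregates into

  RegionServerWorkItem(String serverName, RegionClient client, List<String> sink) {
    this.serverName = serverName;
    this.client = client;
    this.sink = sink;
  }

  @Override
  public Void call() throws Exception {
    // Contact one region server and record all regions it reports.
    sink.addAll(client.listRegions(serverName));
    return null;
  }
}

Instances like this can be handed to an ExecutorService so each region server is interrogated on its own thread.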
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html 
b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
index c1441ad..febf9db 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
@@ -2068,7 +2068,7 @@
 
 
 cmp
-static final java.util.Comparator<HBaseFsck.HbckInfo> cmp
+static final java.util.Comparator<HBaseFsck.HbckInfo> cmp
 
 
 
@@ -2819,7 +2819,7 @@
 
 
 isTableDisabled
-private boolean isTableDisabled(TableName tableName)
+private boolean isTableDisabled(TableName tableName)
 Check if the specified region's table is disabled.
 
 Parameters:
@@ -2833,7 +2833,7 @@
 
 
 loadHdfsRegionDirs
-public void loadHdfsRegionDirs()
+public void loadHdfsRegionDirs()
                         throws java.io.IOException

[30/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/org/apache/hadoop/hbase/master/replication/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/replication/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/master/replication/package-tree.html
index 71ae56c..cb620a4 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/replication/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/replication/package-tree.html
@@ -86,7 +86,9 @@
 org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure (implements org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface, org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure<TEnv,TRemote>)
 org.apache.hadoop.hbase.procedure2.StateMachineProcedure<TEnvironment,TState>
 
-org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure (implements org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface)
+org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure<TState> (implements org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface)
+
+org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure
 
 org.apache.hadoop.hbase.master.replication.AddPeerProcedure
 org.apache.hadoop.hbase.master.replication.DisablePeerProcedure
@@ -99,6 +101,8 @@
 
 
 
+
+
 org.apache.hadoop.hbase.master.replication.ReplicationPeerManager
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/org/apache/hadoop/hbase/master/replication/package-use.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/replication/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/master/replication/package-use.html
index 4899a6e..1655c17 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/replication/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/replication/package-use.html
@@ -108,7 +108,8 @@
 
 
 ModifyPeerProcedure
-The base class for all replication peer related procedure.
+The base class for all replication peer related procedure except sync replication state
+ transition.
 
 
 
@@ -146,11 +147,17 @@
 
 
 
-ModifyPeerProcedure
+AbstractPeerProcedure
 The base class for all replication peer related procedure.
 
 
 
+ModifyPeerProcedure
+The base class for all replication peer related procedure except sync replication state
+ transition.
+
+
+
 ReplicationPeerManager
 Manages and performs all replication admin operations.
 
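
The package tree and summaries above show the shared peer-procedure plumbing moving up from ModifyPeerProcedure into a new AbstractPeerProcedure<TState> base, so procedures other than plain peer modification (e.g. sync replication state transitions) can reuse it. An illustrative skeleton of that shape; every name, field, and body below is invented for the sketch rather than copied from HBase:

interface PeerProcedureInterfaceSketch {
  String getPeerId();
}

// Shared plumbing common to all peer procedures.
abstract class AbstractPeerProcedureSketch<TState> implements PeerProcedureInterfaceSketch {
  protected final String peerId;

  protected AbstractPeerProcedureSketch(String peerId) {
    this.peerId = peerId;
  }

  @Override
  public String getPeerId() {
    return peerId;
  }
}

// Base for add/remove/enable/disable/update-config, per the package tree above.
abstract class ModifyPeerProcedureSketch extends AbstractPeerProcedureSketch<Integer> {
  protected ModifyPeerProcedureSketch(String peerId) {
    super(peerId);
  }

  protected abstract void updatePeerStorage() throws Exception;
}

final class EnablePeerProcedureSketch extends ModifyPeerProcedureSketch {
  EnablePeerProcedureSketch(String peerId) {
    super(peerId);
  }

  @Override
  protected void updatePeerStorage() {
    // flip the peer's stored state to enabled
  }
}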

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/org/apache/hadoop/hbase/net/class-use/Address.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/net/class-use/Address.html 
b/devapidocs/org/apache/hadoop/hbase/net/class-use/Address.html
index 61bfe18..f902457 100644
--- a/devapidocs/org/apache/hadoop/hbase/net/class-use/Address.html
+++ b/devapidocs/org/apache/hadoop/hbase/net/class-use/Address.html
@@ -438,8 +438,8 @@
 
 
 private java.util.List<ServerName>
-RSGroupBasedLoadBalancer.filterServers(java.util.Collection<Address> servers,
-                                       java.util.Collection<ServerName> onlineServers)
+RSGroupBasedLoadBalancer.filterServers(java.util.Set<Address> servers,
+                                       java.util.List<ServerName> onlineServers)
 Filter servers based on the online servers.
 
 
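
The filterServers change above narrows the parameters from two Collections to a Set of group addresses and a List of online servers, which makes the per-server membership probe constant-time on average. A hedged sketch of that lookup, with a plain host:port String standing in for the real Address/ServerName types:

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

final class ServerFilterSketch {
  // Keep only the online servers whose address belongs to the group.
  static List<String> filterServers(Set<String> groupAddresses, List<String> onlineServers) {
    List<String> filtered = new ArrayList<>();
    for (String online : onlineServers) {
      if (groupAddresses.contains(online)) { // O(1) membership test on a HashSet
        filtered.add(online);
      }
    }
    return filtered;
  }
}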

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/org/apache/hadoop/hbase/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/package-tree.html
index c62bdc2..dcb7d95 100644
--- a/devapidocs/org/apache/hadoop/hbase/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/package-tree.html
@@ -446,19 +446,19 @@
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 org.apache.hadoop.hbase.Cell.Type

[11/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
index bf1a2cc..89317aa 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
@@ -1813,3325 +1813,3330 @@

[18/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
index bf1a2cc..89317aa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
@@ -1813,3325 +1813,3330 @@

[33/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 5161981..0f3a6d1 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -25,8 +25,8 @@ under the License.
 en-us
 2007 - 2018 The Apache Software Foundation
 
-  File: 3527,
- Errors: 16583,
+  File: 3528,
+ Errors: 16579,
  Warnings: 0,
  Infos: 0
   
@@ -9426,6 +9426,20 @@ under the License.
   
   
 
+  org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
+
+
+  0
+
+
+  0
+
+
+  0
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.quotas.TestSuperUserQuotaPermissions.java;>org/apache/hadoop/hbase/quotas/TestSuperUserQuotaPermissions.java
 
 
@@ -29791,7 +29805,7 @@ under the License.
   0
 
 
-  266
+  265
 
   
   
@@ -34635,7 +34649,7 @@ under the License.
   0
 
 
-  3
+  0
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/coc.html
--
diff --git a/coc.html b/coc.html
index 3b1d91e..0086d5c 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Code of Conduct Policy
@@ -380,7 +380,7 @@
 The Apache Software Foundation.
 All rights reserved.
 
-  Last Published: 2018-02-04
+  Last Published: 2018-02-05
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index ff7bfb6..ea7a3ab 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Installing Apache HBase (TM) on Windows using Cygwin
 
@@ -679,7 +679,7 @@ Now your HBase server is running, start coding and build that next
 The Apache Software Foundation.
 All rights reserved.
 
-  Last Published: 2018-02-04
+  Last Published: 2018-02-05
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index 72cff9b..e4b8161 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Dependencies
 
@@ -445,7 +445,7 @@
 The Apache Software Foundation.
 All rights reserved.
 
-  Last Published: 2018-02-04
+  Last Published: 2018-02-05
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index 261c6fb..ea0d424 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Reactor Dependency Convergence
 
@@ -1035,7 +1035,7 @@
 The Apache Software Foundation.
 All rights reserved.
 
-  Last Published: 2018-02-04
+  Last Published: 2018-02-05
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index fe0023f..6154ce3 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Dependency Information
 
@@ -318,7 +318,7 @@
 

[23/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
index bf1a2cc..89317aa 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
@@ -1813,3325 +1813,3330 @@

[02/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/hbase-shaded-check-invariants/project-summary.html
--
diff --git a/hbase-shaded-check-invariants/project-summary.html 
b/hbase-shaded-check-invariants/project-summary.html
index 483501c..c2401bd 100644
--- a/hbase-shaded-check-invariants/project-summary.html
+++ b/hbase-shaded-check-invariants/project-summary.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase Shaded Packaging Invariants – Project Summary
 
@@ -166,7 +166,7 @@
 The Apache Software Foundation.
 All rights reserved.
 
-  Last Published: 2018-02-04
+  Last Published: 2018-02-05
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/hbase-shaded-check-invariants/source-repository.html
--
diff --git a/hbase-shaded-check-invariants/source-repository.html 
b/hbase-shaded-check-invariants/source-repository.html
index 445eae4..3b3e7ea 100644
--- a/hbase-shaded-check-invariants/source-repository.html
+++ b/hbase-shaded-check-invariants/source-repository.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase Shaded Packaging Invariants – Source Code Management
 
@@ -134,7 +134,7 @@
 The Apache Software Foundation.
 All rights reserved.
 
-  Last Published: 2018-02-04
+  Last Published: 2018-02-05
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/hbase-shaded-check-invariants/team-list.html
--
diff --git a/hbase-shaded-check-invariants/team-list.html 
b/hbase-shaded-check-invariants/team-list.html
index 864e69e..9fa2b2a 100644
--- a/hbase-shaded-check-invariants/team-list.html
+++ b/hbase-shaded-check-invariants/team-list.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase Shaded Packaging Invariants – Project Team
 
@@ -553,7 +553,7 @@
 The Apache Software Foundation.
 All rights reserved.
 
-  Last Published: 2018-02-04
+  Last Published: 2018-02-05
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/index.html
--
diff --git a/index.html b/index.html
index 25c59e0..0e0db16 100644
--- a/index.html
+++ b/index.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Apache HBase™ Home
 
@@ -438,7 +438,7 @@ Apache HBase is an open-source, distributed, versioned, non-relational database
 The Apache Software Foundation.
 All rights reserved.
 
-  Last Published: 2018-02-04
+  Last Published: 2018-02-05
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/integration.html
--
diff --git a/integration.html b/integration.html
index 39275ec..cc7d7fe 100644
--- a/integration.html
+++ b/integration.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – CI Management
 
@@ -296,7 +296,7 @@
 The Apache Software Foundation.
 All rights reserved.
 
-  Last Published: 2018-02-04
+  Last Published: 2018-02-05
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/issue-tracking.html
--
diff --git a/issue-tracking.html b/issue-tracking.html
index cb7e6c1..3ce0baf 100644
--- a/issue-tracking.html
+++ b/issue-tracking.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Issue Management
 
@@ -293,7 +293,7 @@
 The Apache Software Foundation.
 All rights reserved.
 
-  Last Published: 2018-02-04
+  Last Published: 2018-02-05
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/license.html
--
diff --git a/license.html b/license.html
index 2edf77a..2d5df34 100644
--- a/license.html
+++ b/license.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Licenses
 
@@ -496,7 +496,7 @@

[21/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
index bf1a2cc..89317aa 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
@@ -1813,3325 +1813,3330 @@

[13/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
index bf1a2cc..89317aa 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
@@ -1813,3325 +1813,3330 @@

[10/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
index bf1a2cc..89317aa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
@@ -1813,3325 +1813,3330 @@

[03/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html
index 2462e59..70dd1b5 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-shaded-client archetype – About
 
@@ -119,7 +119,7 @@
 The Apache Software Foundation.
 All rights reserved.
 
-  Last Published: 2018-02-04
+  Last Published: 2018-02-05
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/integration.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/integration.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/integration.html
index 2be2bd6..628d664 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/integration.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/integration.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-shaded-client archetype – CI Management
 
@@ -126,7 +126,7 @@
 The Apache Software Foundation.
 All rights reserved.
 
-  Last Published: 2018-02-04
+  Last Published: 2018-02-05
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/issue-tracking.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/issue-tracking.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/issue-tracking.html
index 7024118..e98ad30 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/issue-tracking.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/issue-tracking.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-shaded-client archetype – Issue Management
 
@@ -123,7 +123,7 @@
 The Apache Software Foundation.
 All rights reserved.
 
-  Last Published: 2018-02-04
+  Last Published: 2018-02-05
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/license.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/license.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/license.html
index ac50541..eb0d409 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/license.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/license.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-shaded-client archetype  
Project Licenses
 
@@ -326,7 +326,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-02-04
+  Last Published: 
2018-02-05
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/mail-lists.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/mail-lists.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/mail-lists.html
index 6a0c7ab..6608ae6 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/mail-lists.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/mail-lists.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-shaded-client archetype  
Project Mailing Lists
   

[32/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
index 8fca4de..464ede8 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
@@ -212,9 +212,9 @@
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType
 org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface.PeerOperationType
 org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType
+org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.html b/devapidocs/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.html
new file mode 100644
index 000..f9798ec
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.html
@@ -0,0 +1,620 @@
+AbstractPeerProcedure (Apache HBase 3.0.0-SNAPSHOT API)
+
+org.apache.hadoop.hbase.master.replication
+Class AbstractPeerProcedure<TState>
+
+java.lang.Object
+  org.apache.hadoop.hbase.procedure2.Procedure<TEnvironment>
+    org.apache.hadoop.hbase.procedure2.StateMachineProcedure<MasterProcedureEnv,TState>
+      org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure<TState>
+
+All Implemented Interfaces:
+  Comparable<Procedure<MasterProcedureEnv>>, PeerProcedureInterface
+
+Direct Known Subclasses:
+  ModifyPeerProcedure
+
+@InterfaceAudience.Private
+public abstract class AbstractPeerProcedure<TState>
+extends StateMachineProcedure<MasterProcedureEnv,TState>
+implements PeerProcedureInterface
+
+The base class for all replication peer related procedure.
+
+Nested classes/interfaces inherited from class org.apache.hadoop.hbase.procedure2.StateMachineProcedure:
+  StateMachineProcedure.Flow
+Nested classes/interfaces inherited from class org.apache.hadoop.hbase.procedure2.Procedure:
+  Procedure.LockState
+Nested classes/interfaces inherited from interface org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface:
+  PeerProcedureInterface.PeerOperationType
+
+Field Summary
+  Modifier and Type                 Field and Description
+  protected ProcedurePrepareLatch   latch
+  private boolean                   locked
+  protected String                  peerId
+
+Fields inherited from class org.apache.hadoop.hbase.procedure2.Procedure:
+  NO_PROC_ID, NO_TIMEOUT

hbase-site git commit: INFRA-10751 Empty commit

2018-02-05 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 04d647a7e -> 2412674d2


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/2412674d
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/2412674d
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/2412674d

Branch: refs/heads/asf-site
Commit: 2412674d2331fc3ced9b3d86b8e40f700966d367
Parents: 04d647a
Author: jenkins 
Authored: Mon Feb 5 15:13:53 2018 +
Committer: jenkins 
Committed: Mon Feb 5 15:13:53 2018 +

--

--




[07/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
index bf1a2cc..89317aa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
@@ -1813,3325 +1813,3330 @@
 1805  private void loadTableStates()
 1806      throws IOException {
 1807    tableStates = MetaTableAccessor.getTableStates(connection);
-1808  }
-1809
-1810  /**
-1811   * Check if the specified region's table is disabled.
-1812   * @param tableName table to check status of
-1813   */
-1814  private boolean isTableDisabled(TableName tableName) {
-1815    return tableStates.containsKey(tableName) &&
-1816        tableStates.get(tableName)
-1817        .inStates(TableState.State.DISABLED, TableState.State.DISABLING);
-1818  }
-1819
-1820  /**
-1821   * Scan HDFS for all regions, recording their information into
-1822   * regionInfoMap
-1823   */
-1824  public void loadHdfsRegionDirs() throws IOException, InterruptedException {
-1825    Path rootDir = FSUtils.getRootDir(getConf());
-1826    FileSystem fs = rootDir.getFileSystem(getConf());
-1827
-1828    // list all tables from HDFS
-1829    List<FileStatus> tableDirs = Lists.newArrayList();
-1830
-1831    boolean foundVersionFile = fs.exists(new Path(rootDir, HConstants.VERSION_FILE_NAME));
+1808    // Add hbase:meta so this tool keeps working. In hbase2, meta is always enabled though it
+1809    // has no entry in the table states. HBCK doesn't work right w/ hbase2 but just do this in
+1810    // meantime.
+1811    this.tableStates.put(TableName.META_TABLE_NAME,
+1812        new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED));
+1813  }
+1814
+1815  /**
+1816   * Check if the specified region's table is disabled.
+1817   * @param tableName table to check status of
+1818   */
+1819  private boolean isTableDisabled(TableName tableName) {
+1820    return tableStates.containsKey(tableName) &&
+1821        tableStates.get(tableName)
+1822        .inStates(TableState.State.DISABLED, TableState.State.DISABLING);
+1823  }
+1824
+1825  /**
+1826   * Scan HDFS for all regions, recording their information into
+1827   * regionInfoMap
+1828   */
+1829  public void loadHdfsRegionDirs() throws IOException, InterruptedException {
+1830    Path rootDir = FSUtils.getRootDir(getConf());
+1831    FileSystem fs = rootDir.getFileSystem(getConf());
 1832
-1833    List<Path> paths = FSUtils.getTableDirs(fs, rootDir);
-1834    for (Path path : paths) {
-1835      TableName tableName = FSUtils.getTableName(path);
-1836      if ((!checkMetaOnly &&
-1837          isTableIncluded(tableName)) ||
-1838          tableName.equals(TableName.META_TABLE_NAME)) {
-1839        tableDirs.add(fs.getFileStatus(path));
-1840      }
-1841    }
-1842
-1843    // verify that version file exists
-1844    if (!foundVersionFile) {
-1845      errors.reportError(ERROR_CODE.NO_VERSION_FILE,
-1846          "Version file does not exist in root dir " + rootDir);
-1847      if (shouldFixVersionFile()) {
-1848        LOG.info("Trying to create a new " + HConstants.VERSION_FILE_NAME
-1849            + " file.");
-1850        setShouldRerun();
-1851        FSUtils.setVersion(fs, rootDir, getConf().getInt(
-1852            HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000), getConf().getInt(
-1853            HConstants.VERSION_FILE_WRITE_ATTEMPTS,
-1854            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
-1855      }
-1856    }
-1857
-1858    // Avoid multithreading at table-level because already multithreaded internally at
-1859    // region-level.  Additionally multithreading at table-level can lead to deadlock
-1860    // if there are many tables in the cluster.  Since there are a limited # of threads
-1861    // in the executor's thread pool and if we multithread at the table-level by putting
-1862    // WorkItemHdfsDir callables into the executor, then we will have some threads in the
-1863    // executor tied up solely in waiting for the tables' region-level calls to complete.
-1864    // If there are enough tables then there will be no actual threads in the pool left
-1865    // for the region-level callables to be serviced.
-1866    for (FileStatus tableDir : tableDirs) {
-1867      LOG.debug("Loading region dirs from " + tableDir.getPath());
-1868      WorkItemHdfsDir item = new WorkItemHdfsDir(fs, errors, tableDir);
-1869      try {
-1870        item.call();
-1871      } catch (ExecutionException e) {
-1872        LOG.warn("Could not completely load table dir " +
-1873            tableDir.getPath(), e.getCause());
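
The removed comment block above describes a thread-starvation deadlock: table-level callables submitted to the same bounded executor as the region-level callables they block on can exhaust the pool. The following is a minimal, illustrative sketch of that failure mode in plain java.util.concurrent (the two-thread pool is an assumption for the demo; this is not HBase code). It shows why HBaseFsck runs each WorkItemHdfsDir inline via item.call() rather than submitting it.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

// Sketch only: "table" tasks occupy every pool thread, then block waiting on
// "region" tasks queued behind them in the same pool, so nothing progresses.
public class NestedPoolDeadlock {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(2); // limited # of threads

    Callable<Void> tableLevel = () -> {
      // Region-level work goes into the SAME pool the table-level task runs in.
      Future<String> region = pool.submit(() -> "region dir loaded");
      region.get(); // ties up a pool thread solely in waiting
      return null;
    };
    Future<Void> t1 = pool.submit(tableLevel);
    Future<Void> t2 = pool.submit(tableLevel);

    try {
      t1.get(5, TimeUnit.SECONDS);
      t2.get(5, TimeUnit.SECONDS);
      System.out.println("completed (cannot happen once both workers are occupied)");
    } catch (TimeoutException e) {
      // Both workers are blocked; the region tasks can never be serviced.
      System.out.println("deadlocked: no threads left for region-level work");
    }
    pool.shutdownNow(); // interrupt the stuck workers so the JVM can exit
  }
}

Calling the table-level work inline, as the patched loop does, keeps every pool thread available for the region-level callables.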

[20/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
index bf1a2cc..89317aa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
@@ -1813,3325 +1813,3330 @@

[34/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index b1630e2..50de8ba 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
-
+
 Apache HBase  Checkstyle Results
@@ -286,10 +286,10 @@
 Warnings
 Errors
 
-3527
+3528
 0
 0
-16583
+16579
 
 Files
 
@@ -442,7 +442,7 @@
 org/apache/hadoop/hbase/HBaseTestingUtility.java
 0
 0
-266
+265
 
 org/apache/hadoop/hbase/HColumnDescriptor.java
 0
@@ -764,9551 +764,9546 @@
 0
 1
 
-org/apache/hadoop/hbase/TestFullLogReconstruction.java
-0
-0
-3
-
 org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
 0
 0
 5

[15/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
index bf1a2cc..89317aa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
@@ -1813,3325 +1813,3330 @@

[09/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
index bf1a2cc..89317aa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
@@ -1813,3325 +1813,3330 @@

[16/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
index bf1a2cc..89317aa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
@@ -1813,3325 +1813,3330 @@

[31/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html b/devapidocs/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html
index 589a8b9..1726162 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":6,"i11":6,"i12":10,"i13":10,"i14":10,"i15":10,"i16":6};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":6,"i5":6,"i6":10,"i7":10,"i8":6};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -103,7 +103,10 @@ var activeTableTab = "activeTableTab";
 org.apache.hadoop.hbase.procedure2.Procedure<TEnvironment>
-org.apache.hadoop.hbase.procedure2.StateMachineProcedure<MasterProcedureEnv,org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState>
+org.apache.hadoop.hbase.procedure2.StateMachineProcedure<MasterProcedureEnv,TState>
+org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState>
 org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure
@@ -128,10 +133,10 @@ var activeTableTab = "activeTableTab";
 @InterfaceAudience.Private
-public abstract class ModifyPeerProcedure
-extends StateMachineProcedure<MasterProcedureEnv,org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState>
-implements PeerProcedureInterface
-The base class for all replication peer related procedure.
+public abstract class ModifyPeerProcedure
+extends AbstractPeerProcedure<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState>
+The base class for all replication peer related procedure except sync replication state
+ transition.
@@ -180,23 +185,18 @@
 Field and Description
-protected ProcedurePrepareLatch
-latch
-private boolean
-locked
 private static org.slf4j.Logger
 LOG
-protected String
-peerId
+Fields inherited from class org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure:
+latch, peerId
@@ -241,105 +241,66 @@
 Method and Description
-protected Procedure.LockState
-acquireLock(MasterProcedureEnv env)
-The user should override this method if they need a lock on an Entity.
-protected void
-deserializeStateData(ProcedureStateSerializer serializer)
-Called on store load to allow the user to decode the previously serialized state.
 protected StateMachineProcedure.Flow
 executeFromState(MasterProcedureEnv env, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState state)
 called to perform a single step of the specified 'state' of the procedure
 protected org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState
 getInitialState()
 Return the initial state object that will be used for the first call to executeFromState().
-ProcedurePrepareLatch
-getLatch()
-String
-getPeerId()
 protected org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState
 getState(int stateId)
 Convert an ordinal (or state id) to an Enum (or more descriptive) state object.
 protected int
 getStateId(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState state)
 Convert the Enum (or more descriptive) state object to an ordinal (or state id).
-protected boolean
-hasLock(MasterProcedureEnv env)
-This is used in conjunction with Procedure.holdLock(Object).
-protected boolean
-holdLock(MasterProcedureEnv env)
-Used to keep the procedure lock even when the procedure is yielding or suspended.
 protected abstract void
 postPeerModification(MasterProcedureEnv env)
 Called before we finish the procedure.
 protected abstract void
 prePeerModification(MasterProcedureEnv env)
 Called before we start the actual processing.
 private void
 releaseLatch()
-protected void
-releaseLock(MasterProcedureEnv env)
-The user should override this method, and
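
In source terms this page diff records a refactor rather than new behavior: the peerId and latch fields and the lock handling (acquireLock, releaseLock, holdLock, hasLock) moved out of ModifyPeerProcedure into the new parent AbstractPeerProcedure, and the class comment narrowed to exclude sync replication state transition. A shape-only sketch of the resulting hierarchy follows; every type below is a simplified stand-in for illustration, not the real HBase class.

// Stand-in types only; bodies and generics are simplified for illustration.
abstract class StateMachineProcedureSketch<TEnv, TState> {
}

abstract class AbstractPeerProcedureSketch<TState>
    extends StateMachineProcedureSketch<Object, TState> {
  protected String peerId; // pulled up: shared by every peer-related procedure
  // acquireLock()/releaseLock()/holdLock()/hasLock() now live at this level.
}

abstract class ModifyPeerProcedureSketch
    extends AbstractPeerProcedureSketch<Integer> { // Integer stands in for PeerModificationState
  // Only the modification workflow hooks remain here:
  protected abstract void prePeerModification();  // called before the actual processing
  protected abstract void postPeerModification(); // called before the procedure finishes
}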

[19/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
index bf1a2cc..89317aa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
@@ -1813,3325 +1813,3330 @@

[04/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/export_control.html
--
diff --git a/export_control.html b/export_control.html
index 5c371ff..2074138 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
-
+
 Apache HBase  Export Control
@@ -336,7 +336,7 @@ for more details.
 https://www.apache.org/;>The Apache Software Foundation.
 All rights reserved.
-  Last Published: 2018-02-04
+  Last Published: 2018-02-05

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/hbase-annotations/checkstyle.html
--
diff --git a/hbase-annotations/checkstyle.html b/hbase-annotations/checkstyle.html
index f238b31..b9449fa 100644
--- a/hbase-annotations/checkstyle.html
+++ b/hbase-annotations/checkstyle.html
@@ -7,7 +7,7 @@
-
+
 Apache HBase - Annotations  Checkstyle Results
@@ -150,7 +150,7 @@
 https://www.apache.org/;>The Apache Software Foundation.
 All rights reserved.
-  Last Published: 2018-02-04
+  Last Published: 2018-02-05

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/hbase-annotations/dependencies.html
--
diff --git a/hbase-annotations/dependencies.html b/hbase-annotations/dependencies.html
index 65cfd6d..06e2245 100644
--- a/hbase-annotations/dependencies.html
+++ b/hbase-annotations/dependencies.html
@@ -7,7 +7,7 @@
-
+
 Apache HBase - Annotations  Project Dependencies
@@ -272,7 +272,7 @@
 https://www.apache.org/;>The Apache Software Foundation.
 All rights reserved.
-  Last Published: 2018-02-04
+  Last Published: 2018-02-05

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/hbase-annotations/dependency-convergence.html
--
diff --git a/hbase-annotations/dependency-convergence.html b/hbase-annotations/dependency-convergence.html
index 040e9ba..679f49e 100644
--- a/hbase-annotations/dependency-convergence.html
+++ b/hbase-annotations/dependency-convergence.html
@@ -7,7 +7,7 @@
-
+
 Apache HBase - Annotations  Reactor Dependency Convergence
@@ -865,7 +865,7 @@
 https://www.apache.org/;>The Apache Software Foundation.
 All rights reserved.
-  Last Published: 2018-02-04
+  Last Published: 2018-02-05

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/hbase-annotations/dependency-info.html
--
diff --git a/hbase-annotations/dependency-info.html b/hbase-annotations/dependency-info.html
index 19363b3..c00d236 100644
--- a/hbase-annotations/dependency-info.html
+++ b/hbase-annotations/dependency-info.html
@@ -7,7 +7,7 @@
-
+
 Apache HBase - Annotations  Dependency Information
@@ -147,7 +147,7 @@
 https://www.apache.org/;>The Apache Software Foundation.
 All rights reserved.
-  Last Published: 2018-02-04
+  Last Published: 2018-02-05

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/hbase-annotations/dependency-management.html
--
diff --git a/hbase-annotations/dependency-management.html b/hbase-annotations/dependency-management.html
index 1c97568..f89e016 100644
--- a/hbase-annotations/dependency-management.html
+++ b/hbase-annotations/dependency-management.html
@@ -7,7 +7,7 @@
-
+
 Apache HBase - Annotations  Project Dependency Management
@@ -810,7 +810,7 @@
 https://www.apache.org/;>The Apache Software Foundation.
 All rights reserved.
-  Last Published: 2018-02-04
+  Last Published: 2018-02-05

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/hbase-annotations/index.html
--
diff --git a/hbase-annotations/index.html b/hbase-annotations/index.html
index 931c40d..3c30d49 100644
---

[01/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 03d2c36ec -> 04d647a7e


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/testdevapidocs/src-html/org/apache/hadoop/hbase/TestFullLogReconstruction.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestFullLogReconstruction.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestFullLogReconstruction.html
index 311f94b..1d55ae7 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestFullLogReconstruction.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestFullLogReconstruction.html
@@ -28,81 +28,88 @@
 020import static org.junit.Assert.assertEquals;
 021
 022import org.apache.hadoop.conf.Configuration;
-023import org.apache.hadoop.hbase.client.Table;
-024import org.apache.hadoop.hbase.testclassification.LargeTests;
-025import org.apache.hadoop.hbase.testclassification.MiscTests;
-026import org.apache.hadoop.hbase.util.Bytes;
-027import org.junit.AfterClass;
-028import org.junit.BeforeClass;
-029import org.junit.ClassRule;
-030import org.junit.Test;
-031import org.junit.experimental.categories.Category;
-032
-033@Category({ MiscTests.class, LargeTests.class })
-034public class TestFullLogReconstruction {
-035
-036  @ClassRule
-037  public static final HBaseClassTestRule CLASS_RULE =
-038      HBaseClassTestRule.forClass(TestFullLogReconstruction.class);
-039
-040  private final static HBaseTestingUtility
-041      TEST_UTIL = new HBaseTestingUtility();
-042
-043  private final static TableName TABLE_NAME = TableName.valueOf("tabletest");
-044  private final static byte[] FAMILY = Bytes.toBytes("family");
-045
-046  /**
-047   * @throws java.lang.Exception
-048   */
-049  @BeforeClass
-050  public static void setUpBeforeClass() throws Exception {
-051    Configuration c = TEST_UTIL.getConfiguration();
-052    // quicker heartbeat interval for faster DN death notification
-053    c.setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
-054    c.setInt("dfs.heartbeat.interval", 1);
-055    c.setInt("dfs.client.socket-timeout", 5000);
-056    // faster failover with cluster.shutdown();fs.close() idiom
-057    c.setInt("hbase.ipc.client.connect.max.retries", 1);
-058    c.setInt("dfs.client.block.recovery.retries", 1);
-059    c.setInt(HConstants.ZK_SESSION_TIMEOUT, 1000);
-060    TEST_UTIL.startMiniCluster(3);
-061  }
-062
-063  /**
-064   * @throws java.lang.Exception
-065   */
-066  @AfterClass
-067  public static void tearDownAfterClass() throws Exception {
-068    TEST_UTIL.shutdownMiniCluster();
-069  }
-070
-071  /**
-072   * Test the whole reconstruction loop. Build a table with regions aaa to zzz
-073   * and load every one of them multiple times with the same date and do a flush
-074   * at some point. Kill one of the region servers and scan the table. We should
-075   * see all the rows.
-076   * @throws Exception
-077   */
-078  @Test (timeout=30)
-079  public void testReconstruction() throws Exception {
-080    Table table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, FAMILY);
-081
-082    // Load up the table with simple rows and count them
-083    int initialCount = TEST_UTIL.loadTable(table, FAMILY);
-084    int count = TEST_UTIL.countRows(table);
-085
-086    assertEquals(initialCount, count);
-087
-088    for(int i = 0; i < 4; i++) {
-089      TEST_UTIL.loadTable(table, FAMILY);
-090    }
-091
-092    TEST_UTIL.expireRegionServerSession(0);
-093    int newCount = TEST_UTIL.countRows(table);
-094    assertEquals(count, newCount);
-095    table.close();
-096  }
-097}
+023import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
+024import org.apache.hadoop.hbase.client.Table;
+025import org.apache.hadoop.hbase.regionserver.HRegionServer;
+026import org.apache.hadoop.hbase.testclassification.LargeTests;
+027import org.apache.hadoop.hbase.testclassification.MiscTests;
+028import org.apache.hadoop.hbase.util.Bytes;
+029import org.junit.AfterClass;
+030import org.junit.BeforeClass;
+031import org.junit.ClassRule;
+032import org.junit.Test;
+033import org.junit.experimental.categories.Category;
+034
+035@Category({ MiscTests.class, LargeTests.class })
+036public class TestFullLogReconstruction {
+037
+038  @ClassRule
+039  public static final HBaseClassTestRule CLASS_RULE =
+040      HBaseClassTestRule.forClass(TestFullLogReconstruction.class);
+041
+042  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+043
+044  private final static TableName TABLE_NAME = TableName.valueOf("tabletest");
+045  private final static byte[] FAMILY = Bytes.toBytes("family");
+046
+047  @BeforeClass
+048  public static void setUpBeforeClass() throws Exception {
+049    Configuration c = TEST_UTIL.getConfiguration();
+050    // quicker heartbeat interval for

hbase git commit: HBASE-19932 TestSecureIPC in branch-1 fails with NoSuchMethodError against hadoop 3

2018-02-05 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 3aaafd7dd -> 13cc7f9f5


HBASE-19932 TestSecureIPC in branch-1 fails with NoSuchMethodError against 
hadoop 3


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/13cc7f9f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/13cc7f9f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/13cc7f9f

Branch: refs/heads/branch-1
Commit: 13cc7f9f5077b7a031715c80b771cba4698b423e
Parents: 3aaafd7
Author: tedyu 
Authored: Mon Feb 5 04:46:32 2018 -0800
Committer: tedyu 
Committed: Mon Feb 5 04:46:32 2018 -0800

--
 pom.xml | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/13cc7f9f/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 6b700a8..1073d16 100644
--- a/pom.xml
+++ b/pom.xml
@@ -2351,6 +2351,7 @@
   
   
 ${hadoop-three.version}
+1.0.1
 
 hbase-hadoop2-compat
 src/main/assembly/hadoop-two-compat.xml



[1/8] hbase git commit: HBASE-19936 Introduce a new base class for replication peer procedure [Forced Update!]

2018-02-05 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/HBASE-19064 8a79f26c0 -> 7d5e6f7d0 (forced update)


HBASE-19936 Introduce a new base class for replication peer procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ad3a1ba4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ad3a1ba4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ad3a1ba4

Branch: refs/heads/HBASE-19064
Commit: ad3a1ba4955ee8a6d8470f1ad4fcc4f2c69e6787
Parents: 7f7f2b2
Author: zhangduo 
Authored: Mon Feb 5 16:14:25 2018 +0800
Committer: zhangduo 
Committed: Mon Feb 5 20:23:19 2018 +0800

--
 .../src/main/protobuf/MasterProcedure.proto |  2 +-
 .../replication/AbstractPeerProcedure.java  | 97 
 .../master/replication/ModifyPeerProcedure.java | 67 +-
 3 files changed, 102 insertions(+), 64 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ad3a1ba4/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index ae676ea..83099c3 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -398,7 +398,7 @@ message RefreshPeerParameter {
   required ServerName target_server = 3;
 }
 
-message ModifyPeerStateData {
+message PeerProcedureStateData {
   required string peer_id = 1;
 }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ad3a1ba4/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
new file mode 100644
index 000..0ad8a63
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.replication;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface;
+import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerProcedureStateData;
+
+/**
+ * The base class for all replication peer related procedures.
+ */
+@InterfaceAudience.Private
+public abstract class AbstractPeerProcedure<TState>
+extends StateMachineProcedure<MasterProcedureEnv, TState> implements PeerProcedureInterface {
+
+  protected String peerId;
+
+  private volatile boolean locked;
+
+  // used to keep compatibility with old clients, where we can only return after updateStorage.
+  protected ProcedurePrepareLatch latch;
+
+  protected AbstractPeerProcedure() {
+  }
+
+  protected AbstractPeerProcedure(String peerId) {
+this.peerId = peerId;
+this.latch = ProcedurePrepareLatch.createLatch(2, 0);
+  }
+
+  public ProcedurePrepareLatch getLatch() {
+return latch;
+  }
+
+  @Override
+  public String getPeerId() {
+return peerId;
+  }
+
+  @Override
+  protected LockState acquireLock(MasterProcedureEnv env) {
+if (env.getProcedureScheduler().waitPeerExclusiveLock(this, peerId)) {
+  return LockState.LOCK_EVENT_WAIT;
+}
+locked = true;
+return LockState.LOCK_ACQUIRED;
+  }
+
+  @Override
+  protected void releaseLock(MasterProcedureEnv env) {
+locked = false;
+
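The archive cuts this message off inside releaseLock. For context, a concrete peer procedure builds its own state machine on top of this base class, which already supplies the peer id, the exclusive peer lock, and the prepare latch. A minimal hypothetical subclass, for illustration only; the no-op state handling below is not part of this commit, though PeerModificationState and PeerOperationType do exist elsewhere in this code base:

    // Hypothetical subclass: once AbstractPeerProcedure is in place, only
    // getPeerOperationType and the state-machine callbacks remain to implement.
    public class NoopPeerProcedure extends AbstractPeerProcedure<PeerModificationState> {

      public NoopPeerProcedure() {
      }

      public NoopPeerProcedure(String peerId) {
        super(peerId); // the base class stores the peer id and creates the latch
      }

      @Override
      public PeerOperationType getPeerOperationType() {
        return PeerOperationType.ENABLE;
      }

      @Override
      protected Flow executeFromState(MasterProcedureEnv env, PeerModificationState state) {
        return Flow.NO_MORE_STATE; // a real procedure drives its states here
      }

      @Override
      protected void rollbackState(MasterProcedureEnv env, PeerModificationState state) {
        // nothing to roll back in this sketch
      }

      @Override
      protected PeerModificationState getState(int stateId) {
        return PeerModificationState.forNumber(stateId);
      }

      @Override
      protected int getStateId(PeerModificationState state) {
        return state.getNumber();
      }

      @Override
      protected PeerModificationState getInitialState() {
        return PeerModificationState.PRE_PEER_MODIFICATION;
      }
    }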

[3/8] hbase git commit: HBASE-19083 Introduce a new log writer which can write to two HDFSes

2018-02-05 Thread zhangduo
HBASE-19083 Introduce a new log writer which can write to two HDFSes


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/874aa32d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/874aa32d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/874aa32d

Branch: refs/heads/HBASE-19064
Commit: 874aa32d22bffb5378be692dece7b22a09a48c70
Parents: ad3a1ba
Author: zhangduo 
Authored: Thu Jan 11 21:08:02 2018 +0800
Committer: zhangduo 
Committed: Mon Feb 5 20:30:19 2018 +0800

--
 .../hbase/regionserver/wal/AsyncFSWAL.java  |  26 ++--
 .../regionserver/wal/CombinedAsyncWriter.java   | 134 ++
 .../hbase/regionserver/wal/DualAsyncFSWAL.java  |  67 +
 .../wal/AbstractTestProtobufLog.java| 110 +++
 .../regionserver/wal/ProtobufLogTestHelper.java |  99 ++
 .../regionserver/wal/TestAsyncProtobufLog.java  |  32 +
 .../wal/TestCombinedAsyncWriter.java| 136 +++
 .../hbase/regionserver/wal/TestProtobufLog.java |  14 +-
 .../regionserver/wal/WriterOverAsyncWriter.java |  63 +
 9 files changed, 536 insertions(+), 145 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/874aa32d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index faf3b77..24094e0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -606,8 +606,7 @@ public class AsyncFSWAL extends AbstractFSWAL {
 }
   }
 
-  @Override
-  protected AsyncWriter createWriterInstance(Path path) throws IOException {
+  protected final AsyncWriter createAsyncWriter(FileSystem fs, Path path) throws IOException {
 try {
   return AsyncFSWALProvider.createAsyncWriter(conf, fs, path, false, eventLoopGroup,
 channelClass);
@@ -623,6 +622,11 @@ public class AsyncFSWAL extends AbstractFSWAL {
 }
   }
 
+  @Override
+  protected AsyncWriter createWriterInstance(Path path) throws IOException {
+return createAsyncWriter(fs, path);
+  }
+
   private void waitForSafePoint() {
 consumeLock.lock();
 try {
@@ -665,21 +669,21 @@ public class AsyncFSWAL extends AbstractFSWAL {
 } finally {
   consumeLock.unlock();
 }
-return executeClose(closeExecutor, oldWriter);
+return executeClose(oldWriter);
   }
 
   @Override
   protected void doShutdown() throws IOException {
 waitForSafePoint();
-executeClose(closeExecutor, writer);
+executeClose(writer);
 closeExecutor.shutdown();
 try {
   if (!closeExecutor.awaitTermination(waitOnShutdownInSeconds, TimeUnit.SECONDS)) {
-LOG.error("We have waited " + waitOnShutdownInSeconds + " seconds but"
-  + " the close of async writer doesn't complete."
-  + "Please check the status of underlying filesystem"
-  + " or increase the wait time by the config \""
-  + ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS + "\"");
+LOG.error("We have waited " + waitOnShutdownInSeconds + " seconds but" 
+
+  " the close of async writer doesn't complete." +
+  "Please check the status of underlying filesystem" +
+  " or increase the wait time by the config \"" + 
ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS +
+  "\"");
   }
 } catch (InterruptedException e) {
   LOG.error("The wait for close of async writer is interrupted");
@@ -692,7 +696,7 @@ public class AsyncFSWAL extends AbstractFSWAL {
 }
   }
 
-  private static long executeClose(ExecutorService closeExecutor, AsyncWriter writer) {
+  protected final long executeClose(AsyncWriter writer) {
 long fileLength;
 if (writer != null) {
   fileLength = writer.getLength();
@@ -700,7 +704,7 @@ public class AsyncFSWAL extends AbstractFSWAL {
 try {
   writer.close();
 } catch (IOException e) {
-  LOG.warn("close old writer failed", e);
+  LOG.warn("close writer failed", e);
 }
   });
 } else {

http://git-wip-us.apache.org/repos/asf/hbase/blob/874aa32d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java
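The archive truncates this message before the body of CombinedAsyncWriter. Judging from the class name and the diffstat, it fans each WAL operation out to two underlying writers, one per HDFS cluster. A rough hypothetical sketch of that idea, not the committed code; the real AsyncWriter contract and failure handling are simplified here:

    import java.io.IOException;
    import java.util.concurrent.CompletableFuture;
    import org.apache.hadoop.hbase.wal.WAL;
    import org.apache.hadoop.hbase.wal.WALProvider;

    // Illustrative fan-out writer: every append goes to both delegates, and an
    // edit counts as synced only once both filesystems have synced it.
    final class TwoWayAsyncWriter implements WALProvider.AsyncWriter {

      private final WALProvider.AsyncWriter local;
      private final WALProvider.AsyncWriter remote;

      TwoWayAsyncWriter(WALProvider.AsyncWriter local, WALProvider.AsyncWriter remote) {
        this.local = local;
        this.remote = remote;
      }

      @Override
      public void append(WAL.Entry entry) {
        local.append(entry);
        remote.append(entry);
      }

      @Override
      public CompletableFuture<Long> sync() {
        // the combined sync completes only when both writers have synced
        return CompletableFuture.allOf(local.sync(), remote.sync())
            .thenApply(ignored -> local.getLength());
      }

      @Override
      public long getLength() {
        return local.getLength();
      }

      @Override
      public void close() throws IOException {
        local.close();
        remote.close();
      }
    }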

[7/8] hbase git commit: HBASE-19857 Complete the procedure for adding a sync replication peer

2018-02-05 Thread zhangduo
HBASE-19857 Complete the procedure for adding a sync replication peer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5a358e7d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5a358e7d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5a358e7d

Branch: refs/heads/HBASE-19064
Commit: 5a358e7def2a2a6c324e6b6d0b00651643b8f094
Parents: ce755fa
Author: zhangduo 
Authored: Thu Jan 25 20:09:00 2018 +0800
Committer: zhangduo 
Committed: Mon Feb 5 20:30:19 2018 +0800

--
 .../hbase/replication/ReplicationPeer.java  |   9 +
 .../hbase/replication/ReplicationPeerImpl.java  |  28 +--
 .../hbase/replication/ReplicationPeers.java |   3 +-
 .../regionserver/PeerActionListener.java|  10 +-
 .../SyncReplicationPeerProvider.java|  35 +++
 .../SynchronousReplicationPeerProvider.java |  35 ---
 .../hbase/wal/SyncReplicationWALProvider.java   | 234 +++
 .../wal/SynchronousReplicationWALProvider.java  | 225 --
 .../org/apache/hadoop/hbase/wal/WALFactory.java |   8 +-
 .../TestReplicationSourceManager.java   |   3 +
 .../wal/TestSyncReplicationWALProvider.java | 153 
 .../TestSynchronousReplicationWALProvider.java  | 153 
 12 files changed, 456 insertions(+), 440 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5a358e7d/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index 2da3cce..0196a9a 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -54,6 +54,15 @@ public interface ReplicationPeer {
   PeerState getPeerState();
 
   /**
+   * Returns the sync replication state of the peer by reading local cache.
+   * 
   * If the peer is not a synchronous replication peer, a {@link SyncReplicationState#NONE} will be
+   * returned.
+   * @return the sync replication state
+   */
+  SyncReplicationState getSyncReplicationState();
+
+  /**
* Test whether the peer is enabled.
* @return {@code true} if enabled, otherwise {@code false}.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/5a358e7d/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
index 604e0bb..5ec14cd 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
@@ -36,6 +36,8 @@ public class ReplicationPeerImpl implements ReplicationPeer {
 
   private volatile PeerState peerState;
 
+  private volatile SyncReplicationState syncReplicationState;
+
   private final List peerConfigListeners;
 
   /**
@@ -45,12 +47,13 @@ public class ReplicationPeerImpl implements ReplicationPeer {
* @param id string representation of this peer's identifier
* @param peerConfig configuration for the replication peer
*/
-  public ReplicationPeerImpl(Configuration conf, String id, boolean peerState,
-  ReplicationPeerConfig peerConfig) {
+  public ReplicationPeerImpl(Configuration conf, String id, ReplicationPeerConfig peerConfig,
+  boolean peerState, SyncReplicationState syncReplicationState) {
 this.conf = conf;
 this.id = id;
 this.peerState = peerState ? PeerState.ENABLED : PeerState.DISABLED;
 this.peerConfig = peerConfig;
+this.syncReplicationState = syncReplicationState;
 this.peerConfigListeners = new ArrayList<>();
   }
 
@@ -77,37 +80,26 @@ public class ReplicationPeerImpl implements ReplicationPeer {
 return peerState;
   }
 
-  /**
-   * Get the peer config object
-   * @return the ReplicationPeerConfig for this peer
-   */
+  @Override
+  public SyncReplicationState getSyncReplicationState() {
+return syncReplicationState;
+  }
+
   @Override
   public ReplicationPeerConfig getPeerConfig() {
 return peerConfig;
   }
 
-  /**
-   * Get the configuration object required to communicate with this peer
-   * @return configuration object
-   */
   @Override
   public Configuration getConfiguration() {
 return conf;
   }
 
-  /**
-  

[5/8] hbase git commit: HBASE-19781 Add a new cluster state flag for synchronous replication

2018-02-05 Thread zhangduo
HBASE-19781 Add a new cluster state flag for synchronous replication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ce755faa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ce755faa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ce755faa

Branch: refs/heads/HBASE-19064
Commit: ce755faa9480996eac0145cd7cf4dda2841f0150
Parents: e8e8705
Author: Guanghao Zhang 
Authored: Mon Jan 22 11:44:49 2018 +0800
Committer: zhangduo 
Committed: Mon Feb 5 20:30:19 2018 +0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |  39 +
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  31 
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|   7 +
 .../hbase/client/ConnectionImplementation.java  |   9 ++
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  26 +++
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  15 ++
 .../client/ShortCircuitMasterConnection.java|   9 ++
 .../replication/ReplicationPeerConfigUtil.java  |  26 +--
 .../replication/ReplicationPeerDescription.java |  10 +-
 .../hbase/replication/SyncReplicationState.java |  48 ++
 .../hbase/shaded/protobuf/RequestConverter.java |  10 ++
 .../src/main/protobuf/Master.proto  |   4 +
 .../src/main/protobuf/MasterProcedure.proto |   6 +-
 .../src/main/protobuf/Replication.proto |  20 +++
 .../replication/ReplicationPeerStorage.java |  18 ++-
 .../hbase/replication/ReplicationUtils.java |   1 +
 .../replication/ZKReplicationPeerStorage.java   |  60 +--
 .../replication/TestReplicationStateBasic.java  |  23 ++-
 .../TestZKReplicationPeerStorage.java   |  12 +-
 .../hbase/coprocessor/MasterObserver.java   |  23 +++
 .../org/apache/hadoop/hbase/master/HMaster.java |  12 ++
 .../hbase/master/MasterCoprocessorHost.java |  21 +++
 .../hadoop/hbase/master/MasterRpcServices.java  |  17 ++
 .../hadoop/hbase/master/MasterServices.java |   9 ++
 .../procedure/PeerProcedureInterface.java   |   2 +-
 .../replication/ReplicationPeerManager.java |  51 +-
 ...ransitPeerSyncReplicationStateProcedure.java | 159 +++
 .../hbase/security/access/AccessController.java |   8 +
 .../replication/TestReplicationAdmin.java   |  62 
 .../hbase/master/MockNoopMasterServices.java|  11 +-
 .../cleaner/TestReplicationHFileCleaner.java|   4 +-
 .../TestReplicationTrackerZKImpl.java   |   6 +-
 .../TestReplicationSourceManager.java   |   3 +-
 .../security/access/TestAccessController.java   |  16 ++
 .../hbase/util/TestHBaseFsckReplication.java|   5 +-
 .../src/main/ruby/hbase/replication_admin.rb|  15 ++
 hbase-shell/src/main/ruby/shell.rb  |   1 +
 .../src/main/ruby/shell/commands/list_peers.rb  |   6 +-
 .../transit_peer_sync_replication_state.rb  |  44 +
 .../test/ruby/hbase/replication_admin_test.rb   |  24 +++
 40 files changed, 818 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ce755faa/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index b8546fa..167d6f3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@@ -2648,6 +2649,44 @@ public interface Admin extends Abortable, Closeable {
  List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern) throws IOException;
 
   /**
+   * Transit current cluster to a new state in a synchronous replication peer.
+   * @param peerId a short name that identifies the peer
+   * @param state a new state of current cluster
+   * @throws IOException if a remote or network exception occurs
+   */
+  void transitReplicationPeerSyncReplicationState(String peerId, SyncReplicationState state)
+  throws IOException;
+
+  /**
+   * Transit current cluster to a new state in a synchronous replication peer. But does not block
+   * and wait for it.
+   * 
+   * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
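A hedged usage sketch of the new Admin call; the connection boilerplate is standard client code and the peer id is a placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.replication.SyncReplicationState;

    public class TransitSyncStateExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
            Admin admin = conn.getAdmin()) {
          // Demote this cluster for peer "1"; the peer id is illustrative.
          admin.transitReplicationPeerSyncReplicationState("1",
              SyncReplicationState.DOWNGRADE_ACTIVE);
        }
      }
    }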

[2/8] hbase git commit: HBASE-19864 Use protobuf instead of enum.ordinal to store SyncReplicationState

2018-02-05 Thread zhangduo
HBASE-19864 Use protobuf instead of enum.ordinal to store SyncReplicationState

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7d5e6f7d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7d5e6f7d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7d5e6f7d

Branch: refs/heads/HBASE-19064
Commit: 7d5e6f7d0542c4d18c7b247275a08ca338eb8f69
Parents: 5a358e7
Author: Guanghao Zhang 
Authored: Fri Jan 26 16:50:48 2018 +0800
Committer: zhangduo 
Committed: Mon Feb 5 20:30:19 2018 +0800

--
 .../replication/ReplicationPeerConfigUtil.java  | 22 ++---
 .../hbase/replication/SyncReplicationState.java | 17 +
 .../hbase/shaded/protobuf/RequestConverter.java |  7 +++---
 .../src/main/protobuf/Replication.proto | 13 ++
 .../replication/ZKReplicationPeerStorage.java   | 25 +---
 .../hadoop/hbase/master/MasterRpcServices.java  |  9 ---
 ...ransitPeerSyncReplicationStateProcedure.java |  9 ---
 .../TestReplicationSourceManager.java   |  2 +-
 8 files changed, 67 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7d5e6f7d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index 86b49ea..5096824 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -398,7 +398,7 @@ public final class ReplicationPeerConfigUtil {
 ReplicationProtos.ReplicationState.State.ENABLED == desc.getState().getState();
 ReplicationPeerConfig config = convert(desc.getConfig());
 return new ReplicationPeerDescription(desc.getId(), enabled, config,
-  SyncReplicationState.valueOf(desc.getSyncReplicationState().getNumber()));
+  toSyncReplicationState(desc.getSyncReplicationState()));
   }
 
   public static ReplicationProtos.ReplicationPeerDescription
@@ -406,17 +406,33 @@ public final class ReplicationPeerConfigUtil {
 ReplicationProtos.ReplicationPeerDescription.Builder builder =
 ReplicationProtos.ReplicationPeerDescription.newBuilder();
 builder.setId(desc.getPeerId());
+
 ReplicationProtos.ReplicationState.Builder stateBuilder =
 ReplicationProtos.ReplicationState.newBuilder();
 stateBuilder.setState(desc.isEnabled() ? ReplicationProtos.ReplicationState.State.ENABLED :
 ReplicationProtos.ReplicationState.State.DISABLED);
 builder.setState(stateBuilder.build());
+
 builder.setConfig(convert(desc.getPeerConfig()));
-builder.setSyncReplicationState(
-  ReplicationProtos.SyncReplicationState.forNumber(desc.getSyncReplicationState().ordinal()));
+builder.setSyncReplicationState(toSyncReplicationState(desc.getSyncReplicationState()));
+
 return builder.build();
   }
 
+  public static ReplicationProtos.SyncReplicationState
+  toSyncReplicationState(SyncReplicationState state) {
+ReplicationProtos.SyncReplicationState.Builder syncReplicationStateBuilder =
+ReplicationProtos.SyncReplicationState.newBuilder();
+syncReplicationStateBuilder
+  .setState(ReplicationProtos.SyncReplicationState.State.forNumber(state.ordinal()));
+return syncReplicationStateBuilder.build();
+  }
+
+  public static SyncReplicationState
+  toSyncReplicationState(ReplicationProtos.SyncReplicationState state) {
+return SyncReplicationState.valueOf(state.getState().getNumber());
+  }
+
   public static ReplicationPeerConfig appendTableCFsToReplicationPeerConfig(
   Map<TableName, List<String>> tableCfs, ReplicationPeerConfig peerConfig) {
 ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder(peerConfig);

http://git-wip-us.apache.org/repos/asf/hbase/blob/7d5e6f7d/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
index bd144e9..a65b144 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
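The message truncates at the SyncReplicationState diff. The motivation, restated: persisting enum.ordinal() bakes the Java declaration order into storage, while the protobuf enum pins explicit field numbers. A hedged round-trip sketch using the two helpers added above; the wrapper class is illustrative:

    import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
    import org.apache.hadoop.hbase.replication.SyncReplicationState;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;

    public class SyncStateRoundTrip {
      public static void main(String[] args) {
        SyncReplicationState in = SyncReplicationState.DOWNGRADE_ACTIVE;
        // Serialize through the proto enum added by this commit...
        ReplicationProtos.SyncReplicationState proto =
            ReplicationPeerConfigUtil.toSyncReplicationState(in);
        // ...and read it back; explicit proto numbers survive enum reordering.
        SyncReplicationState out = ReplicationPeerConfigUtil.toSyncReplicationState(proto);
        assert in == out;
      }
    }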

[6/8] hbase git commit: HBASE-19078 Add a remote peer cluster wal directory config for synchronous replication

2018-02-05 Thread zhangduo
HBASE-19078 Add a remote peer cluster wal directory config for synchronous replication

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e5e104c9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e5e104c9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e5e104c9

Branch: refs/heads/HBASE-19064
Commit: e5e104c943746d34043c946e397f9149743aa0d4
Parents: 874aa32
Author: Guanghao Zhang 
Authored: Sat Jan 13 18:55:28 2018 +0800
Committer: zhangduo 
Committed: Mon Feb 5 20:30:19 2018 +0800

--
 .../replication/ReplicationPeerConfigUtil.java  |  6 ++
 .../replication/ReplicationPeerConfig.java  | 21 +-
 .../ReplicationPeerConfigBuilder.java   |  7 ++
 .../src/main/protobuf/Replication.proto |  1 +
 .../replication/ReplicationPeerManager.java | 15 
 .../replication/TestReplicationAdmin.java   | 77 
 .../src/main/ruby/hbase/replication_admin.rb| 17 +++--
 hbase-shell/src/main/ruby/hbase_constants.rb|  1 +
 .../src/main/ruby/shell/commands/add_peer.rb| 21 +-
 .../src/main/ruby/shell/commands/list_peers.rb  | 19 -
 .../test/ruby/hbase/replication_admin_test.rb   | 16 
 11 files changed, 188 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e5e104c9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index a234a9b..642149b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -315,6 +315,9 @@ public final class ReplicationPeerConfigUtil {
 excludeNamespacesList.stream().map(ByteString::toStringUtf8).collect(Collectors.toSet()));
 }
 
+if (peer.hasRemoteWALDir()) {
+  builder.setRemoteWALDir(peer.getRemoteWALDir());
+}
 return builder.build();
   }
 
@@ -371,6 +374,9 @@ public final class ReplicationPeerConfigUtil {
   }
 }
 
+if (peerConfig.getRemoteWALDir() != null) {
+  builder.setRemoteWALDir(peerConfig.getRemoteWALDir());
+}
 return builder.build();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e5e104c9/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index bf8d030..4c10c46 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -46,6 +46,8 @@ public class ReplicationPeerConfig {
   private Map<TableName, List<String>> excludeTableCFsMap = null;
   private Set excludeNamespaces = null;
   private long bandwidth = 0;
+  // Used by synchronous replication
+  private String remoteWALDir;
 
   private ReplicationPeerConfig(ReplicationPeerConfigBuilderImpl builder) {
 this.clusterKey = builder.clusterKey;
@@ -64,6 +66,7 @@ public class ReplicationPeerConfig {
 builder.excludeNamespaces != null ? 
Collections.unmodifiableSet(builder.excludeNamespaces)
 : null;
 this.bandwidth = builder.bandwidth;
+this.remoteWALDir = builder.remoteWALDir;
   }
 
   private Map
@@ -210,6 +213,10 @@ public class ReplicationPeerConfig {
 return this;
   }
 
+  public String getRemoteWALDir() {
+return this.remoteWALDir;
+  }
+
   public static ReplicationPeerConfigBuilder newBuilder() {
 return new ReplicationPeerConfigBuilderImpl();
   }
@@ -223,7 +230,8 @@ public class ReplicationPeerConfig {
 .setReplicateAllUserTables(peerConfig.replicateAllUserTables())
 .setExcludeTableCFsMap(peerConfig.getExcludeTableCFsMap())
 .setExcludeNamespaces(peerConfig.getExcludeNamespaces())
-.setBandwidth(peerConfig.getBandwidth());
+.setBandwidth(peerConfig.getBandwidth())
+.setRemoteWALDir(peerConfig.getRemoteWALDir());
 return builder;
   }
 
@@ -250,6 +258,8 @@ public class ReplicationPeerConfig {
 
 private long bandwidth = 0;
 
+private String remoteWALDir = null;
+
 @Override
 public 
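The message truncates here. A hedged usage sketch of the new builder option, with placeholder cluster key and directory:

    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

    public class RemoteWALDirExample {
      public static void main(String[] args) {
        // Placeholders throughout; only setRemoteWALDir is the new option.
        ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
            .setClusterKey("zk1,zk2,zk3:2181:/hbase")
            // WALs for this synchronous peer are duplicated under this
            // directory on the remote cluster's filesystem.
            .setRemoteWALDir("hdfs://remote-cluster/remotewal")
            .build();
        System.out.println(peerConfig.getRemoteWALDir());
      }
    }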

[8/8] hbase git commit: HBASE-19747 Introduce a special WALProvider for synchronous replication

2018-02-05 Thread zhangduo
HBASE-19747 Introduce a special WALProvider for synchronous replication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e8e87051
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e8e87051
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e8e87051

Branch: refs/heads/HBASE-19064
Commit: e8e8705133204f7fea7ce361df5376c004b61ee2
Parents: e5e104c
Author: zhangduo 
Authored: Fri Jan 19 18:38:39 2018 +0800
Committer: zhangduo 
Committed: Mon Feb 5 20:30:19 2018 +0800

--
 .../hbase/regionserver/wal/AbstractFSWAL.java   |   7 +
 .../hbase/regionserver/wal/AsyncFSWAL.java  |   1 -
 .../hbase/regionserver/wal/DualAsyncFSWAL.java  |   4 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java   |   3 -
 .../regionserver/PeerActionListener.java|  33 +++
 .../SynchronousReplicationPeerProvider.java |  35 +++
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |   1 +
 .../hadoop/hbase/wal/AsyncFSWALProvider.java|  18 +-
 .../hbase/wal/NettyAsyncFSWALConfigHelper.java  |   8 +-
 .../hbase/wal/RegionGroupingProvider.java   |  13 +-
 .../wal/SynchronousReplicationWALProvider.java  | 225 +++
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  37 ++-
 .../org/apache/hadoop/hbase/wal/WALKeyImpl.java |  16 +-
 .../regionserver/TestCompactionPolicy.java  |   1 +
 .../regionserver/TestFailedAppendAndSync.java   | 122 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |  24 +-
 .../TestHRegionWithInMemoryFlush.java   |   7 -
 .../hbase/regionserver/TestRegionIncrement.java |  20 +-
 .../hbase/regionserver/TestWALLockup.java   |  11 +-
 .../regionserver/wal/AbstractTestWALReplay.java |   1 +
 .../regionserver/wal/ProtobufLogTestHelper.java |  44 +++-
 .../hbase/regionserver/wal/TestAsyncFSWAL.java  |  13 +-
 .../regionserver/wal/TestAsyncWALReplay.java|   4 +-
 .../wal/TestCombinedAsyncWriter.java|   3 +-
 .../hbase/regionserver/wal/TestFSHLog.java  |  15 +-
 .../hbase/regionserver/wal/TestWALReplay.java   |   1 +
 .../apache/hadoop/hbase/wal/IOTestProvider.java |   2 -
 .../TestSynchronousReplicationWALProvider.java  | 153 +
 28 files changed, 663 insertions(+), 159 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e8e87051/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 54a5cd3..a37bda3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -435,6 +435,13 @@ public abstract class AbstractFSWAL implements WAL {
 this.implClassName = getClass().getSimpleName();
   }
 
+  /**
+   * Used to initialize the WAL. Usually just call rollWriter to create the first log writer.
+   */
+  public void init() throws IOException {
+rollWriter();
+  }
+
   @Override
   public void registerWALActionsListener(WALActionsListener listener) {
 this.listeners.add(listener);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e8e87051/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index 24094e0..b741260 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -247,7 +247,6 @@ public class AsyncFSWAL extends AbstractFSWAL {
 batchSize = conf.getLong(WAL_BATCH_SIZE, DEFAULT_WAL_BATCH_SIZE);
 waitOnShutdownInSeconds = conf.getInt(ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS,
   DEFAULT_ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS);
-rollWriter();
   }
 
   private static boolean waitingRoll(int epochAndState) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/e8e87051/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
index 6bf9e02..f92ce93 100644
--- 
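The message truncates here. The AbstractFSWAL hunk above, together with dropping rollWriter() from the AsyncFSWAL constructor, separates construction from the first log roll. A hedged sketch of the resulting lifecycle; the factory call is hypothetical:

    // Illustrative only: the two-step lifecycle the init() hook enables.
    AbstractFSWAL<?> wal = createWAL(); // hypothetical factory; the constructor
                                        // no longer rolls the first writer
    // ... a wrapping provider can finish its wiring (listeners, peer state) ...
    wal.init(); // performs the first rollWriter(), creating the initial writer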

[4/8] hbase git commit: HBASE-19781 Add a new cluster state flag for synchronous replication

2018-02-05 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/ce755faa/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
index 8911982..f5eca39 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
@@ -67,9 +68,9 @@ public class TestHBaseFsckReplication {
 String peerId1 = "1";
 String peerId2 = "2";
 peerStorage.addPeer(peerId1, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
-  true);
+  true, SyncReplicationState.NONE);
 peerStorage.addPeer(peerId2, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
-  true);
+  true, SyncReplicationState.NONE);
 for (int i = 0; i < 10; i++) {
   queueStorage.addWAL(ServerName.valueOf("localhost", 1 + i, 10 + i), peerId1,
 "file-" + i);

http://git-wip-us.apache.org/repos/asf/hbase/blob/ce755faa/hbase-shell/src/main/ruby/hbase/replication_admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb 
b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
index ba7d191..d5d4844 100644
--- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
@@ -20,6 +20,7 @@
 include Java
 
 java_import 
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil
+java_import org.apache.hadoop.hbase.replication.SyncReplicationState
 java_import org.apache.hadoop.hbase.replication.ReplicationPeerConfig
 java_import org.apache.hadoop.hbase.util.Bytes
 java_import org.apache.hadoop.hbase.zookeeper.ZKConfig
@@ -329,6 +330,20 @@ module Hbase
   '!' + ReplicationPeerConfigUtil.convertToString(tableCFs)
 end
 
+# Transit current cluster to a new state in the specified synchronous
+# replication peer
+def transit_peer_sync_replication_state(id, state)
+  if 'ACTIVE'.eql?(state)
+@admin.transitReplicationPeerSyncReplicationState(id, SyncReplicationState::ACTIVE)
+  elsif 'DOWNGRADE_ACTIVE'.eql?(state)
+@admin.transitReplicationPeerSyncReplicationState(id, SyncReplicationState::DOWNGRADE_ACTIVE)
+  elsif 'STANDBY'.eql?(state)
+@admin.transitReplicationPeerSyncReplicationState(id, SyncReplicationState::STANDBY)
+  else
+raise(ArgumentError, 'synchronous replication state must be ACTIVE, DOWNGRADE_ACTIVE or STANDBY')
+  end
+end
+
 
#--
 # Enables a table's replication switch
 def enable_tablerep(table_name)

http://git-wip-us.apache.org/repos/asf/hbase/blob/ce755faa/hbase-shell/src/main/ruby/shell.rb
--
diff --git a/hbase-shell/src/main/ruby/shell.rb 
b/hbase-shell/src/main/ruby/shell.rb
index 4a74646..28edd6d 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -394,6 +394,7 @@ Shell.load_command_group(
 get_peer_config
 list_peer_configs
 update_peer_config
+transit_peer_sync_replication_state
   ]
 )
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ce755faa/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb 
b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
index caeab86..aa10fda 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
@@ -39,8 +39,8 @@ EOF
 peers = replication_admin.list_peers
 
 formatter.header(%w[PEER_ID CLUSTER_KEY ENDPOINT_CLASSNAME
-REMOTE_ROOT_DIR STATE REPLICATE_ALL
-NAMESPACES TABLE_CFS BANDWIDTH])
+REMOTE_ROOT_DIR SYNC_REPLICATION_STATE STATE
+REPLICATE_ALL NAMESPACES TABLE_CFS BANDWIDTH])
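From the shell this is invoked as, e.g., transit_peer_sync_replication_state '1', 'STANDBY'. A hedged Java mirror of the Ruby dispatch above; the helper class is made up, and note that NONE is deliberately rejected as an input:

    import org.apache.hadoop.hbase.replication.SyncReplicationState;

    final class ShellStateParser {
      // SyncReplicationState is a plain Java enum, so valueOf covers the three
      // accepted names; anything else (including NONE) is rejected, matching
      // the ArgumentError raised by the Ruby code.
      static SyncReplicationState parse(String state) {
        switch (state) {
          case "ACTIVE":
          case "DOWNGRADE_ACTIVE":
          case "STANDBY":
            return SyncReplicationState.valueOf(state);
          default:
            throw new IllegalArgumentException(
                "synchronous replication state must be ACTIVE, DOWNGRADE_ACTIVE or STANDBY");
        }
      }
    }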
 
 

hbase git commit: HBASE-19936 Introduce a new base class for replication peer procedure

2018-02-05 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 7f7f2b2de -> ad3a1ba49


HBASE-19936 Introduce a new base class for replication peer procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ad3a1ba4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ad3a1ba4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ad3a1ba4

Branch: refs/heads/master
Commit: ad3a1ba4955ee8a6d8470f1ad4fcc4f2c69e6787
Parents: 7f7f2b2
Author: zhangduo 
Authored: Mon Feb 5 16:14:25 2018 +0800
Committer: zhangduo 
Committed: Mon Feb 5 20:23:19 2018 +0800

--
 .../src/main/protobuf/MasterProcedure.proto |  2 +-
 .../replication/AbstractPeerProcedure.java  | 97 
 .../master/replication/ModifyPeerProcedure.java | 67 +-
 3 files changed, 102 insertions(+), 64 deletions(-)
--


hbase git commit: HBASE-19936 Introduce a new base class for replication peer procedure

2018-02-05 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/HBASE-19397-branch-2 f03b19164 -> e6097eb3a


HBASE-19936 Introduce a new base class for replication peer procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e6097eb3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e6097eb3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e6097eb3

Branch: refs/heads/HBASE-19397-branch-2
Commit: e6097eb3a02802e272827ab7abded903513fb14a
Parents: f03b191
Author: zhangduo 
Authored: Mon Feb 5 16:14:25 2018 +0800
Committer: zhangduo 
Committed: Mon Feb 5 20:23:57 2018 +0800

--
 .../src/main/protobuf/MasterProcedure.proto |  2 +-
 .../replication/AbstractPeerProcedure.java  | 97 
 .../master/replication/ModifyPeerProcedure.java | 67 +-
 3 files changed, 102 insertions(+), 64 deletions(-)
--


hbase git commit: HBASE-19658 make the test testFlatteningToJumboCellChunkMap() stable, by eliminating the possibility of a third cell being added while the in-memory flush is still in progress

2018-02-05 Thread anastasia
Repository: hbase
Updated Branches:
  refs/heads/branch-2 c2e3d5208 -> 514eadbe9


HBASE-19658 make the test testFlatteningToJumboCellChunkMap() stable, by eliminating the possibility of a third cell being added while the in-memory flush is still in progress


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/514eadbe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/514eadbe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/514eadbe

Branch: refs/heads/branch-2
Commit: 514eadbe9503c1162085890a86217a23457eb21d
Parents: c2e3d52
Author: anastas 
Authored: Mon Feb 5 11:29:10 2018 +0200
Committer: anastas 
Committed: Mon Feb 5 11:29:10 2018 +0200

--
 .../TestCompactingToCellFlatMapMemStore.java | 19 ---
 1 file changed, 12 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/514eadbe/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
index 0036426..25265b3 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
@@ -754,9 +754,9 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
 // set memstore to flat into CellChunkMap
 MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC;
 
memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-String.valueOf(compactionType));
-((MyCompactingMemStore)memstore).initiateType(compactionType, memstore.getConfiguration());
-((CompactingMemStore)memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP);
+String.valueOf(compactionType));
+((MyCompactingMemStore) memstore).initiateType(compactionType, memstore.getConfiguration());
+((CompactingMemStore) memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP);
 
 int numOfCells = 1;
 char[] chars = new char[MemStoreLAB.CHUNK_SIZE_DEFAULT];
@@ -764,7 +764,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
   chars[i] = 'A';
 }
 String bigVal = new String(chars);
-String[] keys1 = { "A"};
+String[] keys1 = {"A"};
 
 // make one cell
 byte[] row = Bytes.toBytes(keys1[0]);
@@ -784,7 +784,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
 assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize());
 assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
 
-((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and flatten
+((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and flatten
 while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
   Threads.sleep(10);
 }
@@ -809,12 +809,17 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
 
 memstore.clearSnapshot(snapshot.getId());
 
-String[] keys2 = { "C", "D", "E"};
+// Allocating two big cells (too big for being copied into a regular chunk).
+String[] keys2 = {"C", "D"};
 addRowsByKeys(memstore, keys2, val);
 while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
   Threads.sleep(10);
 }
-totalHeapSize = 1 * oneCellOnCSLMHeapSize + MutableSegment.DEEP_OVERHEAD
+
+// The in-memory flush size is bigger than the size of a single cell,
+// but smaller than the size of two cells.
+// Therefore, the two created cells are flattened together.
+totalHeapSize = MutableSegment.DEEP_OVERHEAD
 + CellChunkImmutableSegment.DEEP_OVERHEAD_CCM
 + 2 * oneCellOnCCMHeapSize;
 assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
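For reference, the stabilization idiom the fix relies on, taken from the hunks above: trigger the in-memory flush, then poll until the background flattening finishes before asserting heap sizes, so no extra cell can race the flush:

    // Pattern from the test (context, not new code):
    ((CompactingMemStore) memstore).flushInMemory();
    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
      Threads.sleep(10);
    }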