[2/6] hbase git commit: HBASE-20521 change getConf as first sequence instead of jobContext in TableOutputFormat.checkOutputSpec, add unit tests

2018-05-08 Thread mdrob
HBASE-20521 change getConf as first sequence instead of jobContext in TableOutputFormat.checkOutputSpec, add unit tests


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/421ed6ca
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/421ed6ca
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/421ed6ca

Branch: refs/heads/branch-2
Commit: 421ed6ca513d09e1345a73c5da1289798a25ae47
Parents: d91908b
Author: Mike Drob 
Authored: Tue May 8 22:18:48 2018 -0500
Committer: Mike Drob 
Committed: Tue May 8 22:18:48 2018 -0500

--
 .../org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java   | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/421ed6ca/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
--
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
index 0a1928b..78be5af 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
@@ -174,9 +174,9 @@ implements Configurable {
   @Override
   public void checkOutputSpecs(JobContext context) throws IOException,
   InterruptedException {
-    Configuration hConf = context.getConfiguration();
-    if(hConf == null) {
-      hConf = this.conf;
+    Configuration hConf = getConf();
+    if (hConf == null) {
+      hConf = context.getConfiguration();
 }
 
 try (Admin admin = ConnectionFactory.createConnection(hConf).getAdmin()) {
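
The ordering matters because TableOutputFormat implements Hadoop's Configurable: the framework injects a Configuration through setConf() when it creates the format via ReflectionUtils, but code that constructs the format directly (as the Spark suite in this change does) may never call setConf(), leaving getConf() null. Below is a minimal sketch of the precedence the patch establishes; ConfigAwareOutputFormat and resolveConf are illustrative names, not HBase API.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configurable;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.JobContext;

    // Sketch only: prefer the Configuration injected through setConf(),
    // fall back to the JobContext when no conf was ever injected.
    abstract class ConfigAwareOutputFormat implements Configurable {
      private Configuration conf;

      @Override
      public void setConf(Configuration conf) {
        // Called by ReflectionUtils.newInstance(), or explicitly by callers
        // (e.g. Spark code) that build the output format themselves.
        this.conf = conf;
      }

      @Override
      public Configuration getConf() {
        return conf;
      }

      Configuration resolveConf(JobContext context) throws IOException {
        Configuration hConf = getConf();      // HBase-specific conf, if injected
        if (hConf == null) {
          hConf = context.getConfiguration(); // fall back to the job's conf
        }
        if (hConf == null) {
          throw new IOException("no configuration available");
        }
        return hConf;
      }
    }

Consulting getConf() first lets an explicitly injected, HBase-specific configuration win over the generic job configuration instead of being silently ignored.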



[5/6] hbase git commit: Revert "change getConf as first sequence instead of jobContext in TableOutputFormat.checkOutputSpec, add unit tests"

2018-05-08 Thread mdrob
Revert "change getConf as first sequence instead of jobContext in 
TableOutputFormat.checkOutputSpec, add unit tests"

This reverts commit b748ea3b0d3a074b976fa0ae59fc591303e805a0.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/82e6fae1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/82e6fae1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/82e6fae1

Branch: refs/heads/master
Commit: 82e6fae1de5a5f700c0237dbaa8994f409833370
Parents: 0dcae90
Author: Mike Drob 
Authored: Tue May 8 22:20:06 2018 -0500
Committer: Mike Drob 
Committed: Tue May 8 22:20:06 2018 -0500

--
 .../hbase/mapreduce/TableOutputFormat.java  |   4 +-
 .../hbase/spark/TableOutputFormatSuite.scala| 130 ---
 2 files changed, 2 insertions(+), 132 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/82e6fae1/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
--
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
index 78be5af..4eb2654 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
@@ -174,9 +174,9 @@ implements Configurable {
   @Override
   public void checkOutputSpecs(JobContext context) throws IOException,
   InterruptedException {
-    Configuration hConf = getConf();
+    Configuration hConf = context.getConfiguration();
     if (hConf == null) {
-      hConf = context.getConfiguration();
+      hConf = this.conf;
 }
 
 try (Admin admin = ConnectionFactory.createConnection(hConf).getAdmin()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/82e6fae1/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/TableOutputFormatSuite.scala
--
diff --git a/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/TableOutputFormatSuite.scala b/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/TableOutputFormatSuite.scala
deleted file mode 100644
index b306f08..0000000
--- a/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/TableOutputFormatSuite.scala
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.spark
-
-
-import java.text.SimpleDateFormat
-import java.util.{Date, Locale}
-
-import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
-import org.apache.hadoop.hbase.{HBaseTestingUtility, TableName, TableNotFoundException}
-import org.apache.hadoop.hbase.util.Bytes
-import org.apache.hadoop.mapreduce.{Job, TaskAttemptID, TaskType}
-import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
-import org.apache.spark.{SparkConf, SparkContext}
-import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSuite}
-
-import scala.util.{Failure, Success, Try}
-
-
-// Unit tests for HBASE-20521: change get configuration(TableOutputFormat.conf) object first sequence from jobContext to getConf
-// this suite contains two tests, one for normal case(getConf return null, use jobContext), create new TableOutputformat object without init TableOutputFormat.conf object,
-// configuration object inside checkOutputSpecs came from jobContext.
-// The other one(getConf return conf object) we manually call "setConf" to init TableOutputFormat.conf, for making it more straight forward, we specify a nonexistent table
-// name in conf object, checkOutputSpecs will then throw TableNotFoundException exception
-class TableOutputFormatSuite extends FunSuite with
-  BeforeAndAfterEach with BeforeAndAfterAll with Logging{
-  @transient var sc: SparkContext = null
-  var TEST_UTIL = new HBaseTestingUtility
-
-  val tableName = "TableOutputFormatTest"
-  val 

[1/6] hbase git commit: Revert "change getConf as first sequence instead of jobContext in TableOutputFormat.checkOutputSpec, add unit tests"

2018-05-08 Thread mdrob
Repository: hbase
Updated Branches:
  refs/heads/branch-2 8d60a89c3 -> 421ed6ca5
  refs/heads/branch-2.0 9e3b28804 -> c15fd6b00
  refs/heads/master 0dcae90ea -> c51e9adc7


Revert "change getConf as first sequence instead of jobContext in 
TableOutputFormat.checkOutputSpec, add unit tests"

This reverts commit 9e68719014a62f37e7559329e44e2df49738ef6c.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d91908b0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d91908b0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d91908b0

Branch: refs/heads/branch-2
Commit: d91908b0d46156fa364ba11d476b9cdbc01d0411
Parents: 8d60a89
Author: Mike Drob 
Authored: Tue May 8 22:18:42 2018 -0500
Committer: Mike Drob 
Committed: Tue May 8 22:18:42 2018 -0500

--
 .../org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java   | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d91908b0/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
--
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
index 78be5af..0a1928b 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
@@ -174,9 +174,9 @@ implements Configurable {
   @Override
   public void checkOutputSpecs(JobContext context) throws IOException,
   InterruptedException {
-    Configuration hConf = getConf();
-    if (hConf == null) {
-      hConf = context.getConfiguration();
+    Configuration hConf = context.getConfiguration();
+    if(hConf == null) {
+      hConf = this.conf;
 }
 
 try (Admin admin = ConnectionFactory.createConnection(hConf).getAdmin()) {



[3/6] hbase git commit: Revert "change getConf as first sequence instead of jobContext in TableOutputFormat.checkOutputSpec, add unit tests

2018-05-08 Thread mdrob
Revert "change getConf as first sequence instead of jobContext in 
TableOutputFormat.checkOutputSpec, add unit tests

This reverts commit 9e3b28804d28ad586d12df24a2e5417c25cff857.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/18ca1fcb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/18ca1fcb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/18ca1fcb

Branch: refs/heads/branch-2.0
Commit: 18ca1fcb560236b8c9a0ca057f163ebdaab89f0e
Parents: 9e3b288
Author: Mike Drob 
Authored: Tue May 8 22:19:31 2018 -0500
Committer: Mike Drob 
Committed: Tue May 8 22:19:31 2018 -0500

--
 .../org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java   | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/18ca1fcb/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
--
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
index 78be5af..0a1928b 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
@@ -174,9 +174,9 @@ implements Configurable {
   @Override
   public void checkOutputSpecs(JobContext context) throws IOException,
   InterruptedException {
-    Configuration hConf = getConf();
-    if (hConf == null) {
-      hConf = context.getConfiguration();
+    Configuration hConf = context.getConfiguration();
+    if(hConf == null) {
+      hConf = this.conf;
 }
 
 try (Admin admin = ConnectionFactory.createConnection(hConf).getAdmin()) {



[4/6] hbase git commit: HBASE-20521 change getConf as first sequence instead of jobContext in TableOutputFormat.checkOutputSpec, add unit tests

2018-05-08 Thread mdrob
HBASE-20521 change getConf as first sequence instead of jobContext in TableOutputFormat.checkOutputSpec, add unit tests


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c15fd6b0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c15fd6b0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c15fd6b0

Branch: refs/heads/branch-2.0
Commit: c15fd6b00a37ed927c33cb13401dc8dfe234e595
Parents: 18ca1fc
Author: Mike Drob 
Authored: Tue May 8 22:18:48 2018 -0500
Committer: Mike Drob 
Committed: Tue May 8 22:19:44 2018 -0500

--
 .../org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java   | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c15fd6b0/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
--
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
index 0a1928b..78be5af 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
@@ -174,9 +174,9 @@ implements Configurable {
   @Override
   public void checkOutputSpecs(JobContext context) throws IOException,
   InterruptedException {
-    Configuration hConf = context.getConfiguration();
-    if(hConf == null) {
-      hConf = this.conf;
+    Configuration hConf = getConf();
+    if (hConf == null) {
+      hConf = context.getConfiguration();
 }
 
 try (Admin admin = ConnectionFactory.createConnection(hConf).getAdmin()) {



[6/6] hbase git commit: HBASE-20521 change getConf as first sequence instead of jobContext in TableOutputFormat.checkOutputSpec, add unit tests

2018-05-08 Thread mdrob
HBASE-20521 change getConf as first sequence instead of jobContext in TableOutputFormat.checkOutputSpec, add unit tests


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c51e9adc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c51e9adc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c51e9adc

Branch: refs/heads/master
Commit: c51e9adc78492cd9a12b513bdc83a4a10a043c52
Parents: 82e6fae
Author: Mike Drob 
Authored: Tue May 8 22:20:12 2018 -0500
Committer: Mike Drob 
Committed: Tue May 8 22:20:12 2018 -0500

--
 .../hbase/mapreduce/TableOutputFormat.java  |   4 +-
 .../hbase/spark/TableOutputFormatSuite.scala| 130 +++
 2 files changed, 132 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c51e9adc/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
--
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
index 4eb2654..78be5af 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
@@ -174,9 +174,9 @@ implements Configurable {
   @Override
   public void checkOutputSpecs(JobContext context) throws IOException,
   InterruptedException {
-    Configuration hConf = context.getConfiguration();
+    Configuration hConf = getConf();
     if (hConf == null) {
-      hConf = this.conf;
+      hConf = context.getConfiguration();
 }
 
 try (Admin admin = ConnectionFactory.createConnection(hConf).getAdmin()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/c51e9adc/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/TableOutputFormatSuite.scala
--
diff --git a/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/TableOutputFormatSuite.scala b/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/TableOutputFormatSuite.scala
new file mode 100644
index 0000000..b306f08
--- /dev/null
+++ b/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/TableOutputFormatSuite.scala
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark
+
+
+import java.text.SimpleDateFormat
+import java.util.{Date, Locale}
+
+import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
+import org.apache.hadoop.hbase.{HBaseTestingUtility, TableName, TableNotFoundException}
+import org.apache.hadoop.hbase.util.Bytes
+import org.apache.hadoop.mapreduce.{Job, TaskAttemptID, TaskType}
+import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
+import org.apache.spark.{SparkConf, SparkContext}
+import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSuite}
+
+import scala.util.{Failure, Success, Try}
+
+
+// Unit tests for HBASE-20521: change get configuration(TableOutputFormat.conf) object first sequence from jobContext to getConf
+// this suite contains two tests, one for normal case(getConf return null, use jobContext), create new TableOutputformat object without init TableOutputFormat.conf object,
+// configuration object inside checkOutputSpecs came from jobContext.
+// The other one(getConf return conf object) we manually call "setConf" to init TableOutputFormat.conf, for making it more straight forward, we specify a nonexistent table
+// name in conf object, checkOutputSpecs will then throw TableNotFoundException exception
+class TableOutputFormatSuite extends FunSuite with
+  BeforeAndAfterEach with BeforeAndAfterAll with Logging{
+  @transient var sc: SparkContext = null
+  var TEST_UTIL = new HBaseTestingUtility
+
+  val tableName = "TableOutputFormatTest"
+  val tableNameTest = "NonExistentTable"
+  val columnFamily = "cf"
+
+  
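
The suite body is truncated by the digest; condensed to plain Java, the two cases it describes look roughly like the following (hypothetical test code, assuming a reachable cluster that has a table named "ExistingTable" and no table named "NonExistentTable"):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableNotFoundException;
    import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
    import org.apache.hadoop.mapreduce.Job;

    public class CheckOutputSpecsExample {
      public static void main(String[] args) throws Exception {
        // Case 1: setConf() is never called, so getConf() returns null and
        // checkOutputSpecs falls back to the job's configuration.
        Configuration jobConf = HBaseConfiguration.create();
        jobConf.set(TableOutputFormat.OUTPUT_TABLE, "ExistingTable");
        Job job = Job.getInstance(jobConf);
        TableOutputFormat<Object> format = new TableOutputFormat<>();
        format.checkOutputSpecs(job);   // passes while "ExistingTable" exists

        // Case 2: an explicitly injected conf names a nonexistent table; since
        // getConf() is consulted first, that conf wins and the check throws.
        Configuration badConf = HBaseConfiguration.create();
        badConf.set(TableOutputFormat.OUTPUT_TABLE, "NonExistentTable");
        format.setConf(badConf);
        try {
          format.checkOutputSpecs(job);
          throw new AssertionError("expected TableNotFoundException");
        } catch (TableNotFoundException expected) {
          System.out.println("rejected as expected: " + expected.getMessage());
        }
      }
    }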

[1/2] hbase git commit: Revert "Fix the flaky TestThriftHttpServer"

2018-05-08 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/master 4f7aa3b71 -> 0dcae90ea


Revert "Fix the flaky TestThriftHttpServer"

This reverts commit 4f7aa3b71d2ebfa57f890c5ebaaf166fb7128597.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dda8018b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dda8018b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dda8018b

Branch: refs/heads/master
Commit: dda8018b0fd2939f98c8c86158226202f4692333
Parents: 4f7aa3b
Author: huzheng 
Authored: Wed May 9 10:44:35 2018 +0800
Committer: huzheng 
Committed: Wed May 9 10:44:35 2018 +0800

--
 .../hbase/thrift/TestThriftHttpServer.java  | 27 +---
 1 file changed, 18 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dda8018b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
--
diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
index 2366ee7..6117953 100644
--- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
+++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
@@ -57,6 +57,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
  * interface and talk to it from client side.
  */
 @Category({ClientTests.class, LargeTests.class})
+
 public class TestThriftHttpServer {
 
   @ClassRule
@@ -117,14 +118,18 @@ public class TestThriftHttpServer {
     LOG.info("Starting HBase Thrift server with HTTP server: " + Joiner.on(" ").join(args));
 
     httpServerException = null;
-    httpServerThread = new Thread(() -> {
-      try {
-        thriftServer.doMain(args);
-      } catch (Exception e) {
-        httpServerException = e;
+    httpServerThread = new Thread(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          thriftServer.doMain(args);
+        } catch (Exception e) {
+          httpServerException = e;
+        }
       }
     });
-    httpServerThread.setName(ThriftServer.class.getSimpleName() + "-httpServer");
+    httpServerThread.setName(ThriftServer.class.getSimpleName() +
+        "-httpServer");
     httpServerThread.start();
   }
 
@@ -163,9 +168,13 @@ public class TestThriftHttpServer {
     startHttpServerThread(args.toArray(new String[args.size()]));
 
     // wait up to 10s for the server to start
-    HBaseTestingUtility.waitForHostPort(HConstants.LOCALHOST, port);
+    for (int i = 0; i < 100
+        && (thriftServer.serverRunner == null ||  thriftServer.serverRunner.httpServer ==
+        null); i++) {
+      Thread.sleep(100);
+    }
 
-    String url = "http://" + HConstants.LOCALHOST + ":" + port;
+    String url = "http://"+ HConstants.LOCALHOST + ":" + port;
     try {
       checkHttpMethods(url);
       talkToThriftServer(url, customHeaderSize);
@@ -177,7 +186,7 @@ public class TestThriftHttpServer {
 
     if (clientSideException != null) {
       LOG.error("Thrift client threw an exception " + clientSideException);
-      if (clientSideException instanceof TTransportException) {
+      if (clientSideException instanceof  TTransportException) {
         throw clientSideException;
       } else {
         throw new Exception(clientSideException);


[2/2] hbase git commit: HBASE-20543 Fix the flaky TestThriftHttpServer

2018-05-08 Thread openinx
HBASE-20543 Fix the flaky TestThriftHttpServer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0dcae90e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0dcae90e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0dcae90e

Branch: refs/heads/master
Commit: 0dcae90eaab985180ac145fa04077360aa03b8c7
Parents: dda8018
Author: huzheng 
Authored: Wed May 9 10:50:09 2018 +0800
Committer: huzheng 
Committed: Wed May 9 10:50:09 2018 +0800

--
 .../hbase/thrift/TestThriftHttpServer.java  | 27 +++-
 1 file changed, 9 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0dcae90e/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
--
diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
index 6117953..2366ee7 100644
--- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
+++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
@@ -57,7 +57,6 @@ import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
  * interface and talk to it from client side.
  */
 @Category({ClientTests.class, LargeTests.class})
-
 public class TestThriftHttpServer {
 
   @ClassRule
@@ -118,18 +117,14 @@ public class TestThriftHttpServer {
     LOG.info("Starting HBase Thrift server with HTTP server: " + Joiner.on(" ").join(args));
 
     httpServerException = null;
-    httpServerThread = new Thread(new Runnable() {
-      @Override
-      public void run() {
-        try {
-          thriftServer.doMain(args);
-        } catch (Exception e) {
-          httpServerException = e;
-        }
+    httpServerThread = new Thread(() -> {
+      try {
+        thriftServer.doMain(args);
+      } catch (Exception e) {
+        httpServerException = e;
       }
     });
-    httpServerThread.setName(ThriftServer.class.getSimpleName() +
-        "-httpServer");
+    httpServerThread.setName(ThriftServer.class.getSimpleName() + "-httpServer");
     httpServerThread.start();
   }
 
@@ -168,13 +163,9 @@ public class TestThriftHttpServer {
     startHttpServerThread(args.toArray(new String[args.size()]));
 
     // wait up to 10s for the server to start
-    for (int i = 0; i < 100
-        && (thriftServer.serverRunner == null ||  thriftServer.serverRunner.httpServer ==
-        null); i++) {
-      Thread.sleep(100);
-    }
+    HBaseTestingUtility.waitForHostPort(HConstants.LOCALHOST, port);
 
-    String url = "http://"+ HConstants.LOCALHOST + ":" + port;
+    String url = "http://" + HConstants.LOCALHOST + ":" + port;
     try {
       checkHttpMethods(url);
       talkToThriftServer(url, customHeaderSize);
@@ -186,7 +177,7 @@ public class TestThriftHttpServer {
 
     if (clientSideException != null) {
       LOG.error("Thrift client threw an exception " + clientSideException);
-      if (clientSideException instanceof  TTransportException) {
+      if (clientSideException instanceof TTransportException) {
         throw clientSideException;
       } else {
         throw new Exception(clientSideException);
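
The flaky part was the old wait loop: it polled internal fields (serverRunner, httpServer) that can become non-null before the HTTP listener actually accepts connections. HBaseTestingUtility.waitForHostPort blocks on real connection attempts instead. A rough illustrative sketch of what such a helper does (not the actual HBaseTestingUtility implementation):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.Socket;

    final class PortWaiter {
      // Poll with real connect attempts so the caller proceeds only once
      // something is listening on host:port, or fail after timeoutMs.
      static void waitForHostPort(String host, int port, long timeoutMs)
          throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (true) {
          try (Socket socket = new Socket()) {
            socket.connect(new InetSocketAddress(host, port), 1000);
            return;  // the server has bound the port and accepts connections
          } catch (IOException e) {
            if (System.currentTimeMillis() > deadline) {
              throw new IOException("Timed out waiting for " + host + ":" + port, e);
            }
            Thread.sleep(100);  // not up yet, retry shortly
          }
        }
      }
    }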



hbase git commit: Fix the flaky TestThriftHttpServer

2018-05-08 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/master b748ea3b0 -> 4f7aa3b71


Fix the flaky TestThriftHttpServer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4f7aa3b7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4f7aa3b7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4f7aa3b7

Branch: refs/heads/master
Commit: 4f7aa3b71d2ebfa57f890c5ebaaf166fb7128597
Parents: b748ea3
Author: huzheng 
Authored: Tue May 8 20:50:45 2018 +0800
Committer: huzheng 
Committed: Wed May 9 09:50:59 2018 +0800

--
 .../hbase/thrift/TestThriftHttpServer.java  | 27 +++-
 1 file changed, 9 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4f7aa3b7/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
--
diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
index 6117953..2366ee7 100644
--- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
+++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
@@ -57,7 +57,6 @@ import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
  * interface and talk to it from client side.
  */
 @Category({ClientTests.class, LargeTests.class})
-
 public class TestThriftHttpServer {
 
   @ClassRule
@@ -118,18 +117,14 @@ public class TestThriftHttpServer {
     LOG.info("Starting HBase Thrift server with HTTP server: " + Joiner.on(" ").join(args));
 
     httpServerException = null;
-    httpServerThread = new Thread(new Runnable() {
-      @Override
-      public void run() {
-        try {
-          thriftServer.doMain(args);
-        } catch (Exception e) {
-          httpServerException = e;
-        }
+    httpServerThread = new Thread(() -> {
+      try {
+        thriftServer.doMain(args);
+      } catch (Exception e) {
+        httpServerException = e;
       }
     });
-    httpServerThread.setName(ThriftServer.class.getSimpleName() +
-        "-httpServer");
+    httpServerThread.setName(ThriftServer.class.getSimpleName() + "-httpServer");
     httpServerThread.start();
   }
 
@@ -168,13 +163,9 @@ public class TestThriftHttpServer {
     startHttpServerThread(args.toArray(new String[args.size()]));
 
     // wait up to 10s for the server to start
-    for (int i = 0; i < 100
-        && (thriftServer.serverRunner == null ||  thriftServer.serverRunner.httpServer ==
-        null); i++) {
-      Thread.sleep(100);
-    }
+    HBaseTestingUtility.waitForHostPort(HConstants.LOCALHOST, port);
 
-    String url = "http://"+ HConstants.LOCALHOST + ":" + port;
+    String url = "http://" + HConstants.LOCALHOST + ":" + port;
     try {
       checkHttpMethods(url);
       talkToThriftServer(url, customHeaderSize);
@@ -186,7 +177,7 @@ public class TestThriftHttpServer {
 
     if (clientSideException != null) {
       LOG.error("Thrift client threw an exception " + clientSideException);
-      if (clientSideException instanceof  TTransportException) {
+      if (clientSideException instanceof TTransportException) {
         throw clientSideException;
       } else {
         throw new Exception(clientSideException);



hbase git commit: HBASE-20543 Fix the flaky TestThriftHttpServer

2018-05-08 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/branch-2 75a8e53ce -> 8d60a89c3


HBASE-20543 Fix the flaky TestThriftHttpServer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8d60a89c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8d60a89c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8d60a89c

Branch: refs/heads/branch-2
Commit: 8d60a89c348f9428dabb3ff924c4b2f22a127562
Parents: 75a8e53
Author: huzheng 
Authored: Wed May 9 09:56:34 2018 +0800
Committer: huzheng 
Committed: Wed May 9 10:41:08 2018 +0800

--
 .../hbase/thrift/TestThriftHttpServer.java  | 27 +++-
 1 file changed, 9 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8d60a89c/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
--
diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
index c3fecf6..2366ee7 100644
--- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
+++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
@@ -57,7 +57,6 @@ import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
  * interface and talk to it from client side.
  */
 @Category({ClientTests.class, LargeTests.class})
-
 public class TestThriftHttpServer {
 
   @ClassRule
@@ -118,18 +117,14 @@ public class TestThriftHttpServer {
     LOG.info("Starting HBase Thrift server with HTTP server: " + Joiner.on(" ").join(args));
 
     httpServerException = null;
-    httpServerThread = new Thread(new Runnable() {
-      @Override
-      public void run() {
-        try {
-          thriftServer.doMain(args);
-        } catch (Exception e) {
-          httpServerException = e;
-        }
+    httpServerThread = new Thread(() -> {
+      try {
+        thriftServer.doMain(args);
+      } catch (Exception e) {
+        httpServerException = e;
      }
     });
-    httpServerThread.setName(ThriftServer.class.getSimpleName() +
-        "-httpServer");
+    httpServerThread.setName(ThriftServer.class.getSimpleName() + "-httpServer");
     httpServerThread.start();
   }
 
@@ -168,13 +163,9 @@ public class TestThriftHttpServer {
     startHttpServerThread(args.toArray(new String[args.size()]));
 
     // wait up to 10s for the server to start
-    for (int i = 0; i < 100
-        && ( thriftServer.serverRunner == null ||  thriftServer.serverRunner.httpServer ==
-        null); i++) {
-      Thread.sleep(100);
-    }
+    HBaseTestingUtility.waitForHostPort(HConstants.LOCALHOST, port);
 
-    String url = "http://"+ HConstants.LOCALHOST + ":" + port;
+    String url = "http://" + HConstants.LOCALHOST + ":" + port;
     try {
       checkHttpMethods(url);
       talkToThriftServer(url, customHeaderSize);
@@ -186,7 +177,7 @@ public class TestThriftHttpServer {
 
     if (clientSideException != null) {
       LOG.error("Thrift client threw an exception " + clientSideException);
-      if (clientSideException instanceof  TTransportException) {
+      if (clientSideException instanceof TTransportException) {
         throw clientSideException;
       } else {
         throw new Exception(clientSideException);



[hbase] Git Push Summary

2018-05-08 Thread apurtell
Repository: hbase
Updated Tags:  refs/tags/rel/1.4.4 [created] 3d1c7dde7


hbase git commit: HBASE-20500 [rsgroup] should keep at least one server in default group

2018-05-08 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-2 9e6871901 -> 75a8e53ce


HBASE-20500 [rsgroup] should keep at least one server in default group

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/75a8e53c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/75a8e53c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/75a8e53c

Branch: refs/heads/branch-2
Commit: 75a8e53ce84c38fd06086560d87ba93b543ae5f1
Parents: 9e68719
Author: Yechao Chen 
Authored: Tue May 8 14:04:59 2018 +0800
Committer: tedyu 
Committed: Tue May 8 08:35:17 2018 -0700

--
 .../hbase/rsgroup/RSGroupAdminServer.java   |  5 +++
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java  | 47 
 2 files changed, 52 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/75a8e53c/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
--
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index f7e7731..9b58834 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -56,6 +56,8 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 @InterfaceAudience.Private
 public class RSGroupAdminServer implements RSGroupAdmin {
   private static final Logger LOG = LoggerFactory.getLogger(RSGroupAdminServer.class);
+  public static final String KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE = "should keep at least " +
+      "one server in 'default' RSGroup.";
 
   private MasterServices master;
   private final RSGroupInfoManager rsGroupInfoManager;
@@ -307,6 +309,9 @@ public class RSGroupAdminServer implements RSGroupAdmin {
       // Only move online servers (when moving from 'default') or servers from other
       // groups. This prevents bogus servers from entering groups
       if (RSGroupInfo.DEFAULT_GROUP.equals(srcGrp.getName())) {
+        if (srcGrp.getServers().size() <= servers.size()) {
+          throw new ConstraintException(KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE);
+        }
         checkOnlineServersOnly(servers);
       }
       // Ensure all servers are of same rsgroup.
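
Note the boundary in the new guard: with srcGrp.getServers().size() <= servers.size(), a request to move N or more servers out of a 'default' group holding N servers is rejected, so every accepted move leaves at least one server behind. The same invariant in isolation, as a hedged sketch (GroupMove and the exception type are illustrative stand-ins, not HBase API); the new testMoveServersFromDefaultGroup below exercises both sides of this boundary.

    import java.util.Set;

    final class GroupMove {
      static void checkKeepsOneInDefault(Set<String> defaultServers,
          Set<String> serversToMove) {
        // Reject unless strictly fewer servers move out than the group holds,
        // which guarantees at least one server stays in 'default'.
        if (defaultServers.size() <= serversToMove.size()) {
          throw new IllegalStateException(
              "should keep at least one server in 'default' RSGroup.");
        }
      }
    }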

http://git-wip-us.apache.org/repos/asf/hbase/blob/75a8e53c/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
--
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
index 9422bf8..e70a88b 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -339,6 +339,53 @@ public abstract class TestRSGroupsBase {
   }
 
   @Test
+  public void testMoveServersFromDefaultGroup() throws Exception {
+    //create groups and assign servers
+    rsGroupAdmin.addRSGroup("foo");
+
+    RSGroupInfo fooGroup = rsGroupAdmin.getRSGroupInfo("foo");
+    assertEquals(0, fooGroup.getServers().size());
+    RSGroupInfo defaultGroup = rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP);
+
+    //test remove all servers from default
+    try {
+      rsGroupAdmin.moveServers(defaultGroup.getServers(), fooGroup.getName());
+      fail(RSGroupAdminServer.KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE);
+    } catch (ConstraintException ex) {
+      assertTrue(ex.getMessage().contains(RSGroupAdminServer
+          .KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE));
+    }
+
+    //test success case, remove one server from default ,keep at least one server
+    if (defaultGroup.getServers().size() > 1) {
+      Address serverInDefaultGroup = defaultGroup.getServers().iterator().next();
+      LOG.info("moving server " + serverInDefaultGroup + " from group default to group " +
+          fooGroup.getName());
+      rsGroupAdmin.moveServers(Sets.newHashSet(serverInDefaultGroup), fooGroup.getName());
+    }
+
+    fooGroup = rsGroupAdmin.getRSGroupInfo("foo");
+    LOG.info("moving servers " + fooGroup.getServers() + " to group default");
+    rsGroupAdmin.moveServers(fooGroup.getServers(), RSGroupInfo.DEFAULT_GROUP);
+
+    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+      @Override
+      public boolean evaluate() throws Exception {
+        return getNumServers() ==
+

[3/3] hbase git commit: change getConf as first sequence instead of jobContext in TableOutputFormat.checkOutputSpec, add unit tests

2018-05-08 Thread mdrob
change getConf as first sequence instead of jobContext in TableOutputFormat.checkOutputSpec, add unit tests


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9e3b2880
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9e3b2880
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9e3b2880

Branch: refs/heads/branch-2.0
Commit: 9e3b28804d28ad586d12df24a2e5417c25cff857
Parents: 9653a4d
Author: michael.jin 
Authored: Fri May 4 11:33:50 2018 +0800
Committer: Mike Drob 
Committed: Tue May 8 10:27:26 2018 -0500

--
 .../org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java   | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9e3b2880/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
--
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
index 0a1928b..78be5af 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
@@ -174,9 +174,9 @@ implements Configurable {
   @Override
   public void checkOutputSpecs(JobContext context) throws IOException,
   InterruptedException {
-    Configuration hConf = context.getConfiguration();
-    if(hConf == null) {
-      hConf = this.conf;
+    Configuration hConf = getConf();
+    if (hConf == null) {
+      hConf = context.getConfiguration();
 }
 
 try (Admin admin = ConnectionFactory.createConnection(hConf).getAdmin()) {



[1/3] hbase git commit: change getConf as first sequence instead of jobContext in TableOutputFormat.checkOutputSpec, add unit tests

2018-05-08 Thread mdrob
Repository: hbase
Updated Branches:
  refs/heads/branch-2 6f2ec4639 -> 9e6871901
  refs/heads/branch-2.0 9653a4d0d -> 9e3b28804
  refs/heads/master 102f0bf9c -> b748ea3b0


change getConf as first sequence instead of jobContext in TableOutputFormat.checkOutputSpec, add unit tests


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b748ea3b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b748ea3b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b748ea3b

Branch: refs/heads/master
Commit: b748ea3b0d3a074b976fa0ae59fc591303e805a0
Parents: 102f0bf
Author: michael.jin 
Authored: Fri May 4 11:33:50 2018 +0800
Committer: Mike Drob 
Committed: Tue May 8 10:24:03 2018 -0500

--
 .../hbase/mapreduce/TableOutputFormat.java  |   4 +-
 .../hbase/spark/TableOutputFormatSuite.scala| 130 +++
 2 files changed, 132 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b748ea3b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
--
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
index 4eb2654..78be5af 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
@@ -174,9 +174,9 @@ implements Configurable {
   @Override
   public void checkOutputSpecs(JobContext context) throws IOException,
   InterruptedException {
-    Configuration hConf = context.getConfiguration();
+    Configuration hConf = getConf();
     if (hConf == null) {
-      hConf = this.conf;
+      hConf = context.getConfiguration();
 }
 
 try (Admin admin = ConnectionFactory.createConnection(hConf).getAdmin()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b748ea3b/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/TableOutputFormatSuite.scala
--
diff --git a/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/TableOutputFormatSuite.scala b/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/TableOutputFormatSuite.scala
new file mode 100644
index 0000000..b306f08
--- /dev/null
+++ b/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/TableOutputFormatSuite.scala
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.spark
+
+
+import java.text.SimpleDateFormat
+import java.util.{Date, Locale}
+
+import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
+import org.apache.hadoop.hbase.{HBaseTestingUtility, TableName, TableNotFoundException}
+import org.apache.hadoop.hbase.util.Bytes
+import org.apache.hadoop.mapreduce.{Job, TaskAttemptID, TaskType}
+import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
+import org.apache.spark.{SparkConf, SparkContext}
+import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSuite}
+
+import scala.util.{Failure, Success, Try}
+
+
+// Unit tests for HBASE-20521: change get configuration(TableOutputFormat.conf) object first sequence from jobContext to getConf
+// this suite contains two tests, one for normal case(getConf return null, use jobContext), create new TableOutputformat object without init TableOutputFormat.conf object,
+// configuration object inside checkOutputSpecs came from jobContext.
+// The other one(getConf return conf object) we manually call "setConf" to init TableOutputFormat.conf, for making it more straight forward, we specify a nonexistent table
+// name in conf object, checkOutputSpecs will then throw TableNotFoundException exception
+class TableOutputFormatSuite extends FunSuite with
+  BeforeAndAfterEach with BeforeAndAfterAll with Logging{
+  @transient var sc: SparkContext = null
+  

[2/3] hbase git commit: change getConf as first sequence instead of jobContext in TableOutputFormat.checkOutputSpec, add unit tests

2018-05-08 Thread mdrob
change getConf as first sequence instead of jobContext in TableOutputFormat.checkOutputSpec, add unit tests


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9e687190
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9e687190
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9e687190

Branch: refs/heads/branch-2
Commit: 9e68719014a62f37e7559329e44e2df49738ef6c
Parents: 6f2ec46
Author: michael.jin 
Authored: Fri May 4 11:33:50 2018 +0800
Committer: Mike Drob 
Committed: Tue May 8 10:25:56 2018 -0500

--
 .../org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java   | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9e687190/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
--
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
index 0a1928b..78be5af 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
@@ -174,9 +174,9 @@ implements Configurable {
   @Override
   public void checkOutputSpecs(JobContext context) throws IOException,
   InterruptedException {
-    Configuration hConf = context.getConfiguration();
-    if(hConf == null) {
-      hConf = this.conf;
+    Configuration hConf = getConf();
+    if (hConf == null) {
+      hConf = context.getConfiguration();
 }
 
 try (Admin admin = ConnectionFactory.createConnection(hConf).getAdmin()) {



hbase git commit: HBASE-20500 [rsgroup] should keep at least one server in default group

2018-05-08 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 1825af45b -> 102f0bf9c


HBASE-20500 [rsgroup] should keep at least one server in default group

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/102f0bf9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/102f0bf9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/102f0bf9

Branch: refs/heads/master
Commit: 102f0bf9c565f70afd272cd5f2cb39305c3783f8
Parents: 1825af4
Author: Yechao Chen 
Authored: Tue May 8 14:04:59 2018 +0800
Committer: tedyu 
Committed: Tue May 8 07:59:01 2018 -0700

--
 .../hbase/rsgroup/RSGroupAdminServer.java   |  5 +++
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java  | 47 
 2 files changed, 52 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/102f0bf9/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
--
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index 094fc1d..670e8aa 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -56,6 +56,8 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 @InterfaceAudience.Private
 public class RSGroupAdminServer implements RSGroupAdmin {
   private static final Logger LOG = LoggerFactory.getLogger(RSGroupAdminServer.class);
+  public static final String KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE = "should keep at least " +
+      "one server in 'default' RSGroup.";
 
   private MasterServices master;
   private final RSGroupInfoManager rsGroupInfoManager;
@@ -307,6 +309,9 @@ public class RSGroupAdminServer implements RSGroupAdmin {
       // Only move online servers (when moving from 'default') or servers from other
       // groups. This prevents bogus servers from entering groups
       if (RSGroupInfo.DEFAULT_GROUP.equals(srcGrp.getName())) {
+        if (srcGrp.getServers().size() <= servers.size()) {
+          throw new ConstraintException(KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE);
+        }
         checkOnlineServersOnly(servers);
       }
       // Ensure all servers are of same rsgroup.

http://git-wip-us.apache.org/repos/asf/hbase/blob/102f0bf9/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
--
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
index 9e29637..199dd98 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -340,6 +340,53 @@ public abstract class TestRSGroupsBase {
   }
 
   @Test
+  public void testMoveServersFromDefaultGroup() throws Exception {
+    //create groups and assign servers
+    rsGroupAdmin.addRSGroup("foo");
+
+    RSGroupInfo fooGroup = rsGroupAdmin.getRSGroupInfo("foo");
+    assertEquals(0, fooGroup.getServers().size());
+    RSGroupInfo defaultGroup = rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP);
+
+    //test remove all servers from default
+    try {
+      rsGroupAdmin.moveServers(defaultGroup.getServers(), fooGroup.getName());
+      fail(RSGroupAdminServer.KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE);
+    } catch (ConstraintException ex) {
+      assertTrue(ex.getMessage().contains(RSGroupAdminServer
+          .KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE));
+    }
+
+    //test success case, remove one server from default ,keep at least one server
+    if (defaultGroup.getServers().size() > 1) {
+      Address serverInDefaultGroup = defaultGroup.getServers().iterator().next();
+      LOG.info("moving server " + serverInDefaultGroup + " from group default to group " +
+          fooGroup.getName());
+      rsGroupAdmin.moveServers(Sets.newHashSet(serverInDefaultGroup), fooGroup.getName());
+    }
+
+    fooGroup = rsGroupAdmin.getRSGroupInfo("foo");
+    LOG.info("moving servers " + fooGroup.getServers() + " to group default");
+    rsGroupAdmin.moveServers(fooGroup.getServers(), RSGroupInfo.DEFAULT_GROUP);
+
+    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+      @Override
+      public boolean evaluate() throws Exception {
+        return getNumServers() ==
+

[29/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
@@ -147,2627 +147,2705 @@
 139  }
 140
 141  public static final String TABLE_NAME = "TestTable";
-142  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
-143  public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0);
-144  public static final int DEFAULT_VALUE_LENGTH = 1000;
-145  public static final int ROW_LENGTH = 26;
-146
-147  private static final int ONE_GB = 1024 * 1024 * 1000;
-148  private static final int DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-149  // TODO : should we make this configurable
-150  private static final int TAG_LENGTH = 256;
-151  private static final DecimalFormat FMT = new DecimalFormat("0.##");
-152  private static final MathContext CXT = MathContext.DECIMAL64;
-153  private static final BigDecimal MS_PER_SEC = BigDecimal.valueOf(1000);
-154  private static final BigDecimal BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-155  private static final TestOptions DEFAULT_OPTS = new TestOptions();
-156
-157  private static Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();
-158  private static final Path PERF_EVAL_DIR = new Path("performance_evaluation");
-159
-160  static {
-161    addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-162      "Run async random read test");
-163    addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-164      "Run async random write test");
-165    addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-166      "Run async sequential read test");
-167    addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-168      "Run async sequential write test");
-169    addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-170      "Run async scan test (read every row)");
-171    addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-172      "Run random read test");
-173    addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-174      "Run random seek and scan 100 test");
-175    addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-176      "Run random seek scan with both start and stop row (max 10 rows)");
-177    addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-178      "Run random seek scan with both start and stop row (max 100 rows)");
-179    addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-180      "Run random seek scan with both start and stop row (max 1000 rows)");
-181    addCommandDescriptor(RandomScanWithRange10000Test.class, "scanRange10000",
-182      "Run random seek scan with both start and stop row (max 10000 rows)");
-183    addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-184      "Run random write test");
-185    addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-186      "Run sequential read test");
-187    addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-188      "Run sequential write test");
-189    addCommandDescriptor(ScanTest.class, "scan",
-190      "Run scan test (read every row)");
-191    addCommandDescriptor(FilteredScanTest.class, "filterScan",
-192      "Run scan test using a filter to find a specific row based on it's value " +
-193      "(make sure to use --rows=20)");
-194    addCommandDescriptor(IncrementTest.class, "increment",
-195      "Increment on each row; clients overlap on keyspace so some concurrent operations");
-196    addCommandDescriptor(AppendTest.class, "append",
-197      "Append on each row; clients overlap on keyspace so some concurrent operations");
-198    addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-199      "CheckAndMutate on each row; clients overlap on keyspace so some concurrent operations");
-200    addCommandDescriptor(CheckAndPutTest.class, "checkAndPut",
-201      "CheckAndPut on each row; clients overlap on keyspace so some concurrent operations");
-202    addCommandDescriptor(CheckAndDeleteTest.class, "checkAndDelete",
-203      "CheckAndDelete on each row; clients overlap on keyspace so some concurrent operations");
-204  }
-205
-206  /**
-207   * Enum for map metrics.  Keep it out here rather than inside in the Map
-208   * inner-class so we can find associated properties.
-209   */
-210  protected static enum
[16/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange100Test.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange100Test.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange100Test.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange100Test.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange100Test.html
@@ -147,2627 +147,2705 @@
 139  }
 140
 141  public static final String TABLE_NAME = "TestTable";
-142  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
-143  public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0);
-144  public static final int DEFAULT_VALUE_LENGTH = 1000;
-145  public static final int ROW_LENGTH = 26;
-146
-147  private static final int ONE_GB = 1024 * 1024 * 1000;
-148  private static final int DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-149  // TODO : should we make this configurable
-150  private static final int TAG_LENGTH = 256;
-151  private static final DecimalFormat FMT = new DecimalFormat("0.##");
-152  private static final MathContext CXT = MathContext.DECIMAL64;
-153  private static final BigDecimal MS_PER_SEC = BigDecimal.valueOf(1000);
-154  private static final BigDecimal BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-155  private static final TestOptions DEFAULT_OPTS = new TestOptions();
-156
-157  private static Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();
-158  private static final Path PERF_EVAL_DIR = new Path("performance_evaluation");
-159
-160  static {
-161    addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-162      "Run async random read test");
-163    addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-164      "Run async random write test");
-165    addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-166      "Run async sequential read test");
-167    addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-168      "Run async sequential write test");
-169    addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-170      "Run async scan test (read every row)");
-171    addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-172      "Run random read test");
-173    addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-174      "Run random seek and scan 100 test");
-175    addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-176      "Run random seek scan with both start and stop row (max 10 rows)");
-177    addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-178      "Run random seek scan with both start and stop row (max 100 rows)");
-179    addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-180      "Run random seek scan with both start and stop row (max 1000 rows)");
-181    addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-182      "Run random seek scan with both start and stop row (max 1 rows)");
-183    addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-184      "Run random write test");
-185    addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-186      "Run sequential read test");
-187    addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-188      "Run sequential write test");
-189    addCommandDescriptor(ScanTest.class, "scan",
-190      "Run scan test (read every row)");
-191    addCommandDescriptor(FilteredScanTest.class, "filterScan",
-192      "Run scan test using a filter to find a specific row based on it's value " +
-193      "(make sure to use --rows=20)");
-194    addCommandDescriptor(IncrementTest.class, "increment",
-195      "Increment on each row; clients overlap on keyspace so some concurrent operations");
-196    addCommandDescriptor(AppendTest.class, "append",
-197      "Append on each row; clients overlap on keyspace so some concurrent operations");
-198    addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-199      "CheckAndMutate on each row; clients overlap on keyspace so some concurrent operations");
-200    addCommandDescriptor(CheckAndPutTest.class, "checkAndPut",
-201      "CheckAndPut on each row; clients overlap on keyspace so some concurrent operations");
-202    addCommandDescriptor(CheckAndDeleteTest.class, "checkAndDelete",
-203      "CheckAndDelete on each row; clients overlap on keyspace so some concurrent operations");
-204  }
-205
-206  /**
-207   * Enum for map metrics.  Keep it out here rather than inside in the Map
-208   * inner-class so we can find associated properties.
-209   */
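
The static block in the hunk above is PerformanceEvaluation's command registry: each test class is registered under a CLI name together with a help string, in a sorted COMMANDS map. Below is a minimal, self-contained Java sketch of that registry pattern. CmdDescriptor's exact fields and accessors, the Demo* classes, and the main method are assumptions for illustration only, not the real HBase code.

// A minimal sketch of the command-registry pattern in the static block above.
// CmdDescriptor's fields/accessors and the Demo* classes are hypothetical.
import java.util.Map;
import java.util.TreeMap;

public class CommandRegistrySketch {
  /** Pairs an implementation class with its CLI name and help text. */
  static final class CmdDescriptor {
    private final Class<?> cmdClass;
    private final String name;
    private final String description;

    CmdDescriptor(Class<?> cmdClass, String name, String description) {
      this.cmdClass = cmdClass;
      this.name = name;
      this.description = description;
    }

    Class<?> getCmdClass() { return cmdClass; }
    String getName() { return name; }
    String getDescription() { return description; }
  }

  // A TreeMap keeps the registry sorted by command name.
  private static final Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();

  private static void addCommandDescriptor(Class<?> cmdClass, String name,
      String description) {
    COMMANDS.put(name, new CmdDescriptor(cmdClass, name, description));
  }

  // Hypothetical stand-ins for the real test classes.
  static class DemoReadTest {}
  static class DemoWriteTest {}

  static {
    addCommandDescriptor(DemoReadTest.class, "demoRead", "Run demo read test");
    addCommandDescriptor(DemoWriteTest.class, "demoWrite", "Run demo write test");
  }

  public static void main(String[] args) {
    // Print a usage listing; iteration order is alphabetical thanks to TreeMap.
    for (CmdDescriptor d : COMMANDS.values()) {
      System.out.printf("%-10s %s (%s)%n",
          d.getName(), d.getDescription(), d.getCmdClass().getSimpleName());
    }
  }
}

Registering through a sorted map keeps the generated usage output in stable alphabetical order, which is presumably why COMMANDS is a TreeMap in the listing above as well.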

hbase-site git commit: INFRA-10751 Empty commit

2018-05-08 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site d70bb89e8 -> 25edd2062


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/25edd206
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/25edd206
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/25edd206

Branch: refs/heads/asf-site
Commit: 25edd2062242271c1b84b15fe049567ec743cd2f
Parents: d70bb89
Author: jenkins 
Authored: Tue May 8 14:48:13 2018 +
Committer: jenkins 
Committed: Tue May 8 14:48:13 2018 +

--

--




[10/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html

[42/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
index 1c14b10..4639252 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
-abstract static class PerformanceEvaluation.AsyncTest
+abstract static class PerformanceEvaluation.AsyncTest
 extends PerformanceEvaluation.TestBase
 
@@ -230,7 +230,7 @@
 connection
-protected org.apache.hadoop.hbase.client.AsyncConnection connection
+protected org.apache.hadoop.hbase.client.AsyncConnection connection
 
@@ -247,7 +247,7 @@
 AsyncTest
-AsyncTest(org.apache.hadoop.hbase.client.AsyncConnection con,
+AsyncTest(org.apache.hadoop.hbase.client.AsyncConnection con,
   PerformanceEvaluation.TestOptions options,
   PerformanceEvaluation.Status status)
 
@@ -266,7 +266,7 @@
 createConnection
-void createConnection()
+void createConnection()
 
 Specified by:
 createConnection in class PerformanceEvaluation.TestBase
 
@@ -279,7 +279,7 @@
 closeConnection
-void closeConnection()
+void closeConnection()
   throws IOException
 
 Specified by:
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
index 4092ce1..a75ee34 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
-abstract static class PerformanceEvaluation.BufferedMutatorTest
+abstract static class PerformanceEvaluation.BufferedMutatorTest
 extends PerformanceEvaluation.Test
 
@@ -253,7 +253,7 @@
 mutator
-protected org.apache.hadoop.hbase.client.BufferedMutator mutator
+protected org.apache.hadoop.hbase.client.BufferedMutator mutator
 
@@ -262,7 +262,7 @@
 table
-protected org.apache.hadoop.hbase.client.Table table
+protected org.apache.hadoop.hbase.client.Table table
 
@@ -279,7 +279,7 @@
 BufferedMutatorTest
-BufferedMutatorTest(org.apache.hadoop.hbase.client.Connection con,
+BufferedMutatorTest(org.apache.hadoop.hbase.client.Connection con,
 PerformanceEvaluation.TestOptions options,
 PerformanceEvaluation.Status status)
 
@@ -298,7 +298,7 @@
 onStartup
-void onStartup()
+void onStartup()
 throws IOException
 
 Specified by:
 
@@ -314,7 +314,7 @@
 onTakedown
-void onTakedown()
+void onTakedown()
  throws IOException
 
 Specified by:
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
index 49cdd9c..217e0da 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
@@ -132,7 +132,7 @@ var activeTableTab = "activeTableTab";
 
-abstract static class PerformanceEvaluation.CASTableTest
+abstract static class PerformanceEvaluation.CASTableTest
 extends PerformanceEvaluation.TableTest
 Base class for operations that are CAS-like; that read a value and then set it based off what
 they read. In this category is increment, append, checkAndPut, etc.
 
@@ -278,7 +278,7 @@
 qualifier
-private final byte[] qualifier
+private final byte[] qualifier
 
@@ -295,7 +295,7 @@
 CASTableTest
-CASTableTest(org.apache.hadoop.hbase.client.Connection con,
+CASTableTest(org.apache.hadoop.hbase.client.Connection con,
  PerformanceEvaluation.TestOptions options,

[21/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html

[44/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 6b1e876..9aba02b 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -284,7 +284,7 @@
 3609
 0
 0
-15875
+15879
 
 Files
 
@@ -617,7 +617,7 @@
 org/apache/hadoop/hbase/PerformanceEvaluation.java
 0
 0
-34
+37
 
 org/apache/hadoop/hbase/PerformanceEvaluationCommons.java
 0
@@ -3254,6951 +3254,6956 @@
 0
 4
 
+org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java
+0
+0
+1
+
 org/apache/hadoop/hbase/io/compress/Compression.java
 0
 0
 9
 [remaining rows of the violations table, org/apache/hadoop/hbase/io/crypto/Cipher.java through org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java, are unchanged context; only the alternating row markup shifted by one position]

[27/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html

[14/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html

[41/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
index 52d05d8..df30a00 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
 
-static class PerformanceEvaluation.ScanTest
+static class PerformanceEvaluation.ScanTest
 extends PerformanceEvaluation.TableTest
 
@@ -264,7 +264,7 @@
 testScanner
-private org.apache.hadoop.hbase.client.ResultScanner testScanner
+private org.apache.hadoop.hbase.client.ResultScanner testScanner
 
@@ -281,7 +281,7 @@
 ScanTest
-ScanTest(org.apache.hadoop.hbase.client.Connection con,
+ScanTest(org.apache.hadoop.hbase.client.Connection con,
  PerformanceEvaluation.TestOptions options,
  PerformanceEvaluation.Status status)
 
@@ -300,7 +300,7 @@
 testTakedown
-void testTakedown()
+void testTakedown()
 throws IOException
 
 Overrides:
 
@@ -316,7 +316,7 @@
 testRow
-void testRow(int i)
+void testRow(int i)
   throws IOException
 
 Specified by:
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
index daa53a1..fe182c6 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
 
-static class PerformanceEvaluation.SequentialReadTest
+static class PerformanceEvaluation.SequentialReadTest
 extends PerformanceEvaluation.TableTest
 
@@ -249,7 +249,7 @@
 SequentialReadTest
-SequentialReadTest(org.apache.hadoop.hbase.client.Connection con,
+SequentialReadTest(org.apache.hadoop.hbase.client.Connection con,
 PerformanceEvaluation.TestOptions options,
 PerformanceEvaluation.Status status)
 
@@ -268,7 +268,7 @@
 testRow
-void testRow(int i)
+void testRow(int i)
   throws IOException
 
 Specified by:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
index 4c8f51d..d5d599f 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
 
-static class PerformanceEvaluation.SequentialWriteTest
+static class PerformanceEvaluation.SequentialWriteTest
 extends PerformanceEvaluation.BufferedMutatorTest
 
@@ -249,7 +249,7 @@
 SequentialWriteTest
-SequentialWriteTest(org.apache.hadoop.hbase.client.Connection con,
+SequentialWriteTest(org.apache.hadoop.hbase.client.Connection con,
 PerformanceEvaluation.TestOptions options,
 PerformanceEvaluation.Status status)
 
@@ -268,7 +268,7 @@
 testRow
-void testRow(int i)
+void testRow(int i)
   throws IOException
 
 Specified by:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html
index 532dda5..02fdcb3 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html
+++ 

[12/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomWriteTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomWriteTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomWriteTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomWriteTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomWriteTest.html

[28/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html

[03/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html

[32/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html

[45/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/d70bb89e
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/d70bb89e
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/d70bb89e

Branch: refs/heads/asf-site
Commit: d70bb89e80991158b127c88133145cc218e32874
Parents: 8b3429d
Author: jenkins 
Authored: Tue May 8 14:47:57 2018 +
Committer: jenkins 
Committed: Tue May 8 14:47:57 2018 +

--
 acid-semantics.html |4 +-
 apache_hbase_reference_guide.pdf|4 +-
 book.html   |2 +-
 bulk-loads.html |4 +-
 checkstyle-aggregate.html   | 5152 -
 checkstyle.rss  |6 +-
 coc.html|4 +-
 dependencies.html   |4 +-
 dependency-convergence.html |4 +-
 dependency-info.html|4 +-
 dependency-management.html  |4 +-
 devapidocs/constant-values.html |4 +-
 .../org/apache/hadoop/hbase/Version.html|4 +-
 downloads.html  |8 +-
 export_control.html |4 +-
 index.html  |4 +-
 integration.html|4 +-
 issue-tracking.html |4 +-
 license.html|4 +-
 mail-lists.html |4 +-
 metrics.html|4 +-
 old_news.html   |4 +-
 plugin-management.html  |4 +-
 plugins.html|4 +-
 poweredbyhbase.html |4 +-
 project-info.html   |4 +-
 project-reports.html|4 +-
 project-summary.html|4 +-
 pseudo-distributed.html |4 +-
 replication.html|4 +-
 resources.html  |4 +-
 source-repository.html  |4 +-
 sponsors.html   |4 +-
 supportingprojects.html |4 +-
 team-list.html  |4 +-
 testdevapidocs/constant-values.html |   36 +-
 testdevapidocs/index-all.html   |   20 +-
 .../hbase/PerformanceEvaluation.AppendTest.html |6 +-
 ...rformanceEvaluation.AsyncRandomReadTest.html |   20 +-
 ...formanceEvaluation.AsyncRandomWriteTest.html |6 +-
 .../PerformanceEvaluation.AsyncScanTest.html|   14 +-
 ...manceEvaluation.AsyncSequentialReadTest.html |6 +-
 ...anceEvaluation.AsyncSequentialWriteTest.html |6 +-
 .../PerformanceEvaluation.AsyncTableTest.html   |   10 +-
 .../hbase/PerformanceEvaluation.AsyncTest.html  |   10 +-
 ...rformanceEvaluation.BufferedMutatorTest.html |   12 +-
 .../PerformanceEvaluation.CASTableTest.html |   12 +-
 ...erformanceEvaluation.CheckAndDeleteTest.html |6 +-
 ...erformanceEvaluation.CheckAndMutateTest.html |6 +-
 .../PerformanceEvaluation.CheckAndPutTest.html  |6 +-
 .../PerformanceEvaluation.CmdDescriptor.html|   16 +-
 .../hbase/PerformanceEvaluation.Counter.html|   10 +-
 ...PerformanceEvaluation.EvaluationMapTask.html |   16 +-
 .../PerformanceEvaluation.FilteredScanTest.html |   10 +-
 .../PerformanceEvaluation.IncrementTest.html|6 +-
 .../PerformanceEvaluation.RandomReadTest.html   |   16 +-
 ...Evaluation.RandomScanWithRange1Test.html |6 +-
 ...eEvaluation.RandomScanWithRange1000Test.html |6 +-
 ...ceEvaluation.RandomScanWithRange100Test.html |6 +-
 ...nceEvaluation.RandomScanWithRange10Test.html |6 +-
 ...manceEvaluation.RandomScanWithRangeTest.html |   12 +-
 ...erformanceEvaluation.RandomSeekScanTest.html |8 +-
 .../PerformanceEvaluation.RandomWriteTest.html  |6 +-
 .../hbase/PerformanceEvaluation.RunResult.html  |   12 +-
 .../hbase/PerformanceEvaluation.ScanTest.html   |   10 +-
 ...erformanceEvaluation.SequentialReadTest.html |6 +-
 ...rformanceEvaluation.SequentialWriteTest.html |6 +-
 .../hbase/PerformanceEvaluation.Status.html |4 +-
 .../hbase/PerformanceEvaluation.TableTest.html  |   10 +-
 .../hbase/PerformanceEvaluation.Test.html   |   10 +-
 .../hbase/PerformanceEvaluation.TestBase.html   |   86 +-
 .../PerformanceEvaluation.TestOptions.html  |  477 +-
 .../hadoop/hbase/PerformanceEvaluation.html |  139 +-
 

[24/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
@@ -147,2627 +147,2705 @@
 139  }
 140
 141  public static final String TABLE_NAME = "TestTable";
-142  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
-143  public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0);
-144  public static final int DEFAULT_VALUE_LENGTH = 1000;
-145  public static final int ROW_LENGTH = 26;
-146
-147  private static final int ONE_GB = 1024 * 1024 * 1000;
-148  private static final int DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-149  // TODO : should we make this configurable
-150  private static final int TAG_LENGTH = 256;
-151  private static final DecimalFormat FMT = new DecimalFormat("0.##");
-152  private static final MathContext CXT = MathContext.DECIMAL64;
-153  private static final BigDecimal MS_PER_SEC = BigDecimal.valueOf(1000);
-154  private static final BigDecimal BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-155  private static final TestOptions DEFAULT_OPTS = new TestOptions();
-156
-157  private static Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();
-158  private static final Path PERF_EVAL_DIR = new Path("performance_evaluation");
-159
-160  static {
-161    addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-162      "Run async random read test");
-163    addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-164      "Run async random write test");
-165    addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-166      "Run async sequential read test");
-167    addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-168      "Run async sequential write test");
-169    addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-170      "Run async scan test (read every row)");
-171    addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-172      "Run random read test");
-173    addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-174      "Run random seek and scan 100 test");
-175    addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-176      "Run random seek scan with both start and stop row (max 10 rows)");
-177    addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-178      "Run random seek scan with both start and stop row (max 100 rows)");
-179    addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-180      "Run random seek scan with both start and stop row (max 1000 rows)");
-181    addCommandDescriptor(RandomScanWithRange10000Test.class, "scanRange10000",
-182      "Run random seek scan with both start and stop row (max 10000 rows)");
-183    addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-184      "Run random write test");
-185    addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-186      "Run sequential read test");
-187    addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-188      "Run sequential write test");
-189    addCommandDescriptor(ScanTest.class, "scan",
-190      "Run scan test (read every row)");
-191    addCommandDescriptor(FilteredScanTest.class, "filterScan",
-192      "Run scan test using a filter to find a specific row based on it's value " +
-193      "(make sure to use --rows=20)");
-194    addCommandDescriptor(IncrementTest.class, "increment",
-195      "Increment on each row; clients overlap on keyspace so some concurrent operations");
-196    addCommandDescriptor(AppendTest.class, "append",
-197      "Append on each row; clients overlap on keyspace so some concurrent operations");
-198    addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-199      "CheckAndMutate on each row; clients overlap on keyspace so some concurrent operations");
-200    addCommandDescriptor(CheckAndPutTest.class, "checkAndPut",
-201      "CheckAndPut on each row; clients overlap on keyspace so some concurrent operations");
-202    addCommandDescriptor(CheckAndDeleteTest.class, "checkAndDelete",
-203      "CheckAndDelete on each row; clients overlap on keyspace so some concurrent operations");
-204  }
-205
-206  /**
-207   * Enum for map metrics.  Keep it out here rather than inside in the Map
-208   * inner-class so we can find associated properties.
-209   */
-210  protected static enum Counter {
-211    /** elapsed time */
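
The hunk above reads more easily with the registration pattern spelled out. Two details are worth noting. First, the arithmetic: DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH = 1,048,576,000 / 1,000 = 1,048,576 rows per (roughly) one gigabyte of values. Second, the command table: every test class is registered under a CLI name with a help string, and a TreeMap keeps the listing sorted by name. Below is a minimal, self-contained sketch of that registry pattern; the CmdDescriptor fields and the standalone class name are illustrative assumptions, not the exact HBase internals.

  import java.util.Map;
  import java.util.TreeMap;

  public class CommandRegistry {
    // Describes one runnable command: implementation class, CLI name, help text.
    static final class CmdDescriptor {
      final Class<?> cmdClass;
      final String name;
      final String description;

      CmdDescriptor(Class<?> cmdClass, String name, String description) {
        this.cmdClass = cmdClass;
        this.name = name;
        this.description = description;
      }
    }

    // TreeMap keeps commands sorted by name, so usage output is stable.
    private static final Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();

    static void addCommandDescriptor(Class<?> cmdClass, String name, String description) {
      COMMANDS.put(name, new CmdDescriptor(cmdClass, name, description));
    }

    static CmdDescriptor lookup(String name) {
      return COMMANDS.get(name); // null means "unknown command"
    }
  }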

[34/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html
@@ -147,2627 +147,2705 @@

[01/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 8b3429d50 -> d70bb89e8


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.html
index 84a0b52..845fa29 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.html
@@ -71,198 +71,200 @@
 063import org.junit.ClassRule;
 064import org.junit.Rule;
 065import org.junit.Test;
-066import org.junit.experimental.categories.Category;
-067import org.junit.rules.TestName;
-068import org.junit.runner.RunWith;
-069import org.junit.runners.Parameterized;
-070import org.junit.runners.Parameterized.Parameter;
-071import org.junit.runners.Parameterized.Parameters;
-072
-073import org.apache.hbase.thirdparty.io.netty.channel.Channel;
-074import org.apache.hbase.thirdparty.io.netty.channel.EventLoop;
-075import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
-076import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup;
-077import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel;
-078
-079@RunWith(Parameterized.class)
-080@Category({ MiscTests.class, LargeTests.class })
-081public class TestSaslFanOutOneBlockAsyncDFSOutput {
-082
-083  @ClassRule
-084  public static final HBaseClassTestRule CLASS_RULE =
-085      HBaseClassTestRule.forClass(TestSaslFanOutOneBlockAsyncDFSOutput.class);
-086
-087  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+066import org.junit.Ignore;
+067import org.junit.experimental.categories.Category;
+068import org.junit.rules.TestName;
+069import org.junit.runner.RunWith;
+070import org.junit.runners.Parameterized;
+071import org.junit.runners.Parameterized.Parameter;
+072import org.junit.runners.Parameterized.Parameters;
+073
+074import org.apache.hbase.thirdparty.io.netty.channel.Channel;
+075import org.apache.hbase.thirdparty.io.netty.channel.EventLoop;
+076import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
+077import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup;
+078import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel;
+079
+080@RunWith(Parameterized.class)
+081@Category({ MiscTests.class, LargeTests.class })
+082@Ignore
+083public class TestSaslFanOutOneBlockAsyncDFSOutput {
+084
+085  @ClassRule
+086  public static final HBaseClassTestRule CLASS_RULE =
+087      HBaseClassTestRule.forClass(TestSaslFanOutOneBlockAsyncDFSOutput.class);
 088
-089  private static DistributedFileSystem FS;
+089  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 090
-091  private static EventLoopGroup EVENT_LOOP_GROUP;
+091  private static DistributedFileSystem FS;
 092
-093  private static Class<? extends Channel> CHANNEL_CLASS;
+093  private static EventLoopGroup EVENT_LOOP_GROUP;
 094
-095  private static int READ_TIMEOUT_MS = 20;
+095  private static Class<? extends Channel> CHANNEL_CLASS;
 096
-097  private static final File KEYTAB_FILE =
-098    new File(TEST_UTIL.getDataTestDir("keytab").toUri().getPath());
-099
-100  private static MiniKdc KDC;
+097  private static int READ_TIMEOUT_MS = 20;
+098
+099  private static final File KEYTAB_FILE =
+100    new File(TEST_UTIL.getDataTestDir("keytab").toUri().getPath());
 101
-102  private static String HOST = "localhost";
+102  private static MiniKdc KDC;
 103
-104  private static String USERNAME;
+104  private static String HOST = "localhost";
 105
-106  private static String PRINCIPAL;
+106  private static String USERNAME;
 107
-108  private static String HTTP_PRINCIPAL;
+108  private static String PRINCIPAL;
 109
-110  private static String TEST_KEY_NAME = "test_key";
+110  private static String HTTP_PRINCIPAL;
 111
-112  @Rule
-113  public TestName name = new TestName();
-114
-115  @Parameter(0)
-116  public String protection;
-117
-118  @Parameter(1)
-119  public String encryptionAlgorithm;
-120
-121  @Parameter(2)
-122  public String cipherSuite;
-123
-124  @Parameters(name = "{index}: protection={0}, encryption={1}, cipherSuite={2}")
-125  public static Iterable<Object[]> data() {
-126    List<Object[]> params = new ArrayList<>();
-127    for (String protection : Arrays.asList("authentication", "integrity", "privacy")) {
-128      for (String encryptionAlgorithm : Arrays.asList("", "3des", "rc4")) {
-129        for (String cipherSuite : Arrays.asList("", CipherSuite.AES_CTR_NOPADDING.getName())) {
-130
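
The block being shuffled here ends in the test's data() method, which builds the parameter matrix: every combination of protection level, encryption algorithm, and cipher suite becomes one run of the (now @Ignore'd) test class. A minimal runnable sketch of that JUnit 4 cross-product idiom follows; the parameter values are taken from the hunk, while the reduced two-parameter shape and the trivial test body are assumptions for illustration.

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.List;

  import org.junit.Test;
  import org.junit.runner.RunWith;
  import org.junit.runners.Parameterized;
  import org.junit.runners.Parameterized.Parameter;
  import org.junit.runners.Parameterized.Parameters;

  @RunWith(Parameterized.class)
  public class CrossProductParamsTest {

    @Parameter(0)
    public String protection;

    @Parameter(1)
    public String encryptionAlgorithm;

    @Parameters(name = "{index}: protection={0}, encryption={1}")
    public static Iterable<Object[]> data() {
      List<Object[]> params = new ArrayList<>();
      // Nested loops enumerate the full cross product; each Object[] is one test run.
      for (String protection : Arrays.asList("authentication", "integrity", "privacy")) {
        for (String encryptionAlgorithm : Arrays.asList("", "3des", "rc4")) {
          params.add(new Object[] { protection, encryptionAlgorithm });
        }
      }
      return params;
    }

    @Test
    public void onePrintPerCombination() {
      // 3 protection levels x 3 algorithms = 9 parameterized runs.
      System.out.println(protection + "/" + encryptionAlgorithm);
    }
  }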

[35/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomWriteTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomWriteTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomWriteTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomWriteTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomWriteTest.html
@@ -147,2627 +147,2705 @@

[05/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Test.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Test.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Test.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Test.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Test.html
@@ -147,2627 +147,2705 @@

[25/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
@@ -147,2627 +147,2705 @@

[26/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
@@ -147,2627 +147,2705 @@

[40/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
index 377b938..6313ac8 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
-static class PerformanceEvaluation.TestOptions
+static class PerformanceEvaluation.TestOptions
 extends java.lang.Object
 Wraps up options passed to PerformanceEvaluation.
  This makes tracking all these arguments a little easier.
@@ -191,118 +191,122 @@ extends java.lang.Object
 cycles

+(package private) int
+families
+
 (package private) boolean
 filterAll

 (package private) boolean
 flushCommits

 (package private) boolean
 inMemoryCF

 (package private) org.apache.hadoop.hbase.MemoryCompactionPolicy
 inMemoryCompaction

 (package private) int
 measureAfter

 (package private) int
 multiGet

 (package private) boolean
 nomapred

 (package private) int
 noOfTags

 (package private) int
 numClientThreads

 (package private) boolean
 oneCon

 (package private) int
 perClientRunRows

 (package private) int
 period

 (package private) int
 presplitRegions

 (package private) int
 randomSleep

 (package private) int
 replicas

 (package private) boolean
 reportLatency

 (package private) float
 sampleRate

 (package private) org.apache.hadoop.hbase.client.Scan.ReadType
 scanReadType

 (package private) float
 size

 (package private) String
 splitPolicy

 (package private) int
 startRow

 (package private) String
 tableName

 (package private) int
 totalRows

 (package private) double
 traceRate

 (package private) boolean
 useTags

 (package private) boolean
 valueRandom

 (package private) int
 valueSize

 (package private) boolean
 valueZipf

 (package private) boolean
 writeToWAL
@@ -384,274 +388,282 @@ extends java.lang.Object
 getCycles()

+int
+getFamilies()
+
 org.apache.hadoop.hbase.MemoryCompactionPolicy
 getInMemoryCompaction()

 int
 getMeasureAfter()

 int
 getMultiGet()

 int
 getNoOfTags()

 int
 getNumClientThreads()

 int
 getPerClientRunRows()

 int
 getPeriod()

 int
 getPresplitRegions()

 int
 getRandomSleep()

 int
 getReplicas()

 float
 getSampleRate()

 float
 getSize()

 String

[37/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
@@ -147,2627 +147,2705 @@

[11/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html
@@ -147,2627 +147,2705 @@

[43/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 252788a..db00e1e 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 2007 - 2018 The Apache Software Foundation
 
   File: 3609,
- Errors: 15875,
+ Errors: 15879,
  Warnings: 0,
  Infos: 0
   
@@ -4605,7 +4605,7 @@ under the License.
   0
 
 
-  34
+  37
 
   
   
@@ -34579,7 +34579,7 @@ under the License.
   0
 
 
-  0
+  1
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/coc.html
--
diff --git a/coc.html b/coc.html
index 951de45..f8bfe45 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Code of Conduct Policy
@@ -375,7 +375,7 @@ email to mailto:priv...@hbase.apache.org;>the priv
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-05-07
+  Last Published: 
2018-05-08
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index 24260bb..7c53d9b 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependencies
 
@@ -440,7 +440,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-05-07
+  Last Published: 
2018-05-08
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index f76fb50..3f36672 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Reactor Dependency Convergence
 
@@ -1105,7 +1105,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-05-07
+  Last Published: 
2018-05-08
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index 49d0a12..689be40 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Dependency Information
 
@@ -313,7 +313,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-05-07
+  Last Published: 
2018-05-08
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/dependency-management.html
--
diff --git a/dependency-management.html b/dependency-management.html
index ce06f8c..98d10b8 100644
--- a/dependency-management.html
+++ b/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependency Management
 
@@ -969,7 +969,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-05-07
+  Last Published: 
2018-05-08
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/devapidocs/constant-values.html
--
diff --git a/devapidocs/constant-values.html b/devapidocs/constant-values.html
index 170b4f1..de81d02 100644
--- a/devapidocs/constant-values.html
+++ b/devapidocs/constant-values.html
@@ -3768,14 +3768,14 @@
 
 publicstaticfinalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 date
-"Mon May  7 14:39:14 UTC 2018"
+"Tue May  8 14:39:27 UTC 

[38/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.html
index d0285e1..66519d2 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestRegionServerAccounting
+public class TestRegionServerAccounting
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 
 
@@ -133,6 +133,14 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 static HBaseClassTestRule
 CLASS_RULE
 
+
+private static 
org.apache.hadoop.conf.Configuration
+conf
+
+
+private static float
+DEFAULT_MEMSTORE_SIZE
+
 
 
 
@@ -167,26 +175,30 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 void
-testOffheapMemstoreHigherWaterMarkLimitsDueToDataSize()
+setUpConf()
 
 
 void
-testOffheapMemstoreHigherWaterMarkLimitsDueToHeapSize()
+testOffheapMemstoreHigherWaterMarkLimitsDueToDataSize()
 
 
 void
-testOffheapMemstoreLowerWaterMarkLimitsDueToDataSize()
+testOffheapMemstoreHigherWaterMarkLimitsDueToHeapSize()
 
 
 void
-testOffheapMemstoreLowerWaterMarkLimitsDueToHeapSize()
+testOffheapMemstoreLowerWaterMarkLimitsDueToDataSize()
 
 
 void
-testOnheapMemstoreHigherWaterMarkLimits()
+testOffheapMemstoreLowerWaterMarkLimitsDueToHeapSize()
 
 
 void
+testOnheapMemstoreHigherWaterMarkLimits()
+
+
+void
 testOnheapMemstoreLowerWaterMarkLimits()
 
 
@@ -214,10 +226,32 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 
-
+
 
 CLASS_RULE
-public static finalHBaseClassTestRule CLASS_RULE
+public static finalHBaseClassTestRule CLASS_RULE
+
+
+
+
+
+
+
+DEFAULT_MEMSTORE_SIZE
+private static finalfloat DEFAULT_MEMSTORE_SIZE
+
+See Also:
+Constant
 Field Values
+
+
+
+
+
+
+
+
+conf
+private staticorg.apache.hadoop.conf.Configuration conf
 
 
 
@@ -234,7 +268,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 TestRegionServerAccounting
-public TestRegionServerAccounting()
+public TestRegionServerAccounting()
 
 
 
@@ -245,13 +279,22 @@ extends java.lang.Object
 
 
 Method Detail
+
+setUpConf
+public void setUpConf()
+
 
 
 
 
 
 testOnheapMemstoreHigherWaterMarkLimits
-public void testOnheapMemstoreHigherWaterMarkLimits()
+public void testOnheapMemstoreHigherWaterMarkLimits()
 
@@ -260,7 +303,7 @@ extends java.lang.Object
 
 testOnheapMemstoreLowerWaterMarkLimits
-public void testOnheapMemstoreLowerWaterMarkLimits()
+public void testOnheapMemstoreLowerWaterMarkLimits()
 
@@ -269,7 +312,7 @@ extends java.lang.Object
 
 testOffheapMemstoreHigherWaterMarkLimitsDueToDataSize
-public void testOffheapMemstoreHigherWaterMarkLimitsDueToDataSize()
+public void testOffheapMemstoreHigherWaterMarkLimitsDueToDataSize()
 
@@ -278,7 +321,7 @@ extends java.lang.Object
 
 testOffheapMemstoreHigherWaterMarkLimitsDueToHeapSize
-public void testOffheapMemstoreHigherWaterMarkLimitsDueToHeapSize()
+public void testOffheapMemstoreHigherWaterMarkLimitsDueToHeapSize()
 
@@ -287,7 +330,7 @@ extends java.lang.Object
 
 testOffheapMemstoreLowerWaterMarkLimitsDueToDataSize
-public void testOffheapMemstoreLowerWaterMarkLimitsDueToDataSize()
+public void testOffheapMemstoreLowerWaterMarkLimitsDueToDataSize()
 
@@ -296,7 +339,7 @@ extends java.lang.Object
 
 testOffheapMemstoreLowerWaterMarkLimitsDueToHeapSize
-public void testOffheapMemstoreLowerWaterMarkLimitsDueToHeapSize()
+public void testOffheapMemstoreLowerWaterMarkLimitsDueToHeapSize()
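
The hunk above shows TestRegionServerAccounting gaining a shared Configuration, a DEFAULT_MEMSTORE_SIZE constant, and a setUpConf() fixture that runs before the renamed memstore-limit tests. A minimal sketch of that fixture pattern follows; the 0.4f value and the configuration key are assumptions for illustration, not taken from the patch itself:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.junit.Before;

    public class TestRegionServerAccounting {
      // Assumed value: HBase's usual global memstore fraction, not read from the patch.
      private static final float DEFAULT_MEMSTORE_SIZE = 0.4f;
      private static Configuration conf;

      @Before
      public void setUpConf() {
        // Rebuild the Configuration before each test so per-test overrides
        // (onheap vs. offheap limits) cannot leak between methods.
        conf = HBaseConfiguration.create();
        conf.setFloat("hbase.regionserver.global.memstore.size", DEFAULT_MEMSTORE_SIZE);
      }
    }

Resetting the shared static state in a @Before method is what lets the six limit tests mutate conf freely without ordering dependencies.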
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 

[36/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
@@ -147,2627 +147,2705 @@
 139  }
 140
 141  public static final String TABLE_NAME = "TestTable";
-142  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
-143  public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0);
-144  public static final int DEFAULT_VALUE_LENGTH = 1000;
-145  public static final int ROW_LENGTH = 26;
-146
-147  private static final int ONE_GB = 1024 * 1024 * 1000;
-148  private static final int DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-149  // TODO : should we make this configurable
-150  private static final int TAG_LENGTH = 256;
-151  private static final DecimalFormat FMT = new DecimalFormat("0.##");
-152  private static final MathContext CXT = MathContext.DECIMAL64;
-153  private static final BigDecimal MS_PER_SEC = BigDecimal.valueOf(1000);
-154  private static final BigDecimal BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-155  private static final TestOptions DEFAULT_OPTS = new TestOptions();
-156
-157  private static Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();
-158  private static final Path PERF_EVAL_DIR = new Path("performance_evaluation");
-159
-160  static {
-161    addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-162      "Run async random read test");
-163    addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-164      "Run async random write test");
-165    addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-166      "Run async sequential read test");
-167    addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-168      "Run async sequential write test");
-169    addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-170      "Run async scan test (read every row)");
-171    addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-172      "Run random read test");
-173    addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-174      "Run random seek and scan 100 test");
-175    addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-176      "Run random seek scan with both start and stop row (max 10 rows)");
-177    addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-178      "Run random seek scan with both start and stop row (max 100 rows)");
-179    addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-180      "Run random seek scan with both start and stop row (max 1000 rows)");
-181    addCommandDescriptor(RandomScanWithRange10000Test.class, "scanRange10000",
-182      "Run random seek scan with both start and stop row (max 10000 rows)");
-183    addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-184      "Run random write test");
-185    addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-186      "Run sequential read test");
-187    addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-188      "Run sequential write test");
-189    addCommandDescriptor(ScanTest.class, "scan",
-190      "Run scan test (read every row)");
-191    addCommandDescriptor(FilteredScanTest.class, "filterScan",
-192      "Run scan test using a filter to find a specific row based on it's value " +
-193      "(make sure to use --rows=20)");
-194    addCommandDescriptor(IncrementTest.class, "increment",
-195      "Increment on each row; clients overlap on keyspace so some concurrent operations");
-196    addCommandDescriptor(AppendTest.class, "append",
-197      "Append on each row; clients overlap on keyspace so some concurrent operations");
-198    addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-199      "CheckAndMutate on each row; clients overlap on keyspace so some concurrent operations");
-200    addCommandDescriptor(CheckAndPutTest.class, "checkAndPut",
-201      "CheckAndPut on each row; clients overlap on keyspace so some concurrent operations");
-202    addCommandDescriptor(CheckAndDeleteTest.class, "checkAndDelete",
-203      "CheckAndDelete on each row; clients overlap on keyspace so some concurrent operations");
-204  }
-205
-206  /**
-207   * Enum for map metrics.  Keep it out here rather than inside in the Map
-208   * inner-class so we can find associated properties.
-209   */
-210  protected static enum Counter {
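
For context, the block these hunks keep rewriting is PerformanceEvaluation's command registry: a static, sorted map from CLI command names to descriptors, filled once in a static initializer and consulted when the tool parses its first argument. A self-contained sketch of the pattern follows; CommandRegistry and lookup() are illustrative stand-ins, not the class's real API:

    import java.util.Map;
    import java.util.TreeMap;

    final class CommandRegistry {
      /** Pairs a test implementation class with its CLI name and help text. */
      static final class CmdDescriptor {
        final Class<?> cmdClass;
        final String name;
        final String description;
        CmdDescriptor(Class<?> cmdClass, String name, String description) {
          this.cmdClass = cmdClass;
          this.name = name;
          this.description = description;
        }
      }

      // TreeMap keeps the generated help listing sorted by command name.
      private static final Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();

      static void addCommandDescriptor(Class<?> cmdClass, String name, String description) {
        COMMANDS.put(name, new CmdDescriptor(cmdClass, name, description));
      }

      static {
        // Illustrative entries; the real registry maps HBase test classes.
        addCommandDescriptor(Runnable.class, "randomRead", "Run random read test");
        addCommandDescriptor(Runnable.class, "sequentialWrite", "Run sequential write test");
      }

      /** Resolves a CLI argument such as "randomRead"; returns null if unknown. */
      static CmdDescriptor lookup(String name) {
        return COMMANDS.get(name);
      }
    }

The descriptor carries a class object that is later instantiated reflectively, which is why registering a new test needs only a class literal and two strings.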

[23/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Counter.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Counter.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Counter.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Counter.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Counter.html
@@ -147,2627 +147,2705 @@

[13/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomSeekScanTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomSeekScanTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomSeekScanTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomSeekScanTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomSeekScanTest.html
@@ -147,2627 +147,2705 @@

[15/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange10Test.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange10Test.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange10Test.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange10Test.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange10Test.html
@@ -147,2627 +147,2705 @@
[22/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html
@@ -147,2627 +147,2705 @@

[08/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
@@ -147,2627 +147,2705 @@

[06/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TableTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TableTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TableTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TableTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TableTest.html
@@ -147,2627 +147,2705 @@

[09/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
@@ -147,2627 +147,2705 @@

[33/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html
@@ -147,2627 +147,2705 @@

[07/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html
@@ -147,2627 +147,2705 @@

[20/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
@@ -147,2627 +147,2705 @@

[17/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1000Test.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1000Test.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1000Test.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1000Test.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1000Test.html
@@ -147,2627 +147,2705 @@

[30/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html

[39/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html
index b684b50..0653ad2 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html
@@ -352,56 +352,60 @@ implements org.apache.hadoop.util.Tool
 DEFAULT_VALUE_LENGTH

-static byte[]
-FAMILY_NAME
+static String
+FAMILY_NAME_BASE
+
+static byte[]
+FAMILY_ZERO

 private static DecimalFormat
 FMT

 (package private) static String
 JOB_INPUT_FILENAME
 Each client has one mapper to do the work, and client do the resulting count in a map task.

 private static org.slf4j.Logger
 LOG

 private static com.fasterxml.jackson.databind.ObjectMapper
 MAPPER

 private static BigDecimal
 MS_PER_SEC

 private static int
 ONE_GB

 private static org.apache.hadoop.fs.Path
 PERF_EVAL_DIR

 (package private) static String
 RANDOM_READ

 (package private) static String
 RANDOM_SEEK_SCAN

 static int
 ROW_LENGTH

 static String
 TABLE_NAME

 private static int
 TAG_LENGTH

@@ -447,9 +451,10 @@ implements org.apache.hadoop.util.Tool

 private static String
-calculateMbps(int rows,
+calculateMbps(int rows,
               long timeMs,
               int valueSize,
+              int families,
               int columns)
 Compute a throughput rate in MB/s.

@@ -670,13 +675,26 @@ implements org.apache.hadoop.util.Tool

+FAMILY_NAME_BASE
+public static final String FAMILY_NAME_BASE
+
+See Also:
+Constant Field Values
+
-FAMILY_NAME
-public static final byte[] FAMILY_NAME
+FAMILY_ZERO
+public static final byte[] FAMILY_ZERO

 COLUMN_ZERO
-public static final byte[] COLUMN_ZERO
+public static final byte[] COLUMN_ZERO

 DEFAULT_VALUE_LENGTH
-public static final int DEFAULT_VALUE_LENGTH
+public static final int DEFAULT_VALUE_LENGTH

 See Also:
 Constant Field Values

 ROW_LENGTH
-public static final int ROW_LENGTH
+public static final int ROW_LENGTH

 See Also:
 Constant Field Values

 ONE_GB
-private static final int ONE_GB
+private static final int ONE_GB

 See Also:
 Constant Field Values

 DEFAULT_ROWS_PER_GB
-private static final int DEFAULT_ROWS_PER_GB
+private static final int DEFAULT_ROWS_PER_GB

 See Also:
 Constant Field Values

 TAG_LENGTH
-private static final int TAG_LENGTH
+private static final int TAG_LENGTH

 See Also:
 Constant Field Values

 FMT
-private static final DecimalFormat FMT
+private static final DecimalFormat FMT

 CXT
-private static final MathContext CXT
+private static final MathContext CXT
[19/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomReadTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomReadTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomReadTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomReadTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomReadTest.html

[31/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html

[04/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TestBase.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TestBase.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TestBase.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TestBase.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TestBase.html

[02/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.html

[18/45] hbase-site git commit: Published site at 1825af45b328cd54680c5c552f07bb12c4705fdb.

2018-05-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d70bb89e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1Test.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1Test.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1Test.html
index 7cf23c4..3f8844b 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1Test.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1Test.html

[12/31] hbase git commit: HBASE-19078 Add a remote peer cluster wal directory config for synchronous replication

2018-05-08 Thread zhangduo
HBASE-19078 Add a remote peer cluster wal directory config for synchronous replication

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ec519822
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ec519822
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ec519822

Branch: refs/heads/HBASE-19064
Commit: ec5198226a10d2241def862cfea017fe8971a07d
Parents: a9bf4eb
Author: Guanghao Zhang 
Authored: Sat Jan 13 18:55:28 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../replication/ReplicationPeerConfigUtil.java  |  6 ++
 .../replication/ReplicationPeerConfig.java  | 20 -
 .../ReplicationPeerConfigBuilder.java   |  7 ++
 .../src/main/protobuf/Replication.proto |  1 +
 .../replication/ReplicationPeerManager.java | 15 
 .../replication/TestReplicationAdmin.java   | 77 
 .../src/main/ruby/hbase/replication_admin.rb| 14 ++--
 hbase-shell/src/main/ruby/hbase_constants.rb|  1 +
 .../src/main/ruby/shell/commands/add_peer.rb| 21 +-
 .../src/main/ruby/shell/commands/list_peers.rb  | 19 -
 .../test/ruby/hbase/replication_admin_test.rb   | 16 
 11 files changed, 186 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ec519822/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index b1c1713..474ded3 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -319,6 +319,9 @@ public final class ReplicationPeerConfigUtil {
 
excludeNamespacesList.stream().map(ByteString::toStringUtf8).collect(Collectors.toSet()));
 }
 
+if (peer.hasRemoteWALDir()) {
+  builder.setRemoteWALDir(peer.getRemoteWALDir());
+}
 return builder.build();
   }
 
@@ -376,6 +379,9 @@ public final class ReplicationPeerConfigUtil {
   }
 }
 
+if (peerConfig.getRemoteWALDir() != null) {
+  builder.setRemoteWALDir(peerConfig.getRemoteWALDir());
+}
 return builder.build();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ec519822/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index e0d9a4c..97abc74 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -47,6 +47,8 @@ public class ReplicationPeerConfig {
  private Set<String> excludeNamespaces = null;
   private long bandwidth = 0;
   private final boolean serial;
+  // Used by synchronous replication
+  private String remoteWALDir;
 
   private ReplicationPeerConfig(ReplicationPeerConfigBuilderImpl builder) {
 this.clusterKey = builder.clusterKey;
@@ -66,6 +68,7 @@ public class ReplicationPeerConfig {
 : null;
 this.bandwidth = builder.bandwidth;
 this.serial = builder.serial;
+this.remoteWALDir = builder.remoteWALDir;
   }
 
   private Map
@@ -213,6 +216,10 @@ public class ReplicationPeerConfig {
 return this;
   }
 
+  public String getRemoteWALDir() {
+return this.remoteWALDir;
+  }
+
   public static ReplicationPeerConfigBuilder newBuilder() {
 return new ReplicationPeerConfigBuilderImpl();
   }
@@ -230,7 +237,8 @@ public class ReplicationPeerConfig {
   .setReplicateAllUserTables(peerConfig.replicateAllUserTables())
   .setExcludeTableCFsMap(peerConfig.getExcludeTableCFsMap())
   .setExcludeNamespaces(peerConfig.getExcludeNamespaces())
-  .setBandwidth(peerConfig.getBandwidth()).setSerial(peerConfig.isSerial());
+  .setBandwidth(peerConfig.getBandwidth()).setSerial(peerConfig.isSerial())
+  .setRemoteWALDir(peerConfig.getRemoteWALDir());
 return builder;
   }
 
@@ -259,6 +267,8 @@ public class ReplicationPeerConfig {
 
 private boolean serial = false;
 
+private String remoteWALDir = null;
+
 @Override
 public ReplicationPeerConfigBuilder setClusterKey(String 
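Taken together, the new builder method and getter let a client attach the remote WAL directory to a peer config. A minimal sketch of that usage; the cluster key and path are placeholder values, and the chained calls assume the usual ReplicationPeerConfigBuilder fluent style ending in build():

import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class RemoteWalDirExample {
  public static void main(String[] args) {
    // Placeholder values; only setRemoteWALDir/getRemoteWALDir are new in this patch.
    ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
        .setClusterKey("zk1.example.com:2181:/hbase")            // peer cluster key (placeholder)
        .setRemoteWALDir("hdfs://peer-cluster/hbase/remoteWALs") // new HBASE-19078 field
        .build();
    System.out.println(peerConfig.getRemoteWALDir());
  }
}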

[19/31] hbase git commit: HBASE-19990 Create remote wal directory when transiting to state S

2018-05-08 Thread zhangduo
HBASE-19990 Create remote wal directory when transiting to state S


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dff080b8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dff080b8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dff080b8

Branch: refs/heads/HBASE-19064
Commit: dff080b8cff1c02dc9ac0df3cd0914083dba6690
Parents: 262a089
Author: zhangduo 
Authored: Wed Feb 14 16:01:16 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../procedure2/ProcedureYieldException.java |  9 --
 .../hbase/replication/ReplicationUtils.java |  2 ++
 .../hadoop/hbase/master/MasterFileSystem.java   | 19 ++---
 .../master/procedure/MasterProcedureEnv.java|  5 
 ...ransitPeerSyncReplicationStateProcedure.java | 29 
 .../hbase/replication/TestSyncReplication.java  |  8 ++
 6 files changed, 55 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dff080b8/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
index 0487ac5b..dbb9981 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
@@ -15,16 +15,21 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.procedure2;
 
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 
-// TODO: Not used yet
+/**
+ * Indicate that a procedure wants to be rescheduled. Usually because something is wrong but
+ * we do not want to fail the procedure.
+ * 
+ * TODO: need to support scheduling after a delay.
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Stable
 public class ProcedureYieldException extends ProcedureException {
+
   /** default constructor */
   public ProcedureYieldException() {
 super();

http://git-wip-us.apache.org/repos/asf/hbase/blob/dff080b8/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index d94cb00..e402d0f 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -41,6 +41,8 @@ public final class ReplicationUtils {
 
   public static final String REPLICATION_ATTR_NAME = "__rep__";
 
+  public static final String REMOTE_WAL_DIR_NAME = "remoteWALs";
+
   private ReplicationUtils() {
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/dff080b8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 864be02..7ccbd71 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -133,7 +134,6 @@ public class MasterFileSystem {
* Idempotent.
*/
   private void createInitialFileSystemLayout() throws IOException {
-
 final String[] protectedSubDirs = new String[] {
 HConstants.BASE_NAMESPACE_DIR,
 HConstants.HFILE_ARCHIVE_DIRECTORY,
@@ -145,7 +145,8 @@ public class MasterFileSystem {
   HConstants.HREGION_LOGDIR_NAME,
   HConstants.HREGION_OLDLOGDIR_NAME,
   HConstants.CORRUPT_DIR_NAME,
-  WALProcedureStore.MASTER_PROCEDURE_LOGDIR
+  
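The patch folds the remote WAL directory into MasterFileSystem's protected directory layout. A minimal sketch of the bootstrap step it enables, assuming the directory lives directly under the cluster root and mirroring the REMOTE_WAL_DIR_NAME constant added above; the helper name is hypothetical:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RemoteWalDirBootstrap {
  // "remoteWALs" mirrors ReplicationUtils.REMOTE_WAL_DIR_NAME from this patch.
  static final String REMOTE_WAL_DIR_NAME = "remoteWALs";

  // Hypothetical helper: ensure <rootDir>/remoteWALs exists before a peer
  // transits to STANDBY ("state S").
  static Path ensureRemoteWalDir(Configuration conf, Path rootDir) throws IOException {
    FileSystem fs = rootDir.getFileSystem(conf);
    Path remoteWalDir = new Path(rootDir, REMOTE_WAL_DIR_NAME);
    if (!fs.exists(remoteWalDir) && !fs.mkdirs(remoteWalDir)) {
      throw new IOException("Can not create remote WAL dir " + remoteWalDir);
    }
    return remoteWalDir;
  }
}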

[01/31] hbase git commit: HBASE-20508 TestIncrementalBackupWithBulkLoad doesn't need to be Parameterized test [Forced Update!]

2018-05-08 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/HBASE-19064 cb3ca05e4 -> 04c7be5da (forced update)


HBASE-20508 TestIncrementalBackupWithBulkLoad doesn't need to be Parameterized test

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5e14e125
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5e14e125
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5e14e125

Branch: refs/heads/HBASE-19064
Commit: 5e14e125b2d930dbe29760d5872fd78e9c8299fe
Parents: 971f535
Author: maoling 
Authored: Sun May 6 15:59:21 2018 +0800
Committer: Chia-Ping Tsai 
Committed: Mon May 7 09:36:44 2018 +0800

--
 .../backup/TestIncrementalBackupWithBulkLoad.java   | 16 
 1 file changed, 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5e14e125/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
--
diff --git 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
index 34f732c..74dd569 100644
--- 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
+++ 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.hbase.backup;
 
 import static org.junit.Assert.assertTrue;
 
-import java.util.ArrayList;
-import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -41,8 +39,6 @@ import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -57,7 +53,6 @@ import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
  * 6 Incremental backup t1
  */
 @Category(LargeTests.class)
-@RunWith(Parameterized.class)
 public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {
 
   @ClassRule
@@ -66,17 +61,6 @@ public class TestIncrementalBackupWithBulkLoad extends 
TestBackupBase {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(TestIncrementalBackupDeleteTable.class);
 
-  @Parameterized.Parameters
-  public static Collection<Object[]> data() {
-    secure = true;
-    List<Object[]> params = new ArrayList<>();
-    params.add(new Object[] {Boolean.TRUE});
-    return params;
-  }
-
-  public TestIncrementalBackupWithBulkLoad(Boolean b) {
-  }
-
   // implement all test cases in 1 test since incremental backup/restore has 
dependencies
   @Test
   public void TestIncBackupDeleteTable() throws Exception {



[21/31] hbase git commit: HBASE-19957 General framework to transit sync replication state

2018-05-08 Thread zhangduo
HBASE-19957 General framework to transit sync replication state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b2c2e9e1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b2c2e9e1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b2c2e9e1

Branch: refs/heads/HBASE-19064
Commit: b2c2e9e11aa4b5c6ef80acf385b0d263abb9f28f
Parents: a9f6157
Author: zhangduo 
Authored: Fri Feb 9 18:33:28 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../replication/ReplicationPeerConfig.java  |   2 -
 .../replication/ReplicationPeerDescription.java |   5 +-
 .../hbase/replication/SyncReplicationState.java |  19 +-
 .../org/apache/hadoop/hbase/HConstants.java |   3 +
 .../src/main/protobuf/MasterProcedure.proto |  20 +-
 .../hbase/replication/ReplicationPeerImpl.java  |  45 -
 .../replication/ReplicationPeerStorage.java |  25 ++-
 .../hbase/replication/ReplicationPeers.java |  27 ++-
 .../replication/ZKReplicationPeerStorage.java   |  63 +--
 .../hbase/coprocessor/MasterObserver.java   |   7 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   4 +-
 .../hbase/master/MasterCoprocessorHost.java |  12 +-
 .../replication/AbstractPeerProcedure.java  |  14 +-
 .../master/replication/ModifyPeerProcedure.java |  11 --
 .../replication/RefreshPeerProcedure.java   |  18 +-
 .../replication/ReplicationPeerManager.java |  89 +
 ...ransitPeerSyncReplicationStateProcedure.java | 181 ---
 .../hbase/regionserver/HRegionServer.java   |  35 ++--
 .../regionserver/ReplicationSourceService.java  |  11 +-
 .../regionserver/PeerActionListener.java|   4 +-
 .../regionserver/PeerProcedureHandler.java  |  16 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  52 +-
 .../regionserver/RefreshPeerCallable.java   |   7 +
 .../replication/regionserver/Replication.java   |  22 ++-
 .../regionserver/ReplicationSourceManager.java  |  41 +++--
 .../SyncReplicationPeerInfoProvider.java|  43 +
 .../SyncReplicationPeerInfoProviderImpl.java|  71 
 .../SyncReplicationPeerMappingManager.java  |  48 +
 .../SyncReplicationPeerProvider.java|  35 
 .../hbase/wal/SyncReplicationWALProvider.java   |  35 ++--
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  47 ++---
 .../replication/TestReplicationAdmin.java   |   3 +-
 .../TestReplicationSourceManager.java   |   5 +-
 .../wal/TestSyncReplicationWALProvider.java |  36 ++--
 34 files changed, 743 insertions(+), 313 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b2c2e9e1/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index 997a155..cc7b4bc 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.replication;
 
 import java.util.Collection;
@@ -25,7 +24,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
-
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b2c2e9e1/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
index 2d077c5..b0c27bb 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
@@ -20,7 +20,10 @@ package org.apache.hadoop.hbase.replication;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
- * The POJO equivalent of ReplicationProtos.ReplicationPeerDescription
+ * The POJO equivalent of ReplicationProtos.ReplicationPeerDescription.
+ * 
+ * To developers: here we do not store the new sync replication state since it is just an
+ * intermediate state and this class is 
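The framework's central object is the peer's sync replication state. A minimal sketch of such a state enum follows, with the transitions the procedure drives as comments; the names follow the HBASE-19064 design discussion ("state S" in the related commit titles reads as STANDBY), but treat the exact set as an assumption rather than the literal contents of SyncReplicationState.java:

public enum SyncReplicationStateSketch {
  NONE,             // peer does not take part in synchronous replication
  ACTIVE,           // writes go to both the local and the remote WAL
  DOWNGRADE_ACTIVE, // remote cluster unreachable: local WAL only, catch up later
  STANDBY;          // replay remote WALs; normal client writes are rejected

  public static void main(String[] args) {
    for (SyncReplicationStateSketch s : values()) {
      System.out.println(s.ordinal() + " -> " + s);
    }
  }
}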

[05/31] hbase git commit: HBASE-20536 Make TestRegionServerAccounting stable and it should not use absolute numbers

2018-05-08 Thread zhangduo
HBASE-20536 Make TestRegionServerAccounting stable and it should not use absolute numbers


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3a2a76f6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3a2a76f6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3a2a76f6

Branch: refs/heads/HBASE-19064
Commit: 3a2a76f6f8c55ac644f2ef0a307689b09c341375
Parents: 992a5e8
Author: Guanghao Zhang 
Authored: Mon May 7 17:48:15 2018 +0800
Committer: Guanghao Zhang 
Committed: Tue May 8 08:20:48 2018 +0800

--
 .../TestRegionServerAccounting.java | 35 +++-
 1 file changed, 19 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2a76f6/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java
index 7bd9e16..fb246d5 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -35,14 +36,22 @@ public class TestRegionServerAccounting {
   public static final HBaseClassTestRule CLASS_RULE =
   HBaseClassTestRule.forClass(TestRegionServerAccounting.class);
 
+  private final static float DEFAULT_MEMSTORE_SIZE = 0.2f;
+
+  private static Configuration conf;
+
+  @Before
+  public void setUpConf() {
+conf = HBaseConfiguration.create();
+conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_KEY, DEFAULT_MEMSTORE_SIZE);
+  }
+
   @Test
   public void testOnheapMemstoreHigherWaterMarkLimits() {
-Configuration conf = HBaseConfiguration.create();
-conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_KEY, 0.2f);
-// try for default cases
RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf);
+long dataSize = regionServerAccounting.getGlobalMemStoreLimit();
 MemStoreSize memstoreSize =
-new MemStoreSize((3L * 1024L * 1024L * 1024L), (1L * 1024L * 1024L * 1024L), 0);
+new MemStoreSize(dataSize, dataSize, 0);
 regionServerAccounting.incGlobalMemStoreSize(memstoreSize);
 assertEquals(FlushType.ABOVE_ONHEAP_HIGHER_MARK,
   regionServerAccounting.isAboveHighWaterMark());
@@ -50,12 +59,10 @@ public class TestRegionServerAccounting {
 
   @Test
   public void testOnheapMemstoreLowerWaterMarkLimits() {
-Configuration conf = HBaseConfiguration.create();
-conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_KEY, 0.2f);
-// try for default cases
RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf);
+long dataSize = regionServerAccounting.getGlobalMemStoreLimit();
 MemStoreSize memstoreSize =
-new MemStoreSize((3L * 1024L * 1024L * 1024L), (1L * 1024L * 1024L * 1024L), 0);
+new MemStoreSize(dataSize, dataSize, 0);
 regionServerAccounting.incGlobalMemStoreSize(memstoreSize);
 assertEquals(FlushType.ABOVE_ONHEAP_LOWER_MARK,
   regionServerAccounting.isAboveLowWaterMark());
@@ -63,7 +70,6 @@ public class TestRegionServerAccounting {
 
   @Test
   public void testOffheapMemstoreHigherWaterMarkLimitsDueToDataSize() {
-Configuration conf = HBaseConfiguration.create();
 // setting 1G as offheap data size
 conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024L));
 // try for default cases
@@ -78,15 +84,14 @@ public class TestRegionServerAccounting {
 
   @Test
   public void testOffheapMemstoreHigherWaterMarkLimitsDueToHeapSize() {
-Configuration conf = HBaseConfiguration.create();
-conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_KEY, 0.2f);
 // setting 1G as offheap data size
 conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024L));
 // try for default cases
RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf);
 // this will breach higher limit as heap size is higher and not due to offheap size
+long dataSize = regionServerAccounting.getGlobalOnHeapMemStoreLimit();
 MemStoreSize memstoreSize =
-new MemStoreSize((3L * 1024L * 1024L), (2L * 1024L * 1024L * 

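The gist of the fix: derive the injected memstore load from the configured global limit instead of hard-coded absolute sizes, so the watermark assertions hold whatever heap the test JVM gets. A minimal sketch of the pattern, using only classes that appear in the diff above:

    // Size the load relative to the configured limit, never as absolute bytes.
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_KEY, 0.2f);
    RegionServerAccounting accounting = new RegionServerAccounting(conf);
    long dataSize = accounting.getGlobalMemStoreLimit();
    accounting.incGlobalMemStoreSize(new MemStoreSize(dataSize, dataSize, 0));
    // Reaching the limit itself is at/above the high water mark on any heap.
    assertEquals(FlushType.ABOVE_ONHEAP_HIGHER_MARK, accounting.isAboveHighWaterMark());
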
[13/31] hbase git commit: HBASE-19747 Introduce a special WALProvider for synchronous replication

2018-05-08 Thread zhangduo
HBASE-19747 Introduce a special WALProvider for synchronous replication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ef1fd9de
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ef1fd9de
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ef1fd9de

Branch: refs/heads/HBASE-19064
Commit: ef1fd9deb654f4c9d9c01731760aab6e8d35d8af
Parents: ec51982
Author: zhangduo 
Authored: Fri Jan 19 18:38:39 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../hbase/regionserver/wal/AbstractFSWAL.java   |   7 +
 .../hbase/regionserver/wal/AsyncFSWAL.java  |   1 -
 .../hbase/regionserver/wal/DualAsyncFSWAL.java  |   4 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java   |   4 -
 .../regionserver/PeerActionListener.java|  33 +++
 .../SynchronousReplicationPeerProvider.java |  35 +++
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |   1 +
 .../hadoop/hbase/wal/AsyncFSWALProvider.java|  18 +-
 .../hbase/wal/NettyAsyncFSWALConfigHelper.java  |   8 +-
 .../hbase/wal/RegionGroupingProvider.java   |  13 +-
 .../wal/SynchronousReplicationWALProvider.java  | 225 +++
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  37 ++-
 .../org/apache/hadoop/hbase/wal/WALKeyImpl.java |  16 +-
 .../regionserver/TestCompactionPolicy.java  |   1 +
 .../regionserver/TestFailedAppendAndSync.java   | 122 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |  24 +-
 .../TestHRegionWithInMemoryFlush.java   |   7 -
 .../hbase/regionserver/TestRegionIncrement.java |  20 +-
 .../hbase/regionserver/TestWALLockup.java   |   1 +
 .../regionserver/wal/AbstractTestWALReplay.java |   1 +
 .../regionserver/wal/ProtobufLogTestHelper.java |  44 +++-
 .../hbase/regionserver/wal/TestAsyncFSWAL.java  |  13 +-
 .../regionserver/wal/TestAsyncWALReplay.java|   4 +-
 .../wal/TestCombinedAsyncWriter.java|   3 +-
 .../hbase/regionserver/wal/TestFSHLog.java  |  15 +-
 .../hbase/regionserver/wal/TestWALReplay.java   |   1 +
 .../apache/hadoop/hbase/wal/IOTestProvider.java |   2 -
 .../TestSynchronousReplicationWALProvider.java  | 153 +
 28 files changed, 659 insertions(+), 154 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ef1fd9de/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index ce8dafa..4816d77 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -430,6 +430,13 @@ public abstract class AbstractFSWAL implements WAL {
 this.implClassName = getClass().getSimpleName();
   }
 
+  /**
+   * Used to initialize the WAL. Usually just call rollWriter to create the first log writer.
+   */
+  public void init() throws IOException {
+rollWriter();
+  }
+
   @Override
   public void registerWALActionsListener(WALActionsListener listener) {
 this.listeners.add(listener);

http://git-wip-us.apache.org/repos/asf/hbase/blob/ef1fd9de/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index 0bee9d6..17133ed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -248,7 +248,6 @@ public class AsyncFSWAL extends AbstractFSWAL {
 batchSize = conf.getLong(WAL_BATCH_SIZE, DEFAULT_WAL_BATCH_SIZE);
waitOnShutdownInSeconds = conf.getInt(ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS,
   DEFAULT_ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS);
-rollWriter();
   }
 
   private static boolean waitingRoll(int epochAndState) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/ef1fd9de/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
index 42b0dae..0495337 100644
--- 

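Worth calling out from the AbstractFSWAL/AsyncFSWAL hunks above: the initial rollWriter() moves out of the constructor into an explicit init(), so a wrapping provider (here, the synchronous replication one) can finish its wiring before the first writer exists. A hedged sketch of the resulting call order; createWAL() and listener are hypothetical stand-ins for the provider's factory and its registered listener:

    AbstractFSWAL<?> wal = createWAL();        // hypothetical factory call
    wal.registerWALActionsListener(listener);  // safe: no writer rolled yet
    wal.init();                                // rollWriter() now happens here
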
[16/31] hbase git commit: HBASE-19857 Complete the procedure for adding a sync replication peer

2018-05-08 Thread zhangduo
HBASE-19857 Complete the procedure for adding a sync replication peer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/baf34a97
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/baf34a97
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/baf34a97

Branch: refs/heads/HBASE-19064
Commit: baf34a97a0d014d0f85428baef2cfc7e5356e0e4
Parents: 421d39b
Author: zhangduo 
Authored: Thu Jan 25 20:09:00 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../hbase/replication/ReplicationPeer.java  |   9 +
 .../hbase/replication/ReplicationPeerImpl.java  |  28 +--
 .../hbase/replication/ReplicationPeers.java |   3 +-
 .../regionserver/PeerActionListener.java|  10 +-
 .../SyncReplicationPeerProvider.java|  35 +++
 .../SynchronousReplicationPeerProvider.java |  35 ---
 .../hbase/wal/SyncReplicationWALProvider.java   | 234 +++
 .../wal/SynchronousReplicationWALProvider.java  | 225 --
 .../org/apache/hadoop/hbase/wal/WALFactory.java |   8 +-
 .../TestReplicationSourceManager.java   |   3 +
 .../wal/TestSyncReplicationWALProvider.java | 153 
 .../TestSynchronousReplicationWALProvider.java  | 153 
 12 files changed, 456 insertions(+), 440 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/baf34a97/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
--
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index 2da3cce..0196a9a 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -54,6 +54,15 @@ public interface ReplicationPeer {
   PeerState getPeerState();
 
   /**
+   * Returns the sync replication state of the peer by reading local cache.
+   * <p>
+   * If the peer is not a synchronous replication peer, a {@link SyncReplicationState#NONE} will be
+   * returned.
+   * @return the sync replication state
+   */
+  SyncReplicationState getSyncReplicationState();
+
+  /**
* Test whether the peer is enabled.
* @return {@code true} if enabled, otherwise {@code false}.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/baf34a97/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
--
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
index d656466..ff3f662 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
@@ -36,6 +36,8 @@ public class ReplicationPeerImpl implements ReplicationPeer {
 
   private volatile PeerState peerState;
 
+  private volatile SyncReplicationState syncReplicationState;
+
  private final List<ReplicationPeerConfigListener> peerConfigListeners;
 
   /**
@@ -45,12 +47,13 @@ public class ReplicationPeerImpl implements ReplicationPeer 
{
* @param id string representation of this peer's identifier
* @param peerConfig configuration for the replication peer
*/
-  public ReplicationPeerImpl(Configuration conf, String id, boolean peerState,
-  ReplicationPeerConfig peerConfig) {
+  public ReplicationPeerImpl(Configuration conf, String id, ReplicationPeerConfig peerConfig,
+  boolean peerState, SyncReplicationState syncReplicationState) {
 this.conf = conf;
 this.id = id;
 this.peerState = peerState ? PeerState.ENABLED : PeerState.DISABLED;
 this.peerConfig = peerConfig;
+this.syncReplicationState = syncReplicationState;
 this.peerConfigListeners = new ArrayList<>();
   }
 
@@ -77,37 +80,26 @@ public class ReplicationPeerImpl implements ReplicationPeer {
 return peerState;
   }
 
-  /**
-   * Get the peer config object
-   * @return the ReplicationPeerConfig for this peer
-   */
+  @Override
+  public SyncReplicationState getSyncReplicationState() {
+return syncReplicationState;
+  }
+
   @Override
   public ReplicationPeerConfig getPeerConfig() {
 return peerConfig;
   }
 
-  /**
-   * Get the configuration object required to communicate with this peer
-   * @return configuration object
-   */
   @Override
   public Configuration getConfiguration() {
 return conf;
   }
 
-  /**
-  

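With the new accessor, a peer's sync replication state sits next to PeerState as a first-class, locally cached property. A small grounded sketch of the one contract the javadoc promises (NONE for non-sync peers):

    import org.apache.hadoop.hbase.replication.ReplicationPeer;
    import org.apache.hadoop.hbase.replication.SyncReplicationState;

    final class SyncPeerCheck {
      // NONE is what non-sync peers report, per the ReplicationPeer javadoc above.
      static boolean isSyncPeer(ReplicationPeer peer) {
        return peer.getSyncReplicationState() != SyncReplicationState.NONE;
      }
    }
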
[03/31] hbase git commit: HBASE-20538 TestSaslFanOutOneBlockAsyncDFSOutput failing: UnrecoverableKeyException: Rejected by the jceks.key.serialFilter or jdk.serialFilter property Disable test TestSasl

2018-05-08 Thread zhangduo
HBASE-20538 TestSaslFanOutOneBlockAsyncDFSOutput failing: UnrecoverableKeyException: Rejected by the jceks.key.serialFilter or jdk.serialFilter property
Disable test TestSaslFanOutOneBlockAsyncDFSOutput for the moment.
Depends on HDFS-13494


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bb1a9356
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bb1a9356
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bb1a9356

Branch: refs/heads/HBASE-19064
Commit: bb1a935636f4659631fb7b474716561fc5fff401
Parents: 8e6ff68
Author: Michael Stack 
Authored: Mon May 7 09:52:51 2018 -0700
Committer: Michael Stack 
Committed: Mon May 7 09:54:32 2018 -0700

--
 .../hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bb1a9356/hbase-server/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java
index a221a01..09b1d56 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java
@@ -63,6 +63,7 @@ import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.Ignore;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.junit.runner.RunWith;
@@ -78,6 +79,7 @@ import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel;
 
 @RunWith(Parameterized.class)
 @Category({ MiscTests.class, LargeTests.class })
+@Ignore
 public class TestSaslFanOutOneBlockAsyncDFSOutput {
 
   @ClassRule



[28/31] hbase git commit: HBASE-20370 Also remove the wal file in remote cluster when we finish replicating a file

2018-05-08 Thread zhangduo
HBASE-20370 Also remove the wal file in remote cluster when we finish replicating a file


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e53ab135
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e53ab135
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e53ab135

Branch: refs/heads/HBASE-19064
Commit: e53ab135ac4c94253b6c21dc0eb8993eb5e99265
Parents: 25e9421
Author: zhangduo 
Authored: Tue Apr 17 09:04:56 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../hbase/replication/ReplicationUtils.java |  36 ++-
 .../regionserver/ReplicationSource.java |  38 +++
 .../ReplicationSourceInterface.java |  21 +++-
 .../regionserver/ReplicationSourceManager.java  | 108 ++-
 .../regionserver/ReplicationSourceShipper.java  |  27 ++---
 .../hbase/wal/SyncReplicationWALProvider.java   |  11 +-
 .../replication/ReplicationSourceDummy.java |  20 ++--
 .../TestReplicationSourceManager.java   | 101 -
 8 files changed, 246 insertions(+), 116 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e53ab135/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index cb22f57..66e9b01 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -22,14 +22,17 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CompoundConfiguration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Helper class for replication.
@@ -37,6 +40,8 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public final class ReplicationUtils {
 
+  private static final Logger LOG = LoggerFactory.getLogger(ReplicationUtils.class);
+
   public static final String REPLICATION_ATTR_NAME = "__rep__";
 
   public static final String REMOTE_WAL_DIR_NAME = "remoteWALs";
@@ -176,4 +181,33 @@ public final class ReplicationUtils {
   return tableCFs != null && tableCFs.containsKey(tableName);
 }
   }
+
+  public static FileSystem getRemoteWALFileSystem(Configuration conf, String remoteWALDir)
+  throws IOException {
+return new Path(remoteWALDir).getFileSystem(conf);
+  }
+
+  public static Path getRemoteWALDirForPeer(String remoteWALDir, String peerId) {
+return new Path(remoteWALDir, peerId);
+  }
+
+  /**
+   * Do the sleeping logic
+   * @param msg Why we sleep
+   * @param sleepForRetries the base sleep time.
+   * @param sleepMultiplier by how many times the default sleeping time is augmented
+   * @param maxRetriesMultiplier the max retry multiplier
+   * @return True if sleepMultiplier is < maxRetriesMultiplier
+   */
+  public static boolean sleepForRetries(String msg, long sleepForRetries, int sleepMultiplier,
+  int maxRetriesMultiplier) {
+try {
+  LOG.trace("{}, sleeping {} times {}", msg, sleepForRetries, sleepMultiplier);
+  Thread.sleep(sleepForRetries * sleepMultiplier);
+} catch (InterruptedException e) {
+  LOG.debug("Interrupted while sleeping between retries");
+  Thread.currentThread().interrupt();
+}
+return sleepMultiplier < maxRetriesMultiplier;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e53ab135/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index b05a673..01ccb11 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -89,8 +89,6 @@ public class ReplicationSource implements 

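ReplicationUtils.sleepForRetries is the shared backoff helper; its boolean return tells the caller whether the multiplier may keep growing. A minimal retry loop under assumed settings (deleteRemoteWAL() is a hypothetical stand-in for the remote cleanup call):

    int sleepMultiplier = 1;
    while (!deleteRemoteWAL()) {
      if (!ReplicationUtils.sleepForRetries("Failed to delete remote wal",
          1000 /* base sleep ms, assumed */, sleepMultiplier, 60 /* cap, assumed */)) {
        break; // multiplier cap reached; give up
      }
      sleepMultiplier++;
    }
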
[31/31] hbase git commit: HBASE-20434 Also remove remote wals when peer is in DA state

2018-05-08 Thread zhangduo
HBASE-20434 Also remove remote wals when peer is in DA state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3c0dfa45
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3c0dfa45
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3c0dfa45

Branch: refs/heads/HBASE-19064
Commit: 3c0dfa45430e25f6e0e7b4215cd0b1fb9d417834
Parents: 35af43b
Author: zhangduo 
Authored: Wed Apr 25 17:12:23 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../hbase/replication/ReplicationUtils.java |   4 +
 ...ransitPeerSyncReplicationStateProcedure.java |   2 +-
 .../regionserver/ReplicationSource.java |   7 +-
 .../regionserver/ReplicationSourceManager.java  |  86 ++--
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |  19 ++--
 .../hbase/wal/SyncReplicationWALProvider.java   |  30 +-
 .../TestSyncReplicationRemoveRemoteWAL.java | 101 +++
 .../TestReplicationSourceManager.java   |  68 -
 8 files changed, 251 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3c0dfa45/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index 66e9b01..069db7a 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -191,6 +191,10 @@ public final class ReplicationUtils {
 return new Path(remoteWALDir, peerId);
   }
 
+  public static Path getRemoteWALDirForPeer(Path remoteWALDir, String peerId) {
+return new Path(remoteWALDir, peerId);
+  }
+
   /**
* Do the sleeping logic
* @param msg Why we sleep

http://git-wip-us.apache.org/repos/asf/hbase/blob/3c0dfa45/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index 5da2b0c..99fd615 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
@@ -211,7 +211,7 @@ public class TransitPeerSyncReplicationStateProcedure
   case CREATE_DIR_FOR_REMOTE_WAL:
 MasterFileSystem mfs = env.getMasterFileSystem();
 Path remoteWALDir = new Path(mfs.getWALRootDir(), ReplicationUtils.REMOTE_WAL_DIR_NAME);
-Path remoteWALDirForPeer = new Path(remoteWALDir, peerId);
+Path remoteWALDirForPeer = ReplicationUtils.getRemoteWALDirForPeer(remoteWALDir, peerId);
 FileSystem walFs = mfs.getWALFileSystem();
 try {
   if (walFs.exists(remoteWALDirForPeer)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/3c0dfa45/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 1a27fc1..7313f13 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -549,14 +549,17 @@ public class ReplicationSource implements ReplicationSourceInterface {
 }
 
 /**
+ * <p>
  * Split a path to get the start time
+ * </p>
+ * <p>
  * For example: 10.20.20.171%3A60020.1277499063250
+ * </p>
  * @param p path to split
  * @return start time
  */
 private static long getTS(Path p) {
-  int tsIndex = p.getName().lastIndexOf('.') + 1;
-  return Long.parseLong(p.getName().substring(tsIndex));
+  return AbstractFSWALProvider.getWALStartTimeFromWALName(p.getName());
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3c0dfa45/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java

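The getTS change above centralizes the WAL-name timestamp parse in AbstractFSWALProvider.getWALStartTimeFromWALName. For reference, what the replaced inline parse did, against the name format quoted in the javadoc:

    // For "10.20.20.171%3A60020.1277499063250": everything after the last dot.
    static long startTimeFromWalName(String name) {
      int tsIndex = name.lastIndexOf('.') + 1;
      return Long.parseLong(name.substring(tsIndex)); // -> 1277499063250L
    }
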
[18/31] hbase git commit: HBASE-19935 Only allow table replication for sync replication for now

2018-05-08 Thread zhangduo
HBASE-19935 Only allow table replication for sync replication for now


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a9f61577
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a9f61577
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a9f61577

Branch: refs/heads/HBASE-19064
Commit: a9f6157742883c482d3a8958ef6324843cc927e2
Parents: df0a7d1
Author: Guanghao Zhang 
Authored: Tue Feb 6 16:00:59 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../replication/ReplicationPeerConfig.java  |  9 +++
 .../replication/ReplicationPeerManager.java | 34 -
 .../replication/TestReplicationAdmin.java   | 73 ++--
 .../wal/TestCombinedAsyncWriter.java|  6 ++
 .../wal/TestSyncReplicationWALProvider.java |  6 ++
 5 files changed, 102 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a9f61577/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index 97abc74..997a155 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -25,6 +25,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
+
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -220,6 +222,13 @@ public class ReplicationPeerConfig {
 return this.remoteWALDir;
   }
 
+  /**
+   * Use remote wal dir to decide whether a peer is sync replication peer
+   */
+  public boolean isSyncReplication() {
+return !StringUtils.isBlank(this.remoteWALDir);
+  }
+
   public static ReplicationPeerConfigBuilder newBuilder() {
 return new ReplicationPeerConfigBuilderImpl();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9f61577/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index f07a0d8..ff778a8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -170,7 +170,7 @@ public class ReplicationPeerManager {
   " does not match new remote wal dir '" + 
peerConfig.getRemoteWALDir() + "'");
 }
 
-if (oldPeerConfig.getRemoteWALDir() != null) {
+if (oldPeerConfig.isSyncReplication()) {
   if (!ReplicationUtils.isNamespacesAndTableCFsEqual(oldPeerConfig, 
peerConfig)) {
 throw new DoNotRetryIOException(
   "Changing the replicated namespace/table config on a synchronous 
replication " +
@@ -199,8 +199,8 @@ public class ReplicationPeerManager {
 }
 ReplicationPeerConfig copiedPeerConfig = 
ReplicationPeerConfig.newBuilder(peerConfig).build();
 SyncReplicationState syncReplicationState =
-StringUtils.isBlank(peerConfig.getRemoteWALDir()) ? 
SyncReplicationState.NONE
-: SyncReplicationState.DOWNGRADE_ACTIVE;
+copiedPeerConfig.isSyncReplication() ? 
SyncReplicationState.DOWNGRADE_ACTIVE
+: SyncReplicationState.NONE;
 peerStorage.addPeer(peerId, copiedPeerConfig, enabled, 
syncReplicationState);
 peers.put(peerId,
   new ReplicationPeerDescription(peerId, enabled, copiedPeerConfig, 
syncReplicationState));
@@ -324,9 +324,37 @@ public class ReplicationPeerManager {
 peerConfig.getTableCFsMap());
 }
 
+if (peerConfig.isSyncReplication()) {
+  checkPeerConfigForSyncReplication(peerConfig);
+}
+
 checkConfiguredWALEntryFilters(peerConfig);
   }
 
+  private void checkPeerConfigForSyncReplication(ReplicationPeerConfig peerConfig)
+  throws DoNotRetryIOException {
+// This is used to reduce the difficulty for implementing the sync replication state transition
+// as we need to reopen all the related regions.
+// TODO: Add namespace, replicat_all flag back
+if (peerConfig.replicateAllUserTables()) {
+  throw new DoNotRetryIOException(
+

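The remote WAL dir doubles as the sync-replication marker: blank means an ordinary async peer, non-blank flips isSyncReplication() and triggers the extra checks above. A hedged sketch of configuring such a peer (cluster key and path are example values):

    ReplicationPeerConfig syncPeer = ReplicationPeerConfig.newBuilder()
        .setClusterKey("zk1,zk2,zk3:2181:/hbase")         // example value
        .setRemoteWALDir("hdfs://standby:8020/remoteWAL") // non-blank => sync peer
        .setReplicateAllUserTables(false)                 // required by this patch
        .build();
    assert syncPeer.isSyncReplication();
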
[02/31] hbase git commit: HBASE-20523 PE tool should support configuring client side buffering sizes (Ram)

2018-05-08 Thread zhangduo
HBASE-20523 PE tool should support configuring client side buffering sizes (Ram)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8e6ff689
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8e6ff689
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8e6ff689

Branch: refs/heads/HBASE-19064
Commit: 8e6ff689e806138e602a46730886d8c5f524fdcd
Parents: 5e14e12
Author: Vasudevan 
Authored: Mon May 7 12:50:24 2018 +0530
Committer: Vasudevan 
Committed: Mon May 7 12:57:20 2018 +0530

--
 .../hadoop/hbase/PerformanceEvaluation.java | 23 +++-
 .../hadoop/hbase/TestPerformanceEvaluation.java | 10 +
 2 files changed, 32 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8e6ff689/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 7fc064f..42dc984 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.AsyncConnection;
 import org.apache.hadoop.hbase.client.AsyncTable;
 import org.apache.hadoop.hbase.client.BufferedMutator;
+import org.apache.hadoop.hbase.client.BufferedMutatorParams;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Consistency;
@@ -666,6 +667,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
 boolean asyncPrefetch = false;
 boolean cacheBlocks = true;
 Scan.ReadType scanReadType = Scan.ReadType.DEFAULT;
+long bufferSize = 2l * 1024l * 1024l;
 
 public TestOptions() {}
 
@@ -715,6 +717,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   this.asyncPrefetch = that.asyncPrefetch;
   this.cacheBlocks = that.cacheBlocks;
   this.scanReadType = that.scanReadType;
+  this.bufferSize = that.bufferSize;
 }
 
 public int getCaching() {
@@ -885,6 +888,10 @@ public class PerformanceEvaluation extends Configured implements Tool {
   this.valueSize = valueSize;
 }
 
+public void setBufferSize(long bufferSize) {
+  this.bufferSize = bufferSize;
+}
+
 public void setPeriod(int period) {
   this.period = period;
 }
@@ -1020,6 +1027,10 @@ public class PerformanceEvaluation extends Configured implements Tool {
 public MemoryCompactionPolicy getInMemoryCompaction() {
   return this.inMemoryCompaction;
 }
+
+public long getBufferSize() {
+  return this.bufferSize;
+}
   }
 
   /*
@@ -1626,7 +1637,9 @@ public class PerformanceEvaluation extends Configured implements Tool {
 
 @Override
 void onStartup() throws IOException {
-  this.mutator = connection.getBufferedMutator(TableName.valueOf(opts.tableName));
+  BufferedMutatorParams p = new BufferedMutatorParams(TableName.valueOf(opts.tableName));
+  p.writeBufferSize(opts.bufferSize);
+  this.mutator = connection.getBufferedMutator(p);
   this.table = connection.getTable(TableName.valueOf(opts.tableName));
 }
 
@@ -2363,6 +2376,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
 System.err.println(" asyncPrefetch   Enable asyncPrefetch for scan");
 System.err.println(" cacheBlocks Set the cacheBlocks option for scan. Default: true");
 System.err.println(" scanReadTypeSet the readType option for scan, stream/pread/default. Default: default");
+System.err.println(" bufferSize  Set the value of client side buffering. Default: 2MB");
 System.err.println();
 System.err.println(" Note: -D properties will be applied to the conf used. ");
 System.err.println("  For example: ");
@@ -2636,6 +2650,13 @@ public class PerformanceEvaluation extends Configured implements Tool {
 
Scan.ReadType.valueOf(cmd.substring(scanReadType.length()).toUpperCase());
 continue;
   }
+
+  final String bufferSize = "--bufferSize=";
+  if (cmd.startsWith(bufferSize)) {
+opts.bufferSize = Long.parseLong(cmd.substring(bufferSize.length()));
+continue;
+  }
+
   if (isCommandClass(cmd)) {
 opts.cmdName = cmd;
 try {


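The new --bufferSize= flag feeds straight into BufferedMutatorParams.writeBufferSize before the mutator is created on startup. The same wiring outside PE, as a hedged sketch (conf and table name are assumed; 8 MB is an example value):

    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      BufferedMutatorParams params =
          new BufferedMutatorParams(TableName.valueOf("TestTable"))
              .writeBufferSize(8L * 1024L * 1024L);
      try (BufferedMutator mutator = connection.getBufferedMutator(params)) {
        mutator.mutate(new Put(Bytes.toBytes("row")).addColumn(
            Bytes.toBytes("info"), Bytes.toBytes("0"), Bytes.toBytes("value")));
      }
    }
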
[27/31] hbase git commit: HBASE-19943 Only allow removing sync replication peer which is in DA state

2018-05-08 Thread zhangduo
HBASE-19943 Only allow removing sync replication peer which is in DA state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0ca90e96
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0ca90e96
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0ca90e96

Branch: refs/heads/HBASE-19064
Commit: 0ca90e9691aa813e71a823da5ccdd4589436ecd4
Parents: dff080b
Author: huzheng 
Authored: Thu Mar 1 18:34:02 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../replication/ReplicationPeerManager.java | 14 -
 .../hbase/wal/SyncReplicationWALProvider.java   |  2 +-
 .../replication/TestReplicationAdmin.java   | 63 
 .../hbase/replication/TestSyncReplication.java  |  2 +-
 4 files changed, 78 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0ca90e96/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 0dc922d..41dd6e3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -120,8 +120,20 @@ public class ReplicationPeerManager {
 return desc;
   }
 
+  private void checkPeerInDAStateIfSyncReplication(String peerId) throws DoNotRetryIOException {
+ReplicationPeerDescription desc = peers.get(peerId);
+if (desc != null && desc.getPeerConfig().isSyncReplication()
+&& !SyncReplicationState.DOWNGRADE_ACTIVE.equals(desc.getSyncReplicationState())) {
+  throw new DoNotRetryIOException("Couldn't remove synchronous replication peer with state="
+  + desc.getSyncReplicationState()
+  + ", Transit the synchronous replication state to be DOWNGRADE_ACTIVE firstly.");
+}
+  }
+
   ReplicationPeerConfig preRemovePeer(String peerId) throws DoNotRetryIOException {
-return checkPeerExists(peerId).getPeerConfig();
+ReplicationPeerDescription pd = checkPeerExists(peerId);
+checkPeerInDAStateIfSyncReplication(peerId);
+return pd.getPeerConfig();
   }
 
   void preEnablePeer(String peerId) throws DoNotRetryIOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0ca90e96/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
index ac4b4cd..282aa21 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
@@ -142,7 +142,7 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen
   @Override
   public WAL getWAL(RegionInfo region) throws IOException {
 if (region == null) {
-  return provider.getWAL(region);
+  return provider.getWAL(null);
 }
 Optional<Pair<String, String>> peerIdAndRemoteWALDir =
   peerInfoProvider.getPeerIdAndRemoteWALDir(region);

http://git-wip-us.apache.org/repos/asf/hbase/blob/0ca90e96/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index 0ad476f..486ab51 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -254,6 +254,62 @@ public class TestReplicationAdmin {
   }
 
   @Test
+  public void testRemovePeerWithNonDAState() throws Exception {
+TableName tableName = TableName.valueOf(name.getMethodName());
+TEST_UTIL.createTable(tableName, Bytes.toBytes("family"));
+ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder();
+
+String rootDir = "hdfs://srv1:/hbase";
+builder.setClusterKey(KEY_ONE);
+builder.setRemoteWALDir(rootDir);
+builder.setReplicateAllUserTables(false);
+Map tableCfs = new 

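Net effect of the guard: a sync replication peer is only removable from DOWNGRADE_ACTIVE; removal in ACTIVE or STANDBY now fails fast with a DoNotRetryIOException. A hedged admin-side sketch of the resulting two-step removal (peer id "1" is an example):

    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      // 1. Bring the peer into the only removable sync state.
      admin.transitReplicationPeerSyncReplicationState("1",
          SyncReplicationState.DOWNGRADE_ACTIVE);
      // 2. Now preRemovePeer's DA-state check passes.
      admin.removeReplicationPeer("1");
    }
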
[09/31] hbase git commit: HBASE-20425 Do not write the cluster id of the current active cluster when writing remote WAL

2018-05-08 Thread zhangduo
HBASE-20425 Do not write the cluster id of the current active cluster when writing remote WAL


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e4dca018
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e4dca018
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e4dca018

Branch: refs/heads/HBASE-19064
Commit: e4dca0187be28ff1d21604cd8d0bbeb0c6bceb9e
Parents: 654ee49
Author: huzheng 
Authored: Mon Apr 23 17:20:55 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../replication/TestSyncReplicationActive.java  | 32 
 1 file changed, 32 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e4dca018/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
index bff4572..f9020a0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
@@ -17,9 +17,17 @@
  */
 package org.apache.hadoop.hbase.replication;
 
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.hadoop.hbase.wal.WAL.Reader;
+import org.apache.hadoop.hbase.wal.WALFactory;
+import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -49,6 +57,9 @@ public class TestSyncReplicationActive extends SyncReplicationTestBase {
 // peer is disabled so no data have been replicated
 verifyNotReplicatedThroughRegion(UTIL2, 0, 100);
 
+// Ensure that there's no cluster id in remote log entries.
+verifyNoClusterIdInRemoteLog(UTIL2, PEER_ID);
+
 UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
   SyncReplicationState.DOWNGRADE_ACTIVE);
 // confirm that peer with state DA will reject replication request.
@@ -72,4 +83,25 @@ public class TestSyncReplicationActive extends SyncReplicationTestBase {
 verifyReplicationRequestRejection(UTIL2, true);
 write(UTIL2, 200, 300);
   }
+
+  private void verifyNoClusterIdInRemoteLog(HBaseTestingUtility utility, String peerId)
+  throws Exception {
+FileSystem fs2 = utility.getTestFileSystem();
+Path remoteDir =
+new Path(utility.getMiniHBaseCluster().getMaster().getMasterFileSystem().getRootDir(),
+"remoteWALs").makeQualified(fs2.getUri(), fs2.getWorkingDirectory());
+FileStatus[] files = fs2.listStatus(new Path(remoteDir, peerId));
+Assert.assertTrue(files.length > 0);
+for (FileStatus file : files) {
+  try (Reader reader =
+  WALFactory.createReader(fs2, file.getPath(), utility.getConfiguration())) {
+Entry entry = reader.next();
+Assert.assertTrue(entry != null);
+while (entry != null) {
+  Assert.assertEquals(entry.getKey().getClusterIds().size(), 0);
+  entry = reader.next();
+}
+  }
+}
+  }
 }



[10/31] hbase git commit: HBASE-19973 Implement a procedure to replay sync replication wal for standby cluster

2018-05-08 Thread zhangduo
HBASE-19973 Implement a procedure to replay sync replication wal for standby cluster


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/073a99a2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/073a99a2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/073a99a2

Branch: refs/heads/HBASE-19064
Commit: 073a99a2c30390678e0fd42abf1a33a1172f5044
Parents: 0ca90e9
Author: Guanghao Zhang 
Authored: Fri Mar 2 18:43:25 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../src/main/protobuf/MasterProcedure.proto |  22 +++
 .../apache/hadoop/hbase/executor/EventType.java |   9 +-
 .../hadoop/hbase/executor/ExecutorType.java |   3 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   9 +
 .../hadoop/hbase/master/MasterServices.java |   6 +
 .../procedure/PeerProcedureInterface.java   |   3 +-
 .../hbase/master/procedure/PeerQueue.java   |   3 +-
 .../replication/RecoverStandbyProcedure.java| 114 +++
 .../ReplaySyncReplicationWALManager.java| 139 +
 .../ReplaySyncReplicationWALProcedure.java  | 193 +++
 .../hbase/regionserver/HRegionServer.java   |   9 +-
 .../ReplaySyncReplicationWALCallable.java   | 149 ++
 .../SyncReplicationPeerInfoProviderImpl.java|   3 +
 .../org/apache/hadoop/hbase/util/FSUtils.java   |   5 +
 .../hbase/master/MockNoopMasterServices.java|   8 +-
 .../master/TestRecoverStandbyProcedure.java | 186 ++
 16 files changed, 854 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/073a99a2/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index e8b940e..01e4dae 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -459,3 +459,25 @@ message TransitPeerSyncReplicationStateStateData {
   optional SyncReplicationState fromState = 1;
   required SyncReplicationState toState = 2;
 }
+
+enum RecoverStandbyState {
+  RENAME_SYNC_REPLICATION_WALS_DIR = 1;
+  INIT_WORKERS = 2;
+  DISPATCH_TASKS = 3;
+  REMOVE_SYNC_REPLICATION_WALS_DIR = 4;
+}
+
+message RecoverStandbyStateData {
+  required string peer_id = 1;
+}
+
+message ReplaySyncReplicationWALStateData {
+  required string peer_id = 1;
+  required string wal = 2;
+  optional ServerName target_server = 3;
+}
+
+message ReplaySyncReplicationWALParameter {
+  required string peer_id = 1;
+  required string wal = 2;
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/073a99a2/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
index 922deb8..ad38d1c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
@@ -281,7 +281,14 @@ public enum EventType {
*
* RS_REFRESH_PEER
*/
-  RS_REFRESH_PEER (84, ExecutorType.RS_REFRESH_PEER);
+  RS_REFRESH_PEER(84, ExecutorType.RS_REFRESH_PEER),
+
+  /**
+   * RS replay sync replication wal.
+   *
+   * RS_REPLAY_SYNC_REPLICATION_WAL
+   */
+  RS_REPLAY_SYNC_REPLICATION_WAL(85, ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL);
 
   private final int code;
   private final ExecutorType executor;

http://git-wip-us.apache.org/repos/asf/hbase/blob/073a99a2/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
index 7f130d1..ea97354 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
@@ -47,7 +47,8 @@ public enum ExecutorType {
   RS_REGION_REPLICA_FLUSH_OPS  (28),
   RS_COMPACTED_FILES_DISCHARGER (29),
   RS_OPEN_PRIORITY_REGION(30),
-  RS_REFRESH_PEER   (31);
+  RS_REFRESH_PEER(31),
+  RS_REPLAY_SYNC_REPLICATION_WAL(32);
 
   ExecutorType(int value) {
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/073a99a2/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java

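The RecoverStandbyState enum in the proto above pins the replay work down to four ordered, resumable steps. A commented Java restatement (the comments paraphrase the diffstat; this is not the procedure API itself):

    enum RecoverStandbyState {
      RENAME_SYNC_REPLICATION_WALS_DIR, // move remoteWALs/<peerId> aside for replay
      INIT_WORKERS,                     // set up region server workers for replay
      DISPATCH_TASKS,                   // one ReplaySyncReplicationWALProcedure per wal
      REMOVE_SYNC_REPLICATION_WALS_DIR  // drop the replay dir once all wals applied
    }
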
[26/31] hbase git commit: HBASE-20163 Forbid major compaction when standby cluster replay the remote wals

2018-05-08 Thread zhangduo
HBASE-20163 Forbid major compaction when standby cluster replay the remote wals


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/25e94212
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/25e94212
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/25e94212

Branch: refs/heads/HBASE-19064
Commit: 25e942123112192fc3d1e5498729fe03ac7b9b22
Parents: a299641
Author: Guanghao Zhang 
Authored: Thu Apr 12 14:44:25 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../hadoop/hbase/regionserver/HRegion.java  | 18 
 .../hbase/regionserver/HRegionServer.java   |  2 +-
 .../regionserver/RegionServerServices.java  |  5 +++
 .../ForbidMajorCompactionChecker.java   | 44 
 .../hadoop/hbase/MockRegionServerServices.java  |  6 +++
 .../hadoop/hbase/master/MockRegionServer.java   |  6 +++
 6 files changed, 80 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/25e94212/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index ea72cfe..1865144 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -144,6 +144,7 @@ import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
 import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.compactions.ForbidMajorCompactionChecker;
 import org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory;
 import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
 import org.apache.hadoop.hbase.regionserver.throttle.StoreHotnessProtector;
@@ -1977,6 +1978,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 return compact(compaction, store, throughputController, null);
   }
 
+  private boolean shouldForbidMajorCompaction() {
+if (rsServices != null && rsServices.getReplicationSourceService() != null) {
+  return rsServices.getReplicationSourceService().getSyncReplicationPeerInfoProvider()
+  .checkState(getRegionInfo(), ForbidMajorCompactionChecker.get());
+}
+return false;
+  }
+
   public boolean compact(CompactionContext compaction, HStore store,
   ThroughputController throughputController, User user) throws IOException {
 assert compaction != null && compaction.hasSelection();
@@ -1986,6 +1995,15 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   store.cancelRequestedCompaction(compaction);
   return false;
 }
+
+if (compaction.getRequest().isAllFiles() && shouldForbidMajorCompaction()) {
+  LOG.warn("Skipping major compaction on " + this
+  + " because this cluster is transiting sync replication state"
+  + " from STANDBY to DOWNGRADE_ACTIVE");
+  store.cancelRequestedCompaction(compaction);
+  return false;
+}
+
 MonitoredTask status = null;
 boolean requestNeedsCancellation = true;
 /*

http://git-wip-us.apache.org/repos/asf/hbase/blob/25e94212/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index af2f3b5..440a838 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2472,7 +2472,7 @@ public class HRegionServer extends HasThread implements
* @return Return the object that implements the replication
* source executorService.
*/
-  @VisibleForTesting
+  @Override
   public ReplicationSourceService getReplicationSourceService() {
 return replicationSourceHandler;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/25e94212/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
--
diff --git 

[14/31] hbase git commit: HBASE-19781 Add a new cluster state flag for synchronous replication

2018-05-08 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/421d39b8/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
index 8911982..f5eca39 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
@@ -67,9 +68,9 @@ public class TestHBaseFsckReplication {
 String peerId1 = "1";
 String peerId2 = "2";
 peerStorage.addPeer(peerId1, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
-  true);
+  true, SyncReplicationState.NONE);
 peerStorage.addPeer(peerId2, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
-  true);
+  true, SyncReplicationState.NONE);
 for (int i = 0; i < 10; i++) {
   queueStorage.addWAL(ServerName.valueOf("localhost", 1 + i, 10 + i), peerId1,
 "file-" + i);

http://git-wip-us.apache.org/repos/asf/hbase/blob/421d39b8/hbase-shell/src/main/ruby/hbase/replication_admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
index d1f1344..5f86365 100644
--- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
@@ -20,6 +20,7 @@
 include Java
 
java_import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil
+java_import org.apache.hadoop.hbase.replication.SyncReplicationState
 java_import org.apache.hadoop.hbase.replication.ReplicationPeerConfig
 java_import org.apache.hadoop.hbase.util.Bytes
 java_import org.apache.hadoop.hbase.zookeeper.ZKConfig
@@ -338,6 +339,20 @@ module Hbase
   '!' + ReplicationPeerConfigUtil.convertToString(tableCFs)
 end
 
+# Transit current cluster to a new state in the specified synchronous
+# replication peer
+def transit_peer_sync_replication_state(id, state)
+  if 'ACTIVE'.eql?(state)
+@admin.transitReplicationPeerSyncReplicationState(id, SyncReplicationState::ACTIVE)
+  elsif 'DOWNGRADE_ACTIVE'.eql?(state)
+@admin.transitReplicationPeerSyncReplicationState(id, SyncReplicationState::DOWNGRADE_ACTIVE)
+  elsif 'STANDBY'.eql?(state)
+@admin.transitReplicationPeerSyncReplicationState(id, SyncReplicationState::STANDBY)
+  else
+raise(ArgumentError, 'synchronous replication state must be ACTIVE, DOWNGRADE_ACTIVE or STANDBY')
+  end
+end
+
 
#--
 # Enables a table's replication switch
 def enable_tablerep(table_name)

http://git-wip-us.apache.org/repos/asf/hbase/blob/421d39b8/hbase-shell/src/main/ruby/shell.rb
--
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index ab07a79..ba4d154 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -391,6 +391,7 @@ Shell.load_command_group(
 get_peer_config
 list_peer_configs
 update_peer_config
+transit_peer_sync_replication_state
   ]
 )
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/421d39b8/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
index f3ab749..f2ec014 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
@@ -39,8 +39,8 @@ EOF
 peers = replication_admin.list_peers
 
 formatter.header(%w[PEER_ID CLUSTER_KEY ENDPOINT_CLASSNAME
-REMOTE_ROOT_DIR STATE REPLICATE_ALL 
-NAMESPACES TABLE_CFS BANDWIDTH
+REMOTE_ROOT_DIR SYNC_REPLICATION_STATE STATE
+REPLICATE_ALL NAMESPACES TABLE_CFS BANDWIDTH

[23/31] hbase git commit: HBASE-20458 Support removing a WAL from LogRoller

2018-05-08 Thread zhangduo
HBASE-20458 Support removing a WAL from LogRoller


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7adb0555
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7adb0555
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7adb0555

Branch: refs/heads/HBASE-19064
Commit: 7adb0555eb720eb93d7694b02a4d3cf91413d107
Parents: 3c0dfa4
Author: Guanghao Zhang 
Authored: Mon Apr 23 16:31:54 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../hadoop/hbase/regionserver/LogRoller.java| 29 +--
 .../hbase/regionserver/wal/AbstractFSWAL.java   |  7 +-
 .../regionserver/wal/WALClosedException.java| 47 ++
 .../hbase/regionserver/TestLogRoller.java   | 90 
 .../regionserver/wal/AbstractTestFSWAL.java |  9 ++
 5 files changed, 171 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7adb0555/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
index 55c5219..ab0083f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
 import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -30,6 +32,7 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL;
 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
+import org.apache.hadoop.hbase.regionserver.wal.WALClosedException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -177,17 +180,24 @@ public class LogRoller extends HasThread implements Closeable {
   rollLock.lock(); // FindBugs UL_UNRELEASED_LOCK_EXCEPTION_PATH
   try {
 this.lastrolltime = now;
-for (Entry<WAL, Boolean> entry : walNeedsRoll.entrySet()) {
+for (Iterator<Entry<WAL, Boolean>> iter = walNeedsRoll.entrySet().iterator(); iter
+.hasNext();) {
+  Entry<WAL, Boolean> entry = iter.next();
   final WAL wal = entry.getKey();
   // Force the roll if the logroll.period is elapsed or if a roll was 
requested.
   // The returned value is an array of actual region names.
-  final byte [][] regionsToFlush = wal.rollWriter(periodic ||
-  entry.getValue().booleanValue());
-  walNeedsRoll.put(wal, Boolean.FALSE);
-  if (regionsToFlush != null) {
-for (byte[] r : regionsToFlush) {
-  scheduleFlush(r);
+  try {
+final byte[][] regionsToFlush =
+wal.rollWriter(periodic || entry.getValue().booleanValue());
+walNeedsRoll.put(wal, Boolean.FALSE);
+if (regionsToFlush != null) {
+  for (byte[] r : regionsToFlush) {
+scheduleFlush(r);
+  }
 }
+  } catch (WALClosedException e) {
+LOG.warn("WAL has been closed. Skipping rolling of writer and just 
remove it", e);
+iter.remove();
   }
 }
   } catch (FailedLogCloseException e) {
@@ -252,4 +262,9 @@ public class LogRoller extends HasThread implements 
Closeable {
 running = false;
 interrupt();
   }
+
+  @VisibleForTesting
+  Map getWalNeedsRoll() {
+return this.walNeedsRoll;
+  }
 }
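
For readers skimming the hunk above: the key change is swapping a for-each loop for an explicit iterator so that a WAL which throws WALClosedException can be dropped from the tracking map mid-iteration. A minimal standalone sketch of that pattern, with stand-in types for WAL and the exception (not the real HBase classes):

import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class RollLoopSketch {
  // Illustrative stand-ins for WAL and WALClosedException.
  interface Wal { void rollWriter(boolean force) throws Exception; }
  static class WalClosedException extends Exception {}

  private final Map<Wal, Boolean> walNeedsRoll = new ConcurrentHashMap<>();

  void rollAll(boolean periodic) {
    // Explicit iterator so a dead WAL can be removed in place; ConcurrentHashMap
    // iterators support remove() without ConcurrentModificationException.
    for (Iterator<Map.Entry<Wal, Boolean>> iter = walNeedsRoll.entrySet().iterator();
        iter.hasNext();) {
      Map.Entry<Wal, Boolean> entry = iter.next();
      try {
        entry.getKey().rollWriter(periodic || entry.getValue());
        walNeedsRoll.put(entry.getKey(), Boolean.FALSE);
      } catch (WalClosedException e) {
        iter.remove(); // the WAL is already closed; stop tracking it
      } catch (Exception e) {
        // other roll failures are handled elsewhere; elided in this sketch
      }
    }
  }
}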

http://git-wip-us.apache.org/repos/asf/hbase/blob/7adb0555/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 4816d77..2c0c72b1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -750,15 +750,14 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
   public byte[][] rollWriter(boolean force) throws FailedLogCloseException, IOException

[30/31] hbase git commit: HBASE-20432 Cleanup related resources when remove a sync replication peer

2018-05-08 Thread zhangduo
HBASE-20432 Cleanup related resources when remove a sync replication peer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/56b7707d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/56b7707d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/56b7707d

Branch: refs/heads/HBASE-19064
Commit: 56b7707d4f80e64550c3de0fd103de23b86d7f50
Parents: 7adb055
Author: huzheng 
Authored: Wed Apr 18 20:38:33 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../master/replication/RemovePeerProcedure.java | 10 +
 .../ReplaySyncReplicationWALManager.java|  8 
 .../replication/SyncReplicationTestBase.java| 45 +---
 .../replication/TestSyncReplicationActive.java  |  9 ++--
 .../replication/TestSyncReplicationStandBy.java | 31 --
 5 files changed, 89 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/56b7707d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
index 82dc07e..7335fe0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
@@ -66,9 +66,19 @@ public class RemovePeerProcedure extends ModifyPeerProcedure {
 env.getReplicationPeerManager().removePeer(peerId);
   }
 
+  private void removeRemoteWALs(MasterProcedureEnv env) throws IOException {
+ReplaySyncReplicationWALManager remoteWALManager =
+env.getMasterServices().getReplaySyncReplicationWALManager();
+remoteWALManager.removePeerRemoteWALs(peerId);
+remoteWALManager.removePeerReplayWALDir(peerId);
+  }
+
   @Override
   protected void postPeerModification(MasterProcedureEnv env)
   throws IOException, ReplicationException {
+if (peerConfig.isSyncReplication()) {
+  removeRemoteWALs(env);
+}
 env.getReplicationPeerManager().removeAllQueuesAndHFileRefs(peerId);
 if (peerConfig.isSerial()) {
   env.getReplicationPeerManager().removeAllLastPushedSeqIds(peerId);

http://git-wip-us.apache.org/repos/asf/hbase/blob/56b7707d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java
index 72f5c37..eac5aa4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java
@@ -115,6 +115,14 @@ public class ReplaySyncReplicationWALManager {
     }
   }
 
+  public void removePeerRemoteWALs(String peerId) throws IOException {
+    Path remoteWALDir = getPeerRemoteWALDir(peerId);
+    if (fs.exists(remoteWALDir) && !fs.delete(remoteWALDir, true)) {
+      throw new IOException(
+          "Failed to remove remote WALs dir " + remoteWALDir + " for peer id=" + peerId);
+    }
+  }
+
   public void initPeerWorkers(String peerId) {
     BlockingQueue<ServerName> servers = new LinkedBlockingQueue<>();
     services.getServerManager().getOnlineServers().keySet()
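
The guard in removePeerRemoteWALs above is worth calling out: FileSystem.delete can return false without throwing, so the commit converts that silent failure into an explicit IOException. A small sketch of the same idiom (the path and class names here are illustrative, not from the commit):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class RemoveDirSketch {
  // Delete a directory tree only if present, and surface a false return as an error.
  static void deleteRecursivelyIfExists(FileSystem fs, Path dir) throws IOException {
    if (fs.exists(dir) && !fs.delete(dir, true)) {
      // delete() returning false means the FS refused without throwing; fail loudly.
      throw new IOException("Failed to remove dir " + dir);
    }
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    deleteRecursivelyIfExists(fs, new Path("/hbase/remoteWALs/1")); // hypothetical peer dir
  }
}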

http://git-wip-us.apache.org/repos/asf/hbase/blob/56b7707d/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
index 0d5fce8..de679be 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.replication;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -71,6 +72,10 @@ public class SyncReplicationTestBase {
 
   protected static String PEER_ID = "1";
 
+  protected static Path remoteWALDir1;
+
+  

[06/31] hbase git commit: HBASE-20505 PE should support multi column family read and write cases

2018-05-08 Thread zhangduo
HBASE-20505 PE should support multi column family read and write cases


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1825af45
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1825af45
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1825af45

Branch: refs/heads/HBASE-19064
Commit: 1825af45b328cd54680c5c552f07bb12c4705fdb
Parents: 3a2a76f
Author: Andrew Purtell 
Authored: Fri May 4 18:57:21 2018 -0700
Committer: Andrew Purtell 
Committed: Mon May 7 18:39:02 2018 -0700

--
 .../hadoop/hbase/PerformanceEvaluation.java | 380 +++
 1 file changed, 229 insertions(+), 151 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1825af45/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 42dc984..33267e0 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -139,8 +139,9 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   public static final String TABLE_NAME = "TestTable";
-  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
-  public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0);
+  public static final String FAMILY_NAME_BASE = "info";
+  public static final byte[] FAMILY_ZERO = Bytes.toBytes("info0");
+  public static final byte[] COLUMN_ZERO = Bytes.toBytes("" + 0);
   public static final int DEFAULT_VALUE_LENGTH = 1000;
   public static final int ROW_LENGTH = 26;
 
@@ -347,11 +348,13 @@ public class PerformanceEvaluation extends Configured implements Tool {
     byte[][] splits = getSplits(opts);
 
     // recreate the table when user has requested presplit or when existing
-    // {RegionSplitPolicy,replica count} does not match requested.
+    // {RegionSplitPolicy,replica count} does not match requested, or when the
+    // number of column families does not match requested.
     if ((exists && opts.presplitRegions != DEFAULT_OPTS.presplitRegions)
       || (!isReadCmd && desc != null &&
           !StringUtils.equals(desc.getRegionSplitPolicyClassName(), opts.splitPolicy))
-      || (!isReadCmd && desc != null && desc.getRegionReplication() != opts.replicas)) {
+      || (!isReadCmd && desc != null && desc.getRegionReplication() != opts.replicas)
+      || (desc != null && desc.getColumnFamilyCount() != opts.families)) {
       needsDelete = true;
       // wait, why did it delete my table?!?
       LOG.debug(MoreObjects.toStringHelper("needsDelete")
@@ -362,6 +365,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
         .add("presplit", opts.presplitRegions)
         .add("splitPolicy", opts.splitPolicy)
         .add("replicas", opts.replicas)
+        .add("families", opts.families)
         .toString());
     }
 
@@ -393,24 +397,27 @@ public class PerformanceEvaluation extends Configured implements Tool {
    * Create an HTableDescriptor from provided TestOptions.
    */
   protected static HTableDescriptor getTableDescriptor(TestOptions opts) {
-    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(opts.tableName));
-    HColumnDescriptor family = new HColumnDescriptor(FAMILY_NAME);
-    family.setDataBlockEncoding(opts.blockEncoding);
-    family.setCompressionType(opts.compression);
-    family.setBloomFilterType(opts.bloomType);
-    family.setBlocksize(opts.blockSize);
-    if (opts.inMemoryCF) {
-      family.setInMemory(true);
-    }
-    family.setInMemoryCompaction(opts.inMemoryCompaction);
-    desc.addFamily(family);
+    HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(opts.tableName));
+    for (int family = 0; family < opts.families; family++) {
+      byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family);
+      HColumnDescriptor familyDesc = new HColumnDescriptor(familyName);
+      familyDesc.setDataBlockEncoding(opts.blockEncoding);
+      familyDesc.setCompressionType(opts.compression);
+      familyDesc.setBloomFilterType(opts.bloomType);
+      familyDesc.setBlocksize(opts.blockSize);
+      if (opts.inMemoryCF) {
+        familyDesc.setInMemory(true);
+      }
+      familyDesc.setInMemoryCompaction(opts.inMemoryCompaction);
+      tableDesc.addFamily(familyDesc);
+    }
     if (opts.replicas != DEFAULT_OPTS.replicas) {
-      desc.setRegionReplication(opts.replicas);
+      tableDesc.setRegionReplication(opts.replicas);
     }
     if
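
The hunk above is cut off by the archive, but the shape of the change is clear: getTableDescriptor now loops over opts.families instead of adding one hard-coded "info" family. A compact sketch of the same loop, stripped of the PE-specific encoding/compression options (class and method names here are illustrative):

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public final class MultiFamilyDescriptorSketch {
  // Build a descriptor with N identically-configured families: info0, info1, ...
  static HTableDescriptor descriptorWithFamilies(String table, int families) {
    HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(table));
    for (int i = 0; i < families; i++) {
      tableDesc.addFamily(new HColumnDescriptor(Bytes.toBytes("info" + i)));
    }
    return tableDesc;
  }
}

With this in place, the new families option only has to thread a count through to the loop; every read/write command then works against info0..infoN-1.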

[11/31] hbase git commit: HBASE-19083 Introduce a new log writer which can write to two HDFSes

2018-05-08 Thread zhangduo
HBASE-19083 Introduce a new log writer which can write to two HDFSes


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a9bf4ebb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a9bf4ebb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a9bf4ebb

Branch: refs/heads/HBASE-19064
Commit: a9bf4ebb1557119ff27c8c9250d7dde17a2a8948
Parents: 1825af4
Author: zhangduo 
Authored: Thu Jan 11 21:08:02 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../hbase/regionserver/wal/AsyncFSWAL.java  |  21 +--
 .../regionserver/wal/CombinedAsyncWriter.java   | 134 ++
 .../hbase/regionserver/wal/DualAsyncFSWAL.java  |  67 +
 .../wal/AbstractTestProtobufLog.java| 110 +++
 .../regionserver/wal/ProtobufLogTestHelper.java |  99 ++
 .../regionserver/wal/TestAsyncProtobufLog.java  |  32 +
 .../wal/TestCombinedAsyncWriter.java| 136 +++
 .../hbase/regionserver/wal/TestProtobufLog.java |  14 +-
 .../regionserver/wal/WriterOverAsyncWriter.java |  63 +
 9 files changed, 533 insertions(+), 143 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a9bf4ebb/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index e34818f..0bee9d6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -607,12 +607,16 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
     }
   }
 
-  @Override
-  protected AsyncWriter createWriterInstance(Path path) throws IOException {
+  protected final AsyncWriter createAsyncWriter(FileSystem fs, Path path) throws IOException {
     return AsyncFSWALProvider.createAsyncWriter(conf, fs, path, false, eventLoopGroup,
       channelClass);
   }
 
+  @Override
+  protected AsyncWriter createWriterInstance(Path path) throws IOException {
+    return createAsyncWriter(fs, path);
+  }
+
   private void waitForSafePoint() {
     consumeLock.lock();
     try {
@@ -632,13 +636,12 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
     }
   }
 
-  private long closeWriter() {
-    AsyncWriter oldWriter = this.writer;
-    if (oldWriter != null) {
-      long fileLength = oldWriter.getLength();
+  protected final long closeWriter(AsyncWriter writer) {
+    if (writer != null) {
+      long fileLength = writer.getLength();
       closeExecutor.execute(() -> {
         try {
-          oldWriter.close();
+          writer.close();
         } catch (IOException e) {
           LOG.warn("close old writer failed", e);
         }
@@ -654,7 +657,7 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
       throws IOException {
     Preconditions.checkNotNull(nextWriter);
     waitForSafePoint();
-    long oldFileLen = closeWriter();
+    long oldFileLen = closeWriter(this.writer);
     logRollAndSetupWalProps(oldPath, newPath, oldFileLen);
     this.writer = nextWriter;
     if (nextWriter instanceof AsyncProtobufLogWriter) {
@@ -679,7 +682,7 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
   @Override
   protected void doShutdown() throws IOException {
     waitForSafePoint();
-    closeWriter();
+    closeWriter(this.writer);
     closeExecutor.shutdown();
     try {
       if (!closeExecutor.awaitTermination(waitOnShutdownInSeconds, TimeUnit.SECONDS)) {
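
The refactor above generalizes the private closeWriter() into closeWriter(AsyncWriter) so a subclass writing to two filesystems can close either writer. The underlying idiom is handing the old writer to a dedicated executor, so log rolling never blocks on a slow close. A standalone sketch of that idiom (names are illustrative, not the real classes):

import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public final class AsyncCloseSketch {
  private final ExecutorService closeExecutor = Executors.newSingleThreadExecutor();

  // Close the old writer off the critical path: the roll caller records the final
  // file length immediately and never waits for the close to complete.
  long closeInBackground(Closeable writer, long length) {
    if (writer == null) {
      return 0;
    }
    closeExecutor.execute(() -> {
      try {
        writer.close();
      } catch (IOException e) {
        // a failed close must not fail the roll; the real code just logs it
      }
    });
    return length;
  }
}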

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9bf4ebb/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java
new file mode 100644
index 0000000..8ecfede
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * 
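
The archive cuts CombinedAsyncWriter off right after its license header. Going by the commit message, it is a writer that fans every operation out to one delegate per HDFS. A rough standalone sketch of that shape, with a stand-in Writer interface rather than the real AsyncWriter API:

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

interface Writer {
  void append(byte[] entry) throws IOException;
  void sync() throws IOException;
  void close() throws IOException;
  long getLength();
}

class CombinedWriterSketch implements Writer {
  private final List<Writer> writers;

  CombinedWriterSketch(Writer... writers) {
    this.writers = Arrays.asList(writers);
  }

  @Override public void append(byte[] entry) throws IOException {
    for (Writer w : writers) { w.append(entry); } // every cluster sees every edit
  }

  @Override public void sync() throws IOException {
    for (Writer w : writers) { w.sync(); } // durable only once all clusters have it
  }

  @Override public void close() throws IOException {
    for (Writer w : writers) { w.close(); }
  }

  @Override public long getLength() {
    return writers.get(0).getLength(); // lengths should match across clusters
  }
}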

[20/31] hbase git commit: HBASE-19957 General framework to transit sync replication state

2018-05-08 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/b2c2e9e1/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java
new file mode 100644
index 0000000..92f2c52
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import java.util.Optional;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Get the information for a sync replication peer.
+ */
+@InterfaceAudience.Private
+public interface SyncReplicationPeerInfoProvider {
+
+  /**
+   * Return the peer id and remote WAL directory if the region is synchronously replicated and the
+   * state is {@link SyncReplicationState#ACTIVE}.
+   */
+  Optional<Pair<String, String>> getPeerIdAndRemoteWALDir(RegionInfo info);
+
+  /**
+   * Check whether the given region is contained in a sync replication peer which is in the given
+   * state.
+   */
+  boolean isInState(RegionInfo info, SyncReplicationState state);
+}
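
A hypothetical call site for the interface above, to make the Optional/Pair contract concrete: a remote WAL directory only comes back when the region belongs to an ACTIVE sync replication peer, so the Optional doubles as the "should this region dual-write?" test.

import java.util.Optional;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Pair;

public final class PeerInfoProviderUsageSketch {
  // Sketch only; 'provider' and 'region' would come from the region server internals.
  static void logPeerInfo(SyncReplicationPeerInfoProvider provider, RegionInfo region) {
    Optional<Pair<String, String>> info = provider.getPeerIdAndRemoteWALDir(region);
    info.ifPresent(pair -> System.out.println(
        "peer=" + pair.getFirst() + ", remoteWALDir=" + pair.getSecond()));
  }
}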

http://git-wip-us.apache.org/repos/asf/hbase/blob/b2c2e9e1/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java
new file mode 100644
index 0000000..32159e6
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import java.util.Optional;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.replication.ReplicationPeer;
+import org.apache.hadoop.hbase.replication.ReplicationPeers;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+class SyncReplicationPeerInfoProviderImpl implements SyncReplicationPeerInfoProvider {
+
+  private final ReplicationPeers replicationPeers;
+
+  private final SyncReplicationPeerMappingManager mapping;
+
+  SyncReplicationPeerInfoProviderImpl(ReplicationPeers replicationPeers,
+      SyncReplicationPeerMappingManager mapping) {
+    this.replicationPeers = replicationPeers;
+    this.mapping = mapping;
+  }
+
+  @Override
+  public Optional<Pair<String, String>> getPeerIdAndRemoteWALDir(RegionInfo info) {
+    String peerId = mapping.getPeerId(info);
+    if (peerId == null) {
+      return Optional.empty();
+

[15/31] hbase git commit: HBASE-19781 Add a new cluster state flag for synchronous replication

2018-05-08 Thread zhangduo
HBASE-19781 Add a new cluster state flag for synchronous replication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/421d39b8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/421d39b8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/421d39b8

Branch: refs/heads/HBASE-19064
Commit: 421d39b8d273e75bed665bfb6eedb6e9e03d8e73
Parents: ef1fd9d
Author: Guanghao Zhang 
Authored: Mon Jan 22 11:44:49 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |  39 +
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  31 
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|   7 +
 .../hbase/client/ConnectionImplementation.java  |   9 ++
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  26 +++
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  15 ++
 .../client/ShortCircuitMasterConnection.java|   9 ++
 .../replication/ReplicationPeerConfigUtil.java  |  26 +--
 .../replication/ReplicationPeerDescription.java |  10 +-
 .../hbase/replication/SyncReplicationState.java |  48 ++
 .../hbase/shaded/protobuf/RequestConverter.java |  10 ++
 .../src/main/protobuf/Master.proto  |   4 +
 .../src/main/protobuf/MasterProcedure.proto |   4 +
 .../src/main/protobuf/Replication.proto |  20 +++
 .../replication/ReplicationPeerStorage.java |  18 ++-
 .../hbase/replication/ReplicationUtils.java |   1 +
 .../replication/ZKReplicationPeerStorage.java   |  61 +--
 .../replication/TestReplicationStateBasic.java  |  23 ++-
 .../TestZKReplicationPeerStorage.java   |  12 +-
 .../hbase/coprocessor/MasterObserver.java   |  23 +++
 .../org/apache/hadoop/hbase/master/HMaster.java |  12 ++
 .../hbase/master/MasterCoprocessorHost.java |  21 +++
 .../hadoop/hbase/master/MasterRpcServices.java  |  17 ++
 .../hadoop/hbase/master/MasterServices.java |   9 ++
 .../procedure/PeerProcedureInterface.java   |   2 +-
 .../replication/ReplicationPeerManager.java |  51 +-
 ...ransitPeerSyncReplicationStateProcedure.java | 159 +++
 .../hbase/security/access/AccessController.java |   8 +
 .../replication/TestReplicationAdmin.java   |  62 
 .../hbase/master/MockNoopMasterServices.java|   8 +-
 .../cleaner/TestReplicationHFileCleaner.java|   4 +-
 .../TestReplicationTrackerZKImpl.java   |   6 +-
 .../TestReplicationSourceManager.java   |   3 +-
 .../security/access/TestAccessController.java   |  16 ++
 .../hbase/util/TestHBaseFsckReplication.java|   5 +-
 .../src/main/ruby/hbase/replication_admin.rb|  15 ++
 hbase-shell/src/main/ruby/shell.rb  |   1 +
 .../src/main/ruby/shell/commands/list_peers.rb  |   6 +-
 .../transit_peer_sync_replication_state.rb  |  44 +
 .../test/ruby/hbase/replication_admin_test.rb   |  24 +++
 40 files changed, 816 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/421d39b8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 331f2d1..39542e4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@@ -2657,6 +2658,44 @@ public interface Admin extends Abortable, Closeable {
   List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern) throws IOException;
 
   /**
+   * Transit current cluster to a new state in a synchronous replication peer.
+   * @param peerId a short name that identifies the peer
+   * @param state a new state of current cluster
+   * @throws IOException if a remote or network exception occurs
+   */
+  void transitReplicationPeerSyncReplicationState(String peerId, SyncReplicationState state)
+      throws IOException;
+
+  /**
+   * Transit current cluster to a new state in a synchronous replication peer. But does not block
+   * and wait for it.
+   * <p>
+   * You can use Future.get(long, TimeUnit) to wait on the
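
A sketch of driving the new Admin method from client code; the peer id and target state below are illustrative values, not defaults:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.SyncReplicationState;

public final class TransitStateSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Demote the local cluster for peer "1" before promoting the other side.
      admin.transitReplicationPeerSyncReplicationState("1",
          SyncReplicationState.DOWNGRADE_ACTIVE);
    }
  }
}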

[24/31] hbase git commit: HBASE-19999 Remove the SYNC_REPLICATION_ENABLED flag

2018-05-08 Thread zhangduo
HBASE-19999 Remove the SYNC_REPLICATION_ENABLED flag


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5953d149
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5953d149
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5953d149

Branch: refs/heads/HBASE-19064
Commit: 5953d149c69bad380f4a0bf99d81476079b48203
Parents: 073a99a
Author: Guanghao Zhang 
Authored: Fri Mar 9 11:30:25 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../hbase/replication/ReplicationUtils.java  |  2 --
 .../hadoop/hbase/regionserver/HRegionServer.java | 13 -
 .../hbase/wal/SyncReplicationWALProvider.java| 19 ++-
 .../org/apache/hadoop/hbase/wal/WALFactory.java  | 18 --
 .../hbase/replication/TestSyncReplication.java   |  1 -
 .../master/TestRecoverStandbyProcedure.java  |  2 --
 .../wal/TestSyncReplicationWALProvider.java  |  2 --
 7 files changed, 38 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5953d149/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index e402d0f..cb22f57 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -37,8 +37,6 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public final class ReplicationUtils {
 
-  public static final String SYNC_REPLICATION_ENABLED = "hbase.replication.sync.enabled";
-
   public static final String REPLICATION_ATTR_NAME = "__rep__";
 
   public static final String REMOTE_WAL_DIR_NAME = "remoteWALs";

http://git-wip-us.apache.org/repos/asf/hbase/blob/5953d149/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 2fb4f67..af2f3b5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1804,10 +1804,8 @@ public class HRegionServer extends HasThread implements
   private void setupWALAndReplication() throws IOException {
     boolean isMasterNoTableOrSystemTableOnly = this instanceof HMaster &&
       (!LoadBalancer.isTablesOnMaster(conf) || LoadBalancer.isSystemTablesOnlyOnMaster(conf));
-    if (isMasterNoTableOrSystemTableOnly) {
-      conf.setBoolean(ReplicationUtils.SYNC_REPLICATION_ENABLED, false);
-    }
-    WALFactory factory = new WALFactory(conf, serverName.toString());
+    WALFactory factory =
+        new WALFactory(conf, serverName.toString(), !isMasterNoTableOrSystemTableOnly);
     if (!isMasterNoTableOrSystemTableOnly) {
       // TODO Replication make assumptions here based on the default filesystem impl
       Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
@@ -1926,11 +1924,8 @@ public class HRegionServer extends HasThread implements
     }
     this.executorService.startExecutorService(ExecutorType.RS_REFRESH_PEER,
       conf.getInt("hbase.regionserver.executor.refresh.peer.threads", 2));
-
-    if (conf.getBoolean(ReplicationUtils.SYNC_REPLICATION_ENABLED, false)) {
-      this.executorService.startExecutorService(ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL,
-        conf.getInt("hbase.regionserver.executor.replay.sync.replication.wal.threads", 2));
-    }
+    this.executorService.startExecutorService(ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL,
+      conf.getInt("hbase.regionserver.executor.replay.sync.replication.wal.threads", 1));
 
     Threads.setDaemonThreadRunning(this.walRoller.getThread(), getName() + ".logRoller",
         uncaughtExceptionHandler);

http://git-wip-us.apache.org/repos/asf/hbase/blob/5953d149/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
index 282aa21..54287fe 100644
--- 

[08/31] hbase git commit: HBASE-19782 Reject the replication request when peer is DA or A state

2018-05-08 Thread zhangduo
HBASE-19782 Reject the replication request when peer is DA or A state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/654ee498
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/654ee498
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/654ee498

Branch: refs/heads/HBASE-19064
Commit: 654ee498e3ca4b65c689e254c87f311bc21c8b29
Parents: e53ab13
Author: huzheng 
Authored: Fri Mar 2 18:05:29 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../hbase/protobuf/ReplicationProtbufUtil.java  |  2 +-
 .../hadoop/hbase/regionserver/HRegion.java  |  2 +-
 .../hbase/regionserver/HRegionServer.java   |  5 +--
 .../hbase/regionserver/RSRpcServices.java   | 25 +--
 .../RejectReplicationRequestStateChecker.java   | 45 
 .../ReplaySyncReplicationWALCallable.java   | 24 ++-
 .../replication/regionserver/Replication.java   |  2 +-
 .../regionserver/ReplicationSink.java   | 16 +++
 .../SyncReplicationPeerInfoProvider.java| 11 ++---
 .../SyncReplicationPeerInfoProviderImpl.java| 13 +++---
 .../SyncReplicationPeerMappingManager.java  |  5 +--
 .../hbase/wal/SyncReplicationWALProvider.java   |  7 +--
 .../replication/SyncReplicationTestBase.java| 32 ++
 .../replication/TestSyncReplicationActive.java  | 13 +-
 .../regionserver/TestReplicationSink.java   |  5 +--
 .../regionserver/TestWALEntrySinkFilter.java|  3 +-
 .../wal/TestSyncReplicationWALProvider.java |  6 +--
 17 files changed, 163 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/654ee498/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
index 81dd59e..e01f881 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.PrivateCellUtil;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALKeyImpl;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.io.SizedCellScanner;
@@ -45,7 +46,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.wal.WAL.Entry;
 
 @InterfaceAudience.Private
 public class ReplicationProtbufUtil {

http://git-wip-us.apache.org/repos/asf/hbase/blob/654ee498/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 1865144..cb7ba6e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1981,7 +1981,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Region
   private boolean shouldForbidMajorCompaction() {
     if (rsServices != null && rsServices.getReplicationSourceService() != null) {
       return rsServices.getReplicationSourceService().getSyncReplicationPeerInfoProvider()
-          .checkState(getRegionInfo(), ForbidMajorCompactionChecker.get());
+          .checkState(getRegionInfo().getTable(), ForbidMajorCompactionChecker.get());
     }
     return false;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/654ee498/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 440a838..ab571c6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2478,10 +2478,9 @@ public class 

[17/31] hbase git commit: HBASE-19864 Use protobuf instead of enum.ordinal to store SyncReplicationState

2018-05-08 Thread zhangduo
HBASE-19864 Use protobuf instead of enum.ordinal to store SyncReplicationState

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/df0a7d17
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/df0a7d17
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/df0a7d17

Branch: refs/heads/HBASE-19064
Commit: df0a7d177106c9f306c16880c3792e41f383ee5e
Parents: baf34a9
Author: Guanghao Zhang 
Authored: Fri Jan 26 16:50:48 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../replication/ReplicationPeerConfigUtil.java  | 22 +++---
 .../hbase/replication/SyncReplicationState.java | 17 ++
 .../hbase/shaded/protobuf/RequestConverter.java |  7 +++---
 .../src/main/protobuf/Replication.proto | 13 +++
 .../replication/ZKReplicationPeerStorage.java   | 24 +---
 .../hadoop/hbase/master/MasterRpcServices.java  |  9 
 ...ransitPeerSyncReplicationStateProcedure.java |  9 
 .../TestReplicationSourceManager.java   |  2 +-
 8 files changed, 67 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/df0a7d17/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index 6cbe05b..331795c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -403,7 +403,7 @@ public final class ReplicationPeerConfigUtil {
         ReplicationProtos.ReplicationState.State.ENABLED == desc.getState().getState();
     ReplicationPeerConfig config = convert(desc.getConfig());
     return new ReplicationPeerDescription(desc.getId(), enabled, config,
-        SyncReplicationState.valueOf(desc.getSyncReplicationState().getNumber()));
+        toSyncReplicationState(desc.getSyncReplicationState()));
   }
 
   public static ReplicationProtos.ReplicationPeerDescription
@@ -411,17 +411,33 @@ public final class ReplicationPeerConfigUtil {
     ReplicationProtos.ReplicationPeerDescription.Builder builder =
         ReplicationProtos.ReplicationPeerDescription.newBuilder();
     builder.setId(desc.getPeerId());
+
     ReplicationProtos.ReplicationState.Builder stateBuilder =
         ReplicationProtos.ReplicationState.newBuilder();
     stateBuilder.setState(desc.isEnabled() ? ReplicationProtos.ReplicationState.State.ENABLED :
         ReplicationProtos.ReplicationState.State.DISABLED);
     builder.setState(stateBuilder.build());
+
     builder.setConfig(convert(desc.getPeerConfig()));
-    builder.setSyncReplicationState(
-      ReplicationProtos.SyncReplicationState.forNumber(desc.getSyncReplicationState().ordinal()));
+    builder.setSyncReplicationState(toSyncReplicationState(desc.getSyncReplicationState()));
+
     return builder.build();
   }
 
+  public static ReplicationProtos.SyncReplicationState
+      toSyncReplicationState(SyncReplicationState state) {
+    ReplicationProtos.SyncReplicationState.Builder syncReplicationStateBuilder =
+        ReplicationProtos.SyncReplicationState.newBuilder();
+    syncReplicationStateBuilder
+        .setState(ReplicationProtos.SyncReplicationState.State.forNumber(state.ordinal()));
+    return syncReplicationStateBuilder.build();
+  }
+
+  public static SyncReplicationState
+      toSyncReplicationState(ReplicationProtos.SyncReplicationState state) {
+    return SyncReplicationState.valueOf(state.getState().getNumber());
+  }
+
   public static ReplicationPeerConfig appendTableCFsToReplicationPeerConfig(
       Map<TableName, List<String>> tableCfs, ReplicationPeerConfig peerConfig) {
     ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder(peerConfig);
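
The two toSyncReplicationState overloads above pair enum ordinal() with the proto field number, so they are exact inverses only while the Java enum and the proto enum keep their constants in the same order. A JDK-only model of that round-trip contract (the enum constants mirror SyncReplicationState but are local stand-ins):

public final class EnumProtoRoundTripSketch {
  enum SyncState { NONE, ACTIVE, DOWNGRADE_ACTIVE, STANDBY }

  static int toNumber(SyncState state) {
    return state.ordinal(); // must match the proto field numbering
  }

  static SyncState fromNumber(int number) {
    return SyncState.values()[number];
  }

  public static void main(String[] args) {
    for (SyncState s : SyncState.values()) {
      assert fromNumber(toNumber(s)) == s; // round-trip invariant
    }
  }
}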

http://git-wip-us.apache.org/repos/asf/hbase/blob/df0a7d17/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
index bd144e9..a65b144 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
+++ 

[25/31] hbase git commit: HBASE-19079 Support setting up two clusters with A and S stat

2018-05-08 Thread zhangduo
HBASE-19079 Support setting up two clusters with A and S stat


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a299641f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a299641f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a299641f

Branch: refs/heads/HBASE-19064
Commit: a299641fa6bfa5934ead500f33977fb945aad1fb
Parents: 5953d14
Author: zhangduo 
Authored: Tue Apr 10 22:35:19 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../replication/ReplicationPeerManager.java |   5 +-
 ...ransitPeerSyncReplicationStateProcedure.java |   2 +-
 .../hbase/regionserver/wal/DualAsyncFSWAL.java  |  14 ++
 .../hadoop/hbase/regionserver/wal/WALUtil.java  |  25 ++-
 .../hbase/replication/ChainWALEntryFilter.java  |  28 +--
 .../ReplaySyncReplicationWALCallable.java   |  27 ++-
 .../SyncReplicationPeerInfoProviderImpl.java|   6 +-
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |  10 +-
 .../hbase/wal/SyncReplicationWALProvider.java   |  94 ++---
 .../org/apache/hadoop/hbase/wal/WALEdit.java|   8 +-
 .../org/apache/hadoop/hbase/wal/WALFactory.java |   2 +-
 .../replication/TestReplicationAdmin.java   |  33 +--
 .../regionserver/wal/TestWALDurability.java |   2 +
 .../replication/SyncReplicationTestBase.java| 185 +
 .../hbase/replication/TestSyncReplication.java  | 207 ---
 .../replication/TestSyncReplicationActive.java  |  64 ++
 .../replication/TestSyncReplicationStandBy.java |  96 +
 17 files changed, 521 insertions(+), 287 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a299641f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 41dd6e3..229549e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -68,8 +68,9 @@ public class ReplicationPeerManager {
 
   private final ImmutableMap<SyncReplicationState, EnumSet<SyncReplicationState>>
     allowedTransition = Maps.immutableEnumMap(ImmutableMap.of(SyncReplicationState.ACTIVE,
-      EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE), SyncReplicationState.STANDBY,
-      EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE), SyncReplicationState.DOWNGRADE_ACTIVE,
+      EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE, SyncReplicationState.STANDBY),
+      SyncReplicationState.STANDBY, EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE),
+      SyncReplicationState.DOWNGRADE_ACTIVE,
       EnumSet.of(SyncReplicationState.STANDBY, SyncReplicationState.ACTIVE)));
 
   ReplicationPeerManager(ReplicationPeerStorage peerStorage, ReplicationQueueStorage queueStorage,

http://git-wip-us.apache.org/repos/asf/hbase/blob/a299641f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index cc51890..5da2b0c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
@@ -171,7 +171,7 @@ public class TransitPeerSyncReplicationStateProcedure
         }
         return Flow.HAS_MORE_STATE;
       case REPLAY_REMOTE_WAL_IN_PEER:
-        // TODO: replay remote wal when transiting from S to DA.
+        addChildProcedure(new RecoverStandbyProcedure(peerId));
         setNextState(PeerSyncReplicationStateTransitionState.REOPEN_ALL_REGIONS_IN_PEER);
         return Flow.HAS_MORE_STATE;
       case REOPEN_ALL_REGIONS_IN_PEER:

http://git-wip-us.apache.org/repos/asf/hbase/blob/a299641f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
index 0495337..a98567a 100644
--- 

[07/31] hbase git commit: HBASE-19082 Reject read/write from client but accept write from replication in state S

2018-05-08 Thread zhangduo
HBASE-19082 Reject read/write from client but accept write from replication in 
state S


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/262a0894
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/262a0894
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/262a0894

Branch: refs/heads/HBASE-19064
Commit: 262a0894b658d1d5e4f86f434715c945f20cac83
Parents: b2c2e9e
Author: zhangduo 
Authored: Mon Feb 12 18:20:18 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../org/apache/hadoop/hbase/HConstants.java |   3 -
 .../src/main/protobuf/MasterProcedure.proto |   3 +-
 .../hbase/replication/ReplicationUtils.java |   4 +
 ...ransitPeerSyncReplicationStateProcedure.java |  10 +
 .../hadoop/hbase/regionserver/HRegion.java  |   5 +-
 .../hbase/regionserver/HRegionServer.java   |   2 +-
 .../hbase/regionserver/RSRpcServices.java   |  88 ++--
 .../RejectRequestsFromClientStateChecker.java   |  44 
 .../regionserver/ReplicationSink.java   |  72 ---
 .../SyncReplicationPeerInfoProvider.java|  10 +-
 .../SyncReplicationPeerInfoProviderImpl.java|  19 +-
 .../hbase/wal/SyncReplicationWALProvider.java   |   3 +
 .../org/apache/hadoop/hbase/wal/WALFactory.java |   4 +-
 .../hbase/replication/TestSyncReplication.java  | 200 +++
 .../wal/TestSyncReplicationWALProvider.java |   8 +-
 15 files changed, 401 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/262a0894/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 522c2cf..9241682 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1355,9 +1355,6 @@ public final class HConstants {
 
   public static final String NOT_IMPLEMENTED = "Not implemented";
 
-  // TODO: need to find a better place to hold it.
-  public static final String SYNC_REPLICATION_ENABLED = "hbase.replication.sync.enabled";
-
   private HConstants() {
 // Can't be instantiated with this ctor.
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/262a0894/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 67c1b43..e8b940e 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -397,7 +397,8 @@ enum PeerSyncReplicationStateTransitionState {
   REOPEN_ALL_REGIONS_IN_PEER = 5;
   TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE = 6;
   REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END = 7;
-  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 8;
+  CREATE_DIR_FOR_REMOTE_WAL = 8;
+  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 9;
 }
 
 message PeerModificationStateData {

http://git-wip-us.apache.org/repos/asf/hbase/blob/262a0894/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index e4dea83..d94cb00 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -37,6 +37,10 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public final class ReplicationUtils {
 
+  public static final String SYNC_REPLICATION_ENABLED = "hbase.replication.sync.enabled";
+
+  public static final String REPLICATION_ATTR_NAME = "__rep__";
+
   private ReplicationUtils() {
   }
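
The new REPLICATION_ATTR_NAME marker above ties into the commit's subject: a cluster in state S rejects client traffic but still accepts writes arriving through replication, so the sink needs a way to tell the two apart. One plausible shape for that distinction, sketched with mutation attributes (the boolean payload is illustrative):

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public final class ReplicationAttrSketch {
  static final String REPLICATION_ATTR_NAME = "__rep__";

  // The replication sink would tag mutations it replays...
  static Put markAsReplicationWrite(Put put) {
    put.setAttribute(REPLICATION_ATTR_NAME, Bytes.toBytes(true));
    return put;
  }

  // ...and the request checker lets tagged writes through even in state S.
  static boolean isReplicationWrite(Put put) {
    return put.getAttribute(REPLICATION_ATTR_NAME) != null;
  }
}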
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/262a0894/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index 8fc932f..69404a0 100644
--- 

[29/31] hbase git commit: HBASE-20456 Support removing a ReplicationSourceShipper for a special wal group

2018-05-08 Thread zhangduo
HBASE-20456 Support removing a ReplicationSourceShipper for a special wal group


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/35af43be
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/35af43be
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/35af43be

Branch: refs/heads/HBASE-19064
Commit: 35af43be31bb8a9a5a97625a72f089f8d37c8fbc
Parents: e4dca01
Author: zhangduo 
Authored: Tue Apr 24 22:01:21 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../hbase/regionserver/wal/AsyncFSWAL.java  |  1 +
 .../RecoveredReplicationSource.java | 13 +---
 .../RecoveredReplicationSourceShipper.java  |  7 --
 .../regionserver/ReplicationSource.java | 13 +++-
 .../regionserver/ReplicationSourceManager.java  | 19 -
 .../regionserver/ReplicationSourceShipper.java  | 20 +++--
 .../ReplicationSourceWALReader.java |  9 ++-
 .../regionserver/WALEntryStream.java|  3 +-
 .../hadoop/hbase/wal/AbstractFSWALProvider.java | 28 ---
 .../hbase/wal/SyncReplicationWALProvider.java   | 10 ++-
 .../TestReplicationSourceManager.java   |  5 +-
 .../TestSyncReplicationShipperQuit.java | 81 
 .../regionserver/TestWALEntryStream.java|  4 +-
 13 files changed, 163 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/35af43be/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index 17133ed..f630e63 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -682,6 +682,7 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
   protected void doShutdown() throws IOException {
 waitForSafePoint();
 closeWriter(this.writer);
+this.writer = null;
 closeExecutor.shutdown();
 try {
      if (!closeExecutor.awaitTermination(waitOnShutdownInSeconds, TimeUnit.SECONDS)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/35af43be/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
index a21ca44..f1bb538 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.replication.ReplicationPeer;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -144,15 +143,9 @@ public class RecoveredReplicationSource extends ReplicationSource {
   }
 
   void tryFinish() {
-    // use synchronize to make sure one last thread will clean the queue
-    synchronized (workerThreads) {
-      Threads.sleep(100);// wait a short while for other worker thread to fully exit
-      boolean allTasksDone = workerThreads.values().stream().allMatch(w -> w.isFinished());
-      if (allTasksDone) {
-        this.getSourceMetrics().clear();
-        manager.removeRecoveredSource(this);
-        LOG.info("Finished recovering queue {} with the following stats: {}", queueId, getStats());
-      }
+    if (workerThreads.isEmpty()) {
+      this.getSourceMetrics().clear();
+      manager.finishRecoveredSource(this);
     }
   }
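
The simplification above drops the sleep-and-poll synchronization: once each shipper removes itself from workerThreads as it terminates (the point of this commit), "last one out cleans up" reduces to an isEmpty() check. In miniature, under that assumption (types here are stand-ins):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public final class ShipperRemovalSketch {
  // Shippers keyed by wal group id; Thread stands in for the shipper type.
  private final Map<String, Thread> workerThreads = new ConcurrentHashMap<>();

  void onShipperExit(String walGroupId) {
    workerThreads.remove(walGroupId); // each shipper deregisters itself on exit
    tryFinish();
  }

  void tryFinish() {
    if (workerThreads.isEmpty()) {
      // all wal groups drained; the recovered source can be released
      System.out.println("source finished");
    }
  }
}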
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/35af43be/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java
index 91109cf..b0d4db0 100644
--- 

[04/31] hbase git commit: HBASE-20537 The download link is not available in the downloads webpage

2018-05-08 Thread zhangduo
HBASE-20537 The download link is not available in the downloads webpage


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/992a5e8e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/992a5e8e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/992a5e8e

Branch: refs/heads/HBASE-19064
Commit: 992a5e8e4932bdc6e498a4a036387137701df57b
Parents: bb1a935
Author: Michael Stack 
Authored: Mon May 7 10:02:09 2018 -0700
Committer: Michael Stack 
Committed: Mon May 7 10:02:09 2018 -0700

--
 src/site/xdoc/downloads.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/992a5e8e/src/site/xdoc/downloads.xml
--
diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml
index ee0e7b3..25bca02 100644
--- a/src/site/xdoc/downloads.xml
+++ b/src/site/xdoc/downloads.xml
@@ -59,8 +59,8 @@ under the License.
     <a href="https://git-wip-us.apache.org/repos/asf?p=hbase.git;a=commit;h=7483b111e4da77adbfc8062b3b22cbe7c2cb91c1">7483b111e4da77adbfc8062b3b22cbe7c2cb91c1</a>
   
   
-    <a href="http://www.apache.org/dyn/closer.lua/hbase/2.0.0/hbase-2.0.0-src.tar.gz"></a> (<a href="http://apache.org/dist/hbase/2.0.0/hbase-2.0.0-src.tar.gz.sha512">sha512</a> <a href="http://apache.org/dist/hbase/2.0.0/hbase-2.0.0-src.tar.gz.asc">asc</a>)
-    <a href="http://www.apache.org/dyn/closer.lua/hbase/2.0.0/hbase-2.0.0-bin.tar.gz"></a> (<a href="http://apache.org/dist/hbase/2.0.0/hbase-2.0.0-bin.tar.gz.sha512">sha512</a> <a href="http://apache.org/dist/hbase/2.0.0/hbase-2.0.0-bin.tar.gz.asc">asc</a>)
+    <a href="http://www.apache.org/dyn/closer.lua/hbase/2.0.0/hbase-2.0.0-src.tar.gz">src</a> (<a href="http://apache.org/dist/hbase/2.0.0/hbase-2.0.0-src.tar.gz.sha512">sha512</a> <a href="http://apache.org/dist/hbase/2.0.0/hbase-2.0.0-src.tar.gz.asc">asc</a>)
+    <a href="http://www.apache.org/dyn/closer.lua/hbase/2.0.0/hbase-2.0.0-bin.tar.gz">bin</a> (<a href="http://apache.org/dist/hbase/2.0.0/hbase-2.0.0-bin.tar.gz.sha512">sha512</a> <a href="http://apache.org/dist/hbase/2.0.0/hbase-2.0.0-bin.tar.gz.asc">asc</a>)
   
 
   



[22/31] hbase git commit: HBASE-20426 Give up replicating anything in S state

2018-05-08 Thread zhangduo
HBASE-20426 Give up replicating anything in S state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/04c7be5d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/04c7be5d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/04c7be5d

Branch: refs/heads/HBASE-19064
Commit: 04c7be5da8e829eb69ddfa03c19994be847b8dc2
Parents: 56b7707
Author: zhangduo 
Authored: Thu May 3 15:51:35 2018 +0800
Committer: zhangduo 
Committed: Tue May 8 20:15:45 2018 +0800

--
 .../src/main/protobuf/MasterProcedure.proto |  13 +-
 .../replication/AbstractPeerProcedure.java  |   4 +
 .../master/replication/ModifyPeerProcedure.java |   6 -
 .../replication/ReplicationPeerManager.java |  13 +-
 ...ransitPeerSyncReplicationStateProcedure.java |  94 +++
 .../hadoop/hbase/regionserver/LogRoller.java|  11 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  63 --
 .../regionserver/ReplicationSource.java |   1 +
 .../regionserver/ReplicationSourceManager.java  | 118 ---
 .../TestDrainReplicationQueuesForStandBy.java   | 118 +++
 10 files changed, 379 insertions(+), 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/04c7be5d/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 01e4dae..f15cb04 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -394,11 +394,14 @@ enum PeerSyncReplicationStateTransitionState {
   SET_PEER_NEW_SYNC_REPLICATION_STATE = 2;
   REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_BEGIN = 3;
   REPLAY_REMOTE_WAL_IN_PEER = 4;
-  REOPEN_ALL_REGIONS_IN_PEER = 5;
-  TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE = 6;
-  REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END = 7;
-  CREATE_DIR_FOR_REMOTE_WAL = 8;
-  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 9;
+  REMOVE_ALL_REPLICATION_QUEUES_IN_PEER = 5;
+  REOPEN_ALL_REGIONS_IN_PEER = 6;
+  TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE = 7;
+  REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END = 8;
+  SYNC_REPLICATION_SET_PEER_ENABLED = 9;
+  SYNC_REPLICATION_ENABLE_PEER_REFRESH_PEER_ON_RS = 10;
+  CREATE_DIR_FOR_REMOTE_WAL = 11;
+  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 12;
 }
 
 message PeerModificationStateData {

http://git-wip-us.apache.org/repos/asf/hbase/blob/04c7be5d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
index 6679d78..458e073 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
@@ -106,4 +106,8 @@ public abstract class AbstractPeerProcedure<TState>
     throw new UnsupportedOperationException();
   }
 
+  protected final void refreshPeer(MasterProcedureEnv env, PeerOperationType type) {
+    addChildProcedure(env.getMasterServices().getServerManager().getOnlineServersList().stream()
+      .map(sn -> new RefreshPeerProcedure(peerId, type, sn)).toArray(RefreshPeerProcedure[]::new));
+  }
 }
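
The hoisted refreshPeer helper maps every online region server to a child RefreshPeerProcedure in one stream pipeline. The same shape in miniature, with String standing in for ServerName and Runnable for the procedure type:

import java.util.List;

public final class FanOutSketch {
  static Runnable[] fanOut(List<String> servers) {
    return servers.stream()
        .map(sn -> (Runnable) () -> System.out.println("refresh " + sn))
        .toArray(Runnable[]::new); // one child task per server, scheduled together
  }
}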

http://git-wip-us.apache.org/repos/asf/hbase/blob/04c7be5d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
index 32b8ea1..56462ca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
@@ -108,12 +108,6 @@ public abstract class ModifyPeerProcedure extends AbstractPeerProcedure<PeerModificationState> {
-  protected void refreshPeer(MasterProcedureEnv env, PeerOperationType type) {
-    addChildProcedure(env.getMasterServices().getServerManager().getOnlineServersList()
-      .stream().map(sn -> new RefreshPeerProcedure(peerId, type, sn))
-      .toArray(RefreshPeerProcedure[]::new));
-  }
-
   protected ReplicationPeerConfig getOldPeerConfig() {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/04c7be5d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java