[hbase] branch branch-2.1 updated: HBASE-20305 adding options to skip deletes/puts on target when running SyncTable

2019-05-14 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new 5a987ec  HBASE-20305 adding options to skip deletes/puts on target when running SyncTable
5a987ec is described below

commit 5a987ec682cf0349c0f886b6d95607e291cdcdaf
Author: wellington 
AuthorDate: Wed Mar 28 22:12:01 2018 +0100

HBASE-20305 adding options to skip deletes/puts on target when running SyncTable

Signed-off-by: tedyu 
Signed-off-by: Andrew Purtell 
---
 .../apache/hadoop/hbase/mapreduce/SyncTable.java   |  36 ++-
 .../hadoop/hbase/mapreduce/TestSyncTable.java  | 248 -
 2 files changed, 272 insertions(+), 12 deletions(-)

diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
index dab84c4..1bb9969 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
@@ -64,7 +64,9 @@ public class SyncTable extends Configured implements Tool {
   static final String TARGET_TABLE_CONF_KEY = "sync.table.target.table.name";
  static final String SOURCE_ZK_CLUSTER_CONF_KEY = "sync.table.source.zk.cluster";
  static final String TARGET_ZK_CLUSTER_CONF_KEY = "sync.table.target.zk.cluster";
-  static final String DRY_RUN_CONF_KEY="sync.table.dry.run";
+  static final String DRY_RUN_CONF_KEY = "sync.table.dry.run";
+  static final String DO_DELETES_CONF_KEY = "sync.table.do.deletes";
+  static final String DO_PUTS_CONF_KEY = "sync.table.do.puts";
 
   Path sourceHashDir;
   String sourceTableName;
@@ -73,6 +75,8 @@ public class SyncTable extends Configured implements Tool {
   String sourceZkCluster;
   String targetZkCluster;
   boolean dryRun;
+  boolean doDeletes = true;
+  boolean doPuts = true;
 
   Counters counters;
 
@@ -144,6 +148,8 @@ public class SyncTable extends Configured implements Tool {
   initCredentialsForHBase(targetZkCluster, job);
 }
 jobConf.setBoolean(DRY_RUN_CONF_KEY, dryRun);
+jobConf.setBoolean(DO_DELETES_CONF_KEY, doDeletes);
+jobConf.setBoolean(DO_PUTS_CONF_KEY, doPuts);
 
 TableMapReduceUtil.initTableMapperJob(targetTableName, tableHash.initScan(),
     SyncMapper.class, null, null, job);
@@ -178,6 +184,8 @@ public class SyncTable extends Configured implements Tool {
 Table sourceTable;
 Table targetTable;
 boolean dryRun;
+boolean doDeletes = true;
+boolean doPuts = true;
 
 HashTable.TableHash sourceTableHash;
 HashTable.TableHash.Reader sourceHashReader;
@@ -201,7 +209,9 @@ public class SyncTable extends Configured implements Tool {
   TableOutputFormat.OUTPUT_CONF_PREFIX);
   sourceTable = openTable(sourceConnection, conf, SOURCE_TABLE_CONF_KEY);
   targetTable = openTable(targetConnection, conf, TARGET_TABLE_CONF_KEY);
-  dryRun = conf.getBoolean(SOURCE_TABLE_CONF_KEY, false);
+  dryRun = conf.getBoolean(DRY_RUN_CONF_KEY, false);
+  doDeletes = conf.getBoolean(DO_DELETES_CONF_KEY, true);
+  doPuts = conf.getBoolean(DO_PUTS_CONF_KEY, true);
 
   sourceTableHash = HashTable.TableHash.read(conf, sourceHashDir);
   LOG.info("Read source hash manifest: " + sourceTableHash);
@@ -488,7 +498,7 @@ public class SyncTable extends Configured implements Tool {
   context.getCounter(Counter.TARGETMISSINGCELLS).increment(1);
   matchingRow = false;
 
-  if (!dryRun) {
+  if (!dryRun && doPuts) {
 if (put == null) {
   put = new Put(rowKey);
 }
@@ -503,7 +513,7 @@ public class SyncTable extends Configured implements Tool {
   context.getCounter(Counter.SOURCEMISSINGCELLS).increment(1);
   matchingRow = false;
 
-  if (!dryRun) {
+  if (!dryRun && doDeletes) {
 if (delete == null) {
   delete = new Delete(rowKey);
 }
@@ -530,7 +540,7 @@ public class SyncTable extends Configured implements Tool {
 context.getCounter(Counter.DIFFERENTCELLVALUES).increment(1);
 matchingRow = false;
 
-if (!dryRun) {
+if (!dryRun && doPuts) {
   // overwrite target cell
   if (put == null) {
 put = new Put(rowKey);
@@ -711,6 +721,10 @@ public class SyncTable extends Configured implements Tool {
 System.err.println("  (defaults to cluster in classpath's 
config)");
 System.err.println(" dryrun   if true, output counters but no 
writes");
 System.err.println("  (defaults to false)");
+System.err.println(" doDeletesif false, does not perform deletes");
+System.err.println("  
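
The new behavior is driven by the sync.table.do.deletes and sync.table.do.puts job configuration keys shown above, surfaced on the command line as --doDeletes=false / --doPuts=false. A minimal sketch of driving the tool programmatically with deletes disabled; the hash directory and table names below are placeholders, not values from this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.SyncTable;
import org.apache.hadoop.util.ToolRunner;

public class SyncTableSkipDeletesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copy missing/differing cells to the target, but never issue Deletes there.
    int ret = ToolRunner.run(conf, new SyncTable(conf), new String[] {
        "--doDeletes=false",   // sets sync.table.do.deletes on the job
        "--doPuts=true",       // sets sync.table.do.puts (true is the default)
        "hdfs:///tmp/hashes",  // placeholder: output dir of a prior HashTable run
        "sourceTable",         // placeholder source table name
        "targetTable"          // placeholder target table name
    });
    System.exit(ret);
  }
}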

[hbase] branch branch-1.4 updated: HBASE-20305 Add option to SyncTable that skip deletes on target cluster

2019-05-14 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-1.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1.4 by this push:
 new 90a7c77  HBASE-20305 Add option to SyncTable that skip deletes on target cluster
90a7c77 is described below

commit 90a7c77b4f680a51c10dabc2829bee1bed95aab3
Author: wellington 
AuthorDate: Wed Mar 28 22:12:01 2018 +0100

HBASE-20305 Add option to SyncTable that skip deletes on target cluster

Change-Id: Iccbcd4a7e7ed176d8404cb2ab17e3e47663e0441

Signed-off-by: Andrew Purtell 
---
 .../apache/hadoop/hbase/mapreduce/SyncTable.java   |  40 ++-
 .../hadoop/hbase/mapreduce/TestSyncTable.java  | 280 +++--
 2 files changed, 290 insertions(+), 30 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
index ddb169e..31d5172 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
@@ -63,7 +63,9 @@ public class SyncTable extends Configured implements Tool {
   static final String TARGET_TABLE_CONF_KEY = "sync.table.target.table.name";
  static final String SOURCE_ZK_CLUSTER_CONF_KEY = "sync.table.source.zk.cluster";
  static final String TARGET_ZK_CLUSTER_CONF_KEY = "sync.table.target.zk.cluster";
-  static final String DRY_RUN_CONF_KEY="sync.table.dry.run";
+  static final String DRY_RUN_CONF_KEY = "sync.table.dry.run";
+  static final String DO_DELETES_CONF_KEY = "sync.table.do.deletes";
+  static final String DO_PUTS_CONF_KEY = "sync.table.do.puts";
 
   Path sourceHashDir;
   String sourceTableName;
@@ -72,6 +74,8 @@ public class SyncTable extends Configured implements Tool {
   String sourceZkCluster;
   String targetZkCluster;
   boolean dryRun;
+  boolean doDeletes = true;
+  boolean doPuts = true;
 
   Counters counters;
 
@@ -143,6 +147,8 @@ public class SyncTable extends Configured implements Tool {
   initCredentialsForHBase(targetZkCluster, job);
 }
 jobConf.setBoolean(DRY_RUN_CONF_KEY, dryRun);
+jobConf.setBoolean(DO_DELETES_CONF_KEY, doDeletes);
+jobConf.setBoolean(DO_PUTS_CONF_KEY, doPuts);
 
 TableMapReduceUtil.initTableMapperJob(targetTableName, tableHash.initScan(),
     SyncMapper.class, null, null, job);
@@ -177,6 +183,8 @@ public class SyncTable extends Configured implements Tool {
 Table sourceTable;
 Table targetTable;
 boolean dryRun;
+boolean doDeletes = true;
+boolean doPuts = true;
 
 HashTable.TableHash sourceTableHash;
 HashTable.TableHash.Reader sourceHashReader;
@@ -200,7 +208,9 @@ public class SyncTable extends Configured implements Tool {
   TableOutputFormat.OUTPUT_CONF_PREFIX);
   sourceTable = openTable(sourceConnection, conf, SOURCE_TABLE_CONF_KEY);
   targetTable = openTable(targetConnection, conf, TARGET_TABLE_CONF_KEY);
-  dryRun = conf.getBoolean(SOURCE_TABLE_CONF_KEY, false);
+  dryRun = conf.getBoolean(DRY_RUN_CONF_KEY, false);
+  doDeletes = conf.getBoolean(DO_DELETES_CONF_KEY, true);
+  doPuts = conf.getBoolean(DO_PUTS_CONF_KEY, true);
 
   sourceTableHash = HashTable.TableHash.read(conf, sourceHashDir);
   LOG.info("Read source hash manifest: " + sourceTableHash);
@@ -487,7 +497,7 @@ public class SyncTable extends Configured implements Tool {
   context.getCounter(Counter.TARGETMISSINGCELLS).increment(1);
   matchingRow = false;
 
-  if (!dryRun) {
+  if (!dryRun && doPuts) {
 if (put == null) {
   put = new Put(rowKey);
 }
@@ -502,8 +512,8 @@ public class SyncTable extends Configured implements Tool {
   context.getCounter(Counter.SOURCEMISSINGCELLS).increment(1);
   matchingRow = false;
 
-  if (!dryRun) {
-if (delete == null) {
+  if (!dryRun && doDeletes) {
+   if (delete == null) {
   delete = new Delete(rowKey);
 }
 // add a tombstone to exactly match the target cell that is missing on the source
@@ -529,8 +539,8 @@ public class SyncTable extends Configured implements Tool {
 context.getCounter(Counter.DIFFERENTCELLVALUES).increment(1);
 matchingRow = false;
 
-if (!dryRun) {
-  // overwrite target cell
+if (!dryRun && doPuts) {
+ // overwrite target cell
   if (put == null) {
 put = new Put(rowKey);
   }
@@ -709,6 +719,10 @@ public class SyncTable extends Configured implements Tool {
 System.err.println("  (defaults to cluster in classpath's 
config)");
 System.err.println(" dryrun   if true, output counters but no 
writes");
 

[hbase] branch branch-2.2 updated: HBASE-20305 adding options to skip deletes/puts on target when running SyncTable

2019-05-14 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new 89caa6b  HBASE-20305 adding options to skip deletes/puts on target when running SyncTable
89caa6b is described below

commit 89caa6b07a35a3eda2885f647eaab450572431d9
Author: wellington 
AuthorDate: Wed Mar 28 22:12:01 2018 +0100

HBASE-20305 adding options to skip deletes/puts on target when running SyncTable

Signed-off-by: tedyu 
Signed-off-by: Andrew Purtell 
---
 .../apache/hadoop/hbase/mapreduce/SyncTable.java   |  36 ++-
 .../hadoop/hbase/mapreduce/TestSyncTable.java  | 248 -
 2 files changed, 272 insertions(+), 12 deletions(-)

diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
index dab84c4..1bb9969 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
@@ -64,7 +64,9 @@ public class SyncTable extends Configured implements Tool {
   static final String TARGET_TABLE_CONF_KEY = "sync.table.target.table.name";
  static final String SOURCE_ZK_CLUSTER_CONF_KEY = "sync.table.source.zk.cluster";
  static final String TARGET_ZK_CLUSTER_CONF_KEY = "sync.table.target.zk.cluster";
-  static final String DRY_RUN_CONF_KEY="sync.table.dry.run";
+  static final String DRY_RUN_CONF_KEY = "sync.table.dry.run";
+  static final String DO_DELETES_CONF_KEY = "sync.table.do.deletes";
+  static final String DO_PUTS_CONF_KEY = "sync.table.do.puts";
 
   Path sourceHashDir;
   String sourceTableName;
@@ -73,6 +75,8 @@ public class SyncTable extends Configured implements Tool {
   String sourceZkCluster;
   String targetZkCluster;
   boolean dryRun;
+  boolean doDeletes = true;
+  boolean doPuts = true;
 
   Counters counters;
 
@@ -144,6 +148,8 @@ public class SyncTable extends Configured implements Tool {
   initCredentialsForHBase(targetZkCluster, job);
 }
 jobConf.setBoolean(DRY_RUN_CONF_KEY, dryRun);
+jobConf.setBoolean(DO_DELETES_CONF_KEY, doDeletes);
+jobConf.setBoolean(DO_PUTS_CONF_KEY, doPuts);
 
 TableMapReduceUtil.initTableMapperJob(targetTableName, tableHash.initScan(),
     SyncMapper.class, null, null, job);
@@ -178,6 +184,8 @@ public class SyncTable extends Configured implements Tool {
 Table sourceTable;
 Table targetTable;
 boolean dryRun;
+boolean doDeletes = true;
+boolean doPuts = true;
 
 HashTable.TableHash sourceTableHash;
 HashTable.TableHash.Reader sourceHashReader;
@@ -201,7 +209,9 @@ public class SyncTable extends Configured implements Tool {
   TableOutputFormat.OUTPUT_CONF_PREFIX);
   sourceTable = openTable(sourceConnection, conf, SOURCE_TABLE_CONF_KEY);
   targetTable = openTable(targetConnection, conf, TARGET_TABLE_CONF_KEY);
-  dryRun = conf.getBoolean(SOURCE_TABLE_CONF_KEY, false);
+  dryRun = conf.getBoolean(DRY_RUN_CONF_KEY, false);
+  doDeletes = conf.getBoolean(DO_DELETES_CONF_KEY, true);
+  doPuts = conf.getBoolean(DO_PUTS_CONF_KEY, true);
 
   sourceTableHash = HashTable.TableHash.read(conf, sourceHashDir);
   LOG.info("Read source hash manifest: " + sourceTableHash);
@@ -488,7 +498,7 @@ public class SyncTable extends Configured implements Tool {
   context.getCounter(Counter.TARGETMISSINGCELLS).increment(1);
   matchingRow = false;
 
-  if (!dryRun) {
+  if (!dryRun && doPuts) {
 if (put == null) {
   put = new Put(rowKey);
 }
@@ -503,7 +513,7 @@ public class SyncTable extends Configured implements Tool {
   context.getCounter(Counter.SOURCEMISSINGCELLS).increment(1);
   matchingRow = false;
 
-  if (!dryRun) {
+  if (!dryRun && doDeletes) {
 if (delete == null) {
   delete = new Delete(rowKey);
 }
@@ -530,7 +540,7 @@ public class SyncTable extends Configured implements Tool {
 context.getCounter(Counter.DIFFERENTCELLVALUES).increment(1);
 matchingRow = false;
 
-if (!dryRun) {
+if (!dryRun && doPuts) {
   // overwrite target cell
   if (put == null) {
 put = new Put(rowKey);
@@ -711,6 +721,10 @@ public class SyncTable extends Configured implements Tool {
 System.err.println("  (defaults to cluster in classpath's 
config)");
 System.err.println(" dryrun   if true, output counters but no 
writes");
 System.err.println("  (defaults to false)");
+System.err.println(" doDeletesif false, does not perform deletes");
+System.err.println("  

[hbase] branch branch-2 updated: HBASE-20305 adding options to skip deletes/puts on target when running SyncTable

2019-05-14 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new d4438ae  HBASE-20305 adding options to skip deletes/puts on target when running SyncTable
d4438ae is described below

commit d4438ae88669e1b3218de19fd5c1d0bd57f8f85d
Author: wellington 
AuthorDate: Wed Mar 28 22:12:01 2018 +0100

HBASE-20305 adding options to skip deletes/puts on target when running SyncTable

Signed-off-by: tedyu 
Signed-off-by: Andrew Purtell 
---
 .../apache/hadoop/hbase/mapreduce/SyncTable.java   |  36 ++-
 .../hadoop/hbase/mapreduce/TestSyncTable.java  | 248 -
 2 files changed, 272 insertions(+), 12 deletions(-)

diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
index dab84c4..1bb9969 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
@@ -64,7 +64,9 @@ public class SyncTable extends Configured implements Tool {
   static final String TARGET_TABLE_CONF_KEY = "sync.table.target.table.name";
  static final String SOURCE_ZK_CLUSTER_CONF_KEY = "sync.table.source.zk.cluster";
  static final String TARGET_ZK_CLUSTER_CONF_KEY = "sync.table.target.zk.cluster";
-  static final String DRY_RUN_CONF_KEY="sync.table.dry.run";
+  static final String DRY_RUN_CONF_KEY = "sync.table.dry.run";
+  static final String DO_DELETES_CONF_KEY = "sync.table.do.deletes";
+  static final String DO_PUTS_CONF_KEY = "sync.table.do.puts";
 
   Path sourceHashDir;
   String sourceTableName;
@@ -73,6 +75,8 @@ public class SyncTable extends Configured implements Tool {
   String sourceZkCluster;
   String targetZkCluster;
   boolean dryRun;
+  boolean doDeletes = true;
+  boolean doPuts = true;
 
   Counters counters;
 
@@ -144,6 +148,8 @@ public class SyncTable extends Configured implements Tool {
   initCredentialsForHBase(targetZkCluster, job);
 }
 jobConf.setBoolean(DRY_RUN_CONF_KEY, dryRun);
+jobConf.setBoolean(DO_DELETES_CONF_KEY, doDeletes);
+jobConf.setBoolean(DO_PUTS_CONF_KEY, doPuts);
 
 TableMapReduceUtil.initTableMapperJob(targetTableName, tableHash.initScan(),
     SyncMapper.class, null, null, job);
@@ -178,6 +184,8 @@ public class SyncTable extends Configured implements Tool {
 Table sourceTable;
 Table targetTable;
 boolean dryRun;
+boolean doDeletes = true;
+boolean doPuts = true;
 
 HashTable.TableHash sourceTableHash;
 HashTable.TableHash.Reader sourceHashReader;
@@ -201,7 +209,9 @@ public class SyncTable extends Configured implements Tool {
   TableOutputFormat.OUTPUT_CONF_PREFIX);
   sourceTable = openTable(sourceConnection, conf, SOURCE_TABLE_CONF_KEY);
   targetTable = openTable(targetConnection, conf, TARGET_TABLE_CONF_KEY);
-  dryRun = conf.getBoolean(SOURCE_TABLE_CONF_KEY, false);
+  dryRun = conf.getBoolean(DRY_RUN_CONF_KEY, false);
+  doDeletes = conf.getBoolean(DO_DELETES_CONF_KEY, true);
+  doPuts = conf.getBoolean(DO_PUTS_CONF_KEY, true);
 
   sourceTableHash = HashTable.TableHash.read(conf, sourceHashDir);
   LOG.info("Read source hash manifest: " + sourceTableHash);
@@ -488,7 +498,7 @@ public class SyncTable extends Configured implements Tool {
   context.getCounter(Counter.TARGETMISSINGCELLS).increment(1);
   matchingRow = false;
 
-  if (!dryRun) {
+  if (!dryRun && doPuts) {
 if (put == null) {
   put = new Put(rowKey);
 }
@@ -503,7 +513,7 @@ public class SyncTable extends Configured implements Tool {
   context.getCounter(Counter.SOURCEMISSINGCELLS).increment(1);
   matchingRow = false;
 
-  if (!dryRun) {
+  if (!dryRun && doDeletes) {
 if (delete == null) {
   delete = new Delete(rowKey);
 }
@@ -530,7 +540,7 @@ public class SyncTable extends Configured implements Tool {
 context.getCounter(Counter.DIFFERENTCELLVALUES).increment(1);
 matchingRow = false;
 
-if (!dryRun) {
+if (!dryRun && doPuts) {
   // overwrite target cell
   if (put == null) {
 put = new Put(rowKey);
@@ -711,6 +721,10 @@ public class SyncTable extends Configured implements Tool {
 System.err.println("  (defaults to cluster in classpath's 
config)");
 System.err.println(" dryrun   if true, output counters but no 
writes");
 System.err.println("  (defaults to false)");
+System.err.println(" doDeletesif false, does not perform deletes");
+System.err.println("  (defaults to 

[hbase] branch branch-1 updated: HBASE-20305 Add option to SyncTable that skip deletes on target cluster

2019-05-14 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1 by this push:
 new 299af7a  HBASE-20305 Add option to SyncTable that skip deletes on target cluster
299af7a is described below

commit 299af7ad7719e8a924b61fbbb76e795a60c84e28
Author: wellington 
AuthorDate: Wed Mar 28 22:12:01 2018 +0100

HBASE-20305 Add option to SyncTable that skip deletes on target cluster

Change-Id: Iccbcd4a7e7ed176d8404cb2ab17e3e47663e0441

Signed-off-by: Andrew Purtell 
---
 .../apache/hadoop/hbase/mapreduce/SyncTable.java   |  40 ++-
 .../hadoop/hbase/mapreduce/TestSyncTable.java  | 280 +++--
 2 files changed, 290 insertions(+), 30 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
index ddb169e..31d5172 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
@@ -63,7 +63,9 @@ public class SyncTable extends Configured implements Tool {
   static final String TARGET_TABLE_CONF_KEY = "sync.table.target.table.name";
  static final String SOURCE_ZK_CLUSTER_CONF_KEY = "sync.table.source.zk.cluster";
  static final String TARGET_ZK_CLUSTER_CONF_KEY = "sync.table.target.zk.cluster";
-  static final String DRY_RUN_CONF_KEY="sync.table.dry.run";
+  static final String DRY_RUN_CONF_KEY = "sync.table.dry.run";
+  static final String DO_DELETES_CONF_KEY = "sync.table.do.deletes";
+  static final String DO_PUTS_CONF_KEY = "sync.table.do.puts";
 
   Path sourceHashDir;
   String sourceTableName;
@@ -72,6 +74,8 @@ public class SyncTable extends Configured implements Tool {
   String sourceZkCluster;
   String targetZkCluster;
   boolean dryRun;
+  boolean doDeletes = true;
+  boolean doPuts = true;
 
   Counters counters;
 
@@ -143,6 +147,8 @@ public class SyncTable extends Configured implements Tool {
   initCredentialsForHBase(targetZkCluster, job);
 }
 jobConf.setBoolean(DRY_RUN_CONF_KEY, dryRun);
+jobConf.setBoolean(DO_DELETES_CONF_KEY, doDeletes);
+jobConf.setBoolean(DO_PUTS_CONF_KEY, doPuts);
 
 TableMapReduceUtil.initTableMapperJob(targetTableName, tableHash.initScan(),
     SyncMapper.class, null, null, job);
@@ -177,6 +183,8 @@ public class SyncTable extends Configured implements Tool {
 Table sourceTable;
 Table targetTable;
 boolean dryRun;
+boolean doDeletes = true;
+boolean doPuts = true;
 
 HashTable.TableHash sourceTableHash;
 HashTable.TableHash.Reader sourceHashReader;
@@ -200,7 +208,9 @@ public class SyncTable extends Configured implements Tool {
   TableOutputFormat.OUTPUT_CONF_PREFIX);
   sourceTable = openTable(sourceConnection, conf, SOURCE_TABLE_CONF_KEY);
   targetTable = openTable(targetConnection, conf, TARGET_TABLE_CONF_KEY);
-  dryRun = conf.getBoolean(SOURCE_TABLE_CONF_KEY, false);
+  dryRun = conf.getBoolean(DRY_RUN_CONF_KEY, false);
+  doDeletes = conf.getBoolean(DO_DELETES_CONF_KEY, true);
+  doPuts = conf.getBoolean(DO_PUTS_CONF_KEY, true);
 
   sourceTableHash = HashTable.TableHash.read(conf, sourceHashDir);
   LOG.info("Read source hash manifest: " + sourceTableHash);
@@ -487,7 +497,7 @@ public class SyncTable extends Configured implements Tool {
   context.getCounter(Counter.TARGETMISSINGCELLS).increment(1);
   matchingRow = false;
 
-  if (!dryRun) {
+  if (!dryRun && doPuts) {
 if (put == null) {
   put = new Put(rowKey);
 }
@@ -502,8 +512,8 @@ public class SyncTable extends Configured implements Tool {
   context.getCounter(Counter.SOURCEMISSINGCELLS).increment(1);
   matchingRow = false;
 
-  if (!dryRun) {
-if (delete == null) {
+  if (!dryRun && doDeletes) {
+   if (delete == null) {
   delete = new Delete(rowKey);
 }
 // add a tombstone to exactly match the target cell that is missing on the source
@@ -529,8 +539,8 @@ public class SyncTable extends Configured implements Tool {
 context.getCounter(Counter.DIFFERENTCELLVALUES).increment(1);
 matchingRow = false;
 
-if (!dryRun) {
-  // overwrite target cell
+if (!dryRun && doPuts) {
+ // overwrite target cell
   if (put == null) {
 put = new Put(rowKey);
   }
@@ -709,6 +719,10 @@ public class SyncTable extends Configured implements Tool {
 System.err.println("  (defaults to cluster in classpath's 
config)");
 System.err.println(" dryrun   if true, output counters but no 
writes");
 

[hbase] branch branch-1.3 updated: HBASE-22391 Fix flaky tests from TestFromClientSide

2019-05-14 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-1.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1.3 by this push:
 new d1cb1f5  HBASE-22391 Fix flaky tests from TestFromClientSide
d1cb1f5 is described below

commit d1cb1f5f101c7dec10c5bf6c8e532de02ff33f5f
Author: Xu Cang 
AuthorDate: Thu May 9 17:04:20 2019 -0700

HBASE-22391 Fix flaky tests from TestFromClientSide

Signed-off-by: Andrew Purtell 

Conflicts:

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
---
 .../hadoop/hbase/client/TestFromClientSide.java| 151 -
 1 file changed, 118 insertions(+), 33 deletions(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 1b03600..0887b61 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -191,7 +191,7 @@ public class TestFromClientSide {
 kvs.put(HConnectionTestingUtility.SleepAtFirstRpcCall.SLEEP_TIME_CONF_KEY, "2000");
 hdt.addCoprocessor(HConnectionTestingUtility.SleepAtFirstRpcCall.class.getName(), null, 1, kvs);
 TEST_UTIL.createTable(hdt, new byte[][] { ROW }).close();
-
+TEST_UTIL.waitTableAvailable(hdt.getTableName(), 1);
 Configuration c = new Configuration(TEST_UTIL.getConfiguration());
 c.setInt(HConstants.HBASE_CLIENT_PAUSE, 50);
 // Client will retry because rpc timeout is smaller than the sleep time of first rpc call
@@ -243,6 +243,7 @@ public class TestFromClientSide {
  HTableDescriptor desc = new HTableDescriptor(TABLENAME);
  desc.addFamily(hcd);
  TEST_UTIL.getHBaseAdmin().createTable(desc);
+ TEST_UTIL.waitTableAvailable(desc.getTableName(), 1);
  Configuration c = TEST_UTIL.getConfiguration();
  Table h = new HTable(c, TABLENAME);
 
@@ -305,7 +306,7 @@ public class TestFromClientSide {
  final byte[] VALUE = Bytes.toBytes("value");
 
  Table table = TEST_UTIL.createTable(TABLENAME, FAMILY);
-
+ TEST_UTIL.waitTableAvailable(TABLENAME, 1);
  // future timestamp
  long ts = System.currentTimeMillis() * 2;
  Put put = new Put(ROW, ts);
@@ -448,6 +449,7 @@ public class TestFromClientSide {
 byte[][] FAMILIES = new byte[][] { Bytes.toBytes("foo") };
 Configuration conf = TEST_UTIL.getConfiguration();
 Table table = TEST_UTIL.createTable(TABLE, FAMILIES, conf);
+TEST_UTIL.waitTableAvailable(TABLE, 1);
 assertSame(conf, table.getConfiguration());
   }
 
@@ -464,6 +466,7 @@ public class TestFromClientSide {
 Bytes.toBytes("trans-type"), Bytes.toBytes("trans-date"),
 Bytes.toBytes("trans-tags"), Bytes.toBytes("trans-group") };
 HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES);
+TEST_UTIL.waitTableAvailable(TABLE, 1);
 String value = "this is the value";
 String value2 = "this is some other value";
 String keyPrefix1 = UUID.randomUUID().toString();
@@ -594,6 +597,7 @@ public class TestFromClientSide {
   throws IOException, InterruptedException {
 TableName name = TableName.valueOf("testFilterAcrossMutlipleRegions");
 HTable t = TEST_UTIL.createTable(name, FAMILY);
+TEST_UTIL.waitTableAvailable(name, 1);
 int rowCount = TEST_UTIL.loadTable(t, FAMILY, false);
 assertRowCount(t, rowCount);
 // Split the table.  Should split on a reasonable key; 'lqj'
@@ -712,6 +716,7 @@ public class TestFromClientSide {
 HTableDescriptor htd = TEST_UTIL.createTableDescriptor(name, FAMILY);
 htd.addCoprocessor(ExceptionInReseekRegionObserver.class.getName());
 TEST_UTIL.getHBaseAdmin().createTable(htd);
+TEST_UTIL.waitTableAvailable(htd.getTableName(), 1);
 ExceptionInReseekRegionObserver.reset();
 ExceptionInReseekRegionObserver.throwOnce.set(true); // throw exceptions only once
 try (Table t = TEST_UTIL.getConnection().getTable(name)) {
@@ -736,6 +741,7 @@ public class TestFromClientSide {
 HTableDescriptor htd = TEST_UTIL.createTableDescriptor(name, FAMILY);
 htd.addCoprocessor(ExceptionInReseekRegionObserver.class.getName());
 TEST_UTIL.getHBaseAdmin().createTable(htd);
+TEST_UTIL.waitTableAvailable(htd.getTableName(), 1);
 ExceptionInReseekRegionObserver.reset();
 ExceptionInReseekRegionObserver.isDoNotRetry.set(true);
 try (Table t = TEST_UTIL.getConnection().getTable(name)) {
@@ -763,6 +769,7 @@ public class TestFromClientSide {
 HTableDescriptor htd = TEST_UTIL.createTableDescriptor(name, FAMILY);
 htd.addCoprocessor(ExceptionInReseekRegionObserver.class.getName());
 TEST_UTIL.getHBaseAdmin().createTable(htd);
+TEST_UTIL.waitTableAvailable(htd.getTableName(), 
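
Every insertion in this patch is the same one-line pattern: after creating a table, block until its regions are actually assigned before the test issues client RPCs, which closes the assignment race that made these tests flaky. A condensed sketch of that pattern against a mini cluster; the table name, family, and timeout below are illustrative, not taken from the commit:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WaitTableAvailableSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();
    TableName name = TableName.valueOf("demo");
    Table t = util.createTable(name, Bytes.toBytes("f"));
    // The fix: wait (here up to 30s) for all regions to come online before
    // reading or writing, instead of racing region assignment.
    util.waitTableAvailable(name, 30000);
    // ... test body would run here ...
    t.close();
    util.shutdownMiniCluster();
  }
}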

[hbase] branch branch-1 updated: HBASE-22391 Fix flaky tests from TestFromClientSide

2019-05-14 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1 by this push:
 new 0df3875  HBASE-22391 Fix flaky tests from TestFromClientSide
0df3875 is described below

commit 0df3875746e649943266943352d3d9c444a2fece
Author: Xu Cang 
AuthorDate: Thu May 9 17:04:20 2019 -0700

HBASE-22391 Fix flaky tests from TestFromClientSide

Signed-off-by: Andrew Purtell 
---
 .../hadoop/hbase/client/TestFromClientSide.java| 160 -
 1 file changed, 125 insertions(+), 35 deletions(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 254d8f2..228c1c7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -193,7 +193,7 @@ public class TestFromClientSide {
 kvs.put(HConnectionTestingUtility.SleepAtFirstRpcCall.SLEEP_TIME_CONF_KEY, "2000");
 hdt.addCoprocessor(HConnectionTestingUtility.SleepAtFirstRpcCall.class.getName(), null, 1, kvs);
 TEST_UTIL.createTable(hdt, new byte[][] { ROW }).close();
-
+TEST_UTIL.waitTableAvailable(hdt.getTableName(), 1);
 Configuration c = new Configuration(TEST_UTIL.getConfiguration());
 c.setInt(HConstants.HBASE_CLIENT_PAUSE, 50);
 // Client will retry because rpc timeout is smaller than the sleep time of first rpc call
@@ -245,6 +245,7 @@ public class TestFromClientSide {
  HTableDescriptor desc = new HTableDescriptor(TABLENAME);
  desc.addFamily(hcd);
  TEST_UTIL.getHBaseAdmin().createTable(desc);
+ TEST_UTIL.waitTableAvailable(desc.getTableName(), 1);
  Configuration c = TEST_UTIL.getConfiguration();
  Table h = new HTable(c, TABLENAME);
 
@@ -307,7 +308,7 @@ public class TestFromClientSide {
  final byte[] VALUE = Bytes.toBytes("value");
 
  Table table = TEST_UTIL.createTable(TABLENAME, FAMILY);
-
+ TEST_UTIL.waitTableAvailable(TABLENAME, 1);
  // future timestamp
  long ts = System.currentTimeMillis() * 2;
  Put put = new Put(ROW, ts);
@@ -450,6 +451,7 @@ public class TestFromClientSide {
 byte[][] FAMILIES = new byte[][] { Bytes.toBytes("foo") };
 Configuration conf = TEST_UTIL.getConfiguration();
 Table table = TEST_UTIL.createTable(TABLE, FAMILIES, conf);
+TEST_UTIL.waitTableAvailable(TABLE, 1);
 assertSame(conf, table.getConfiguration());
   }
 
@@ -466,6 +468,7 @@ public class TestFromClientSide {
 Bytes.toBytes("trans-type"), Bytes.toBytes("trans-date"),
 Bytes.toBytes("trans-tags"), Bytes.toBytes("trans-group") };
 HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES);
+TEST_UTIL.waitTableAvailable(TABLE, 1);
 String value = "this is the value";
 String value2 = "this is some other value";
 String keyPrefix1 = UUID.randomUUID().toString();
@@ -596,6 +599,7 @@ public class TestFromClientSide {
   throws IOException, InterruptedException {
 TableName name = TableName.valueOf("testFilterAcrossMutlipleRegions");
 HTable t = TEST_UTIL.createTable(name, FAMILY);
+TEST_UTIL.waitTableAvailable(name, 1);
 int rowCount = TEST_UTIL.loadTable(t, FAMILY, false);
 assertRowCount(t, rowCount);
 // Split the table.  Should split on a reasonable key; 'lqj'
@@ -714,6 +718,7 @@ public class TestFromClientSide {
 HTableDescriptor htd = TEST_UTIL.createTableDescriptor(name, FAMILY);
 htd.addCoprocessor(ExceptionInReseekRegionObserver.class.getName());
 TEST_UTIL.getHBaseAdmin().createTable(htd);
+TEST_UTIL.waitTableAvailable(htd.getTableName(), 1);
 ExceptionInReseekRegionObserver.reset();
 ExceptionInReseekRegionObserver.throwOnce.set(true); // throw exceptions only once
 try (Table t = TEST_UTIL.getConnection().getTable(name)) {
@@ -738,6 +743,7 @@ public class TestFromClientSide {
 HTableDescriptor htd = TEST_UTIL.createTableDescriptor(name, FAMILY);
 htd.addCoprocessor(ExceptionInReseekRegionObserver.class.getName());
 TEST_UTIL.getHBaseAdmin().createTable(htd);
+TEST_UTIL.waitTableAvailable(htd.getTableName(), 1);
 ExceptionInReseekRegionObserver.reset();
 ExceptionInReseekRegionObserver.isDoNotRetry.set(true);
 try (Table t = TEST_UTIL.getConnection().getTable(name)) {
@@ -765,6 +771,7 @@ public class TestFromClientSide {
 HTableDescriptor htd = TEST_UTIL.createTableDescriptor(name, FAMILY);
 htd.addCoprocessor(ExceptionInReseekRegionObserver.class.getName());
 TEST_UTIL.getHBaseAdmin().createTable(htd);
+TEST_UTIL.waitTableAvailable(htd.getTableName(), 1);
 ExceptionInReseekRegionObserver.reset();
 ExceptionInReseekRegionObserver.throwOnce.set(false); // 

[hbase] branch branch-1.4 updated: HBASE-22391 Fix flaky tests from TestFromClientSide

2019-05-14 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-1.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1.4 by this push:
 new a151bc0  HBASE-22391 Fix flaky tests from TestFromClientSide
a151bc0 is described below

commit a151bc03b703f95923c598f2058fe035146da0f6
Author: Xu Cang 
AuthorDate: Thu May 9 17:04:20 2019 -0700

HBASE-22391 Fix flaky tests from TestFromClientSide

Signed-off-by: Andrew Purtell 
---
 .../hadoop/hbase/client/TestFromClientSide.java| 160 -
 1 file changed, 125 insertions(+), 35 deletions(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 254d8f2..228c1c7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -193,7 +193,7 @@ public class TestFromClientSide {
 kvs.put(HConnectionTestingUtility.SleepAtFirstRpcCall.SLEEP_TIME_CONF_KEY, "2000");
 hdt.addCoprocessor(HConnectionTestingUtility.SleepAtFirstRpcCall.class.getName(), null, 1, kvs);
 TEST_UTIL.createTable(hdt, new byte[][] { ROW }).close();
-
+TEST_UTIL.waitTableAvailable(hdt.getTableName(), 1);
 Configuration c = new Configuration(TEST_UTIL.getConfiguration());
 c.setInt(HConstants.HBASE_CLIENT_PAUSE, 50);
 // Client will retry because rpc timeout is smaller than the sleep time of first rpc call
@@ -245,6 +245,7 @@ public class TestFromClientSide {
  HTableDescriptor desc = new HTableDescriptor(TABLENAME);
  desc.addFamily(hcd);
  TEST_UTIL.getHBaseAdmin().createTable(desc);
+ TEST_UTIL.waitTableAvailable(desc.getTableName(), 1);
  Configuration c = TEST_UTIL.getConfiguration();
  Table h = new HTable(c, TABLENAME);
 
@@ -307,7 +308,7 @@ public class TestFromClientSide {
  final byte[] VALUE = Bytes.toBytes("value");
 
  Table table = TEST_UTIL.createTable(TABLENAME, FAMILY);
-
+ TEST_UTIL.waitTableAvailable(TABLENAME, 1);
  // future timestamp
  long ts = System.currentTimeMillis() * 2;
  Put put = new Put(ROW, ts);
@@ -450,6 +451,7 @@ public class TestFromClientSide {
 byte[][] FAMILIES = new byte[][] { Bytes.toBytes("foo") };
 Configuration conf = TEST_UTIL.getConfiguration();
 Table table = TEST_UTIL.createTable(TABLE, FAMILIES, conf);
+TEST_UTIL.waitTableAvailable(TABLE, 1);
 assertSame(conf, table.getConfiguration());
   }
 
@@ -466,6 +468,7 @@ public class TestFromClientSide {
 Bytes.toBytes("trans-type"), Bytes.toBytes("trans-date"),
 Bytes.toBytes("trans-tags"), Bytes.toBytes("trans-group") };
 HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES);
+TEST_UTIL.waitTableAvailable(TABLE, 1);
 String value = "this is the value";
 String value2 = "this is some other value";
 String keyPrefix1 = UUID.randomUUID().toString();
@@ -596,6 +599,7 @@ public class TestFromClientSide {
   throws IOException, InterruptedException {
 TableName name = TableName.valueOf("testFilterAcrossMutlipleRegions");
 HTable t = TEST_UTIL.createTable(name, FAMILY);
+TEST_UTIL.waitTableAvailable(name, 1);
 int rowCount = TEST_UTIL.loadTable(t, FAMILY, false);
 assertRowCount(t, rowCount);
 // Split the table.  Should split on a reasonable key; 'lqj'
@@ -714,6 +718,7 @@ public class TestFromClientSide {
 HTableDescriptor htd = TEST_UTIL.createTableDescriptor(name, FAMILY);
 htd.addCoprocessor(ExceptionInReseekRegionObserver.class.getName());
 TEST_UTIL.getHBaseAdmin().createTable(htd);
+TEST_UTIL.waitTableAvailable(htd.getTableName(), 1);
 ExceptionInReseekRegionObserver.reset();
 ExceptionInReseekRegionObserver.throwOnce.set(true); // throw exceptions only once
 try (Table t = TEST_UTIL.getConnection().getTable(name)) {
@@ -738,6 +743,7 @@ public class TestFromClientSide {
 HTableDescriptor htd = TEST_UTIL.createTableDescriptor(name, FAMILY);
 htd.addCoprocessor(ExceptionInReseekRegionObserver.class.getName());
 TEST_UTIL.getHBaseAdmin().createTable(htd);
+TEST_UTIL.waitTableAvailable(htd.getTableName(), 1);
 ExceptionInReseekRegionObserver.reset();
 ExceptionInReseekRegionObserver.isDoNotRetry.set(true);
 try (Table t = TEST_UTIL.getConnection().getTable(name)) {
@@ -765,6 +771,7 @@ public class TestFromClientSide {
 HTableDescriptor htd = TEST_UTIL.createTableDescriptor(name, FAMILY);
 htd.addCoprocessor(ExceptionInReseekRegionObserver.class.getName());
 TEST_UTIL.getHBaseAdmin().createTable(htd);
+TEST_UTIL.waitTableAvailable(htd.getTableName(), 1);
 ExceptionInReseekRegionObserver.reset();
 

[hbase] branch branch-2 updated: HBASE-21784 Dump replication queue should show list of wal files ordered chronologically

2019-05-14 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 9a1ed0b  HBASE-21784 Dump replication queue should show list of wal files ordered chronologically
9a1ed0b is described below

commit 9a1ed0b9dc7c73792cb5c67736491c86cdb72cf1
Author: Wellington Chevreuil 
AuthorDate: Mon May 13 20:30:01 2019 +0100

HBASE-21784 Dump replication queue should show list of wal files ordered chronologically

Change-Id: I18c372406290e2b1e2b5503e2c87adcb9bf6fe91

Signed-off-by: Andrew Purtell 
---
 .../regionserver/DumpReplicationQueues.java|  2 +
 .../regionserver/TestDumpReplicationQueues.java| 98 ++
 .../org/apache/hadoop/hbase/zookeeper/ZKUtil.java  |  2 +
 3 files changed, 102 insertions(+)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index a960c31..432dbcd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -21,6 +21,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -325,6 +326,7 @@ public class DumpReplicationQueues extends Configured implements Tool {
   for (String queueId : queueIds) {
 ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
  List<String> wals = queueStorage.getWALsInQueue(regionserver, queueId);
+Collections.sort(wals);
 if (!peerIds.contains(queueInfo.getPeerId())) {
   deletedQueues.add(regionserver + "/" + queueId);
   sb.append(formatQueue(regionserver, queueStorage, queueInfo, queueId, wals, true, hdfs));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java
new file mode 100644
index 000..f49f9b9
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Tests for DumpReplicationQueues tool
+ */
+@Category({ ReplicationTests.class, SmallTests.class})
+public class TestDumpReplicationQueues {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+  HBaseClassTestRule.forClass(TestDumpReplicationQueues.class);
+
+  /**
+   * Makes sure dumpQueues returns wals znodes ordered chronologically.
+   * @throws Exception if dumpqueues finds any error while handling list of znodes.
+   */
+  @Test
+  public void testDumpReplicationReturnsWalSorted() throws Exception {
+Configuration config = HBaseConfiguration.create();
+ZKWatcher zkWatcherMock = 

[hbase] branch branch-2.1 updated: HBASE-21784 Dump replication queue should show list of wal files ordered chronologically

2019-05-14 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new 696298b  HBASE-21784 Dump replication queue should show list of wal files ordered chronologically
696298b is described below

commit 696298bface4faf918b4ac85f0d69f43a16aa06b
Author: Wellington Chevreuil 
AuthorDate: Mon May 13 20:30:01 2019 +0100

HBASE-21784 Dump replication queue should show list of wal files ordered chronologically

Change-Id: I18c372406290e2b1e2b5503e2c87adcb9bf6fe91

Signed-off-by: Andrew Purtell 
---
 .../regionserver/DumpReplicationQueues.java|  2 +
 .../regionserver/TestDumpReplicationQueues.java| 98 ++
 .../org/apache/hadoop/hbase/zookeeper/ZKUtil.java  |  2 +
 3 files changed, 102 insertions(+)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index a960c31..432dbcd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -21,6 +21,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -325,6 +326,7 @@ public class DumpReplicationQueues extends Configured implements Tool {
   for (String queueId : queueIds) {
 ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
  List<String> wals = queueStorage.getWALsInQueue(regionserver, queueId);
+Collections.sort(wals);
 if (!peerIds.contains(queueInfo.getPeerId())) {
   deletedQueues.add(regionserver + "/" + queueId);
   sb.append(formatQueue(regionserver, queueStorage, queueInfo, queueId, wals, true, hdfs));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java
new file mode 100644
index 000..f49f9b9
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Tests for DumpReplicationQueues tool
+ */
+@Category({ ReplicationTests.class, SmallTests.class})
+public class TestDumpReplicationQueues {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+  HBaseClassTestRule.forClass(TestDumpReplicationQueues.class);
+
+  /**
+   * Makes sure dumpQueues returns wals znodes ordered chronologically.
+   * @throws Exception if dumpqueues finds any error while handling list of znodes.
+   */
+  @Test
+  public void testDumpReplicationReturnsWalSorted() throws Exception {
+Configuration config = HBaseConfiguration.create();
+ZKWatcher zkWatcherMock = 

[hbase] branch master updated: HBASE-21784 Dump replication queue should show list of wal files ordered chronologically

2019-05-14 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new fab2e15  HBASE-21784 Dump replication queue should show list of wal files ordered chronologically
fab2e15 is described below

commit fab2e15ae43c08792f557d03a8a68f1e671bea9b
Author: Wellington Chevreuil 
AuthorDate: Mon May 13 20:30:01 2019 +0100

HBASE-21784 Dump replication queue should show list of wal files ordered chronologically

Change-Id: I18c372406290e2b1e2b5503e2c87adcb9bf6fe91

Signed-off-by: Andrew Purtell 
---
 .../regionserver/DumpReplicationQueues.java|  2 +
 .../regionserver/TestDumpReplicationQueues.java| 98 ++
 .../org/apache/hadoop/hbase/zookeeper/ZKUtil.java  |  2 +
 3 files changed, 102 insertions(+)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index a960c31..432dbcd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -21,6 +21,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -325,6 +326,7 @@ public class DumpReplicationQueues extends Configured implements Tool {
   for (String queueId : queueIds) {
 ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
  List<String> wals = queueStorage.getWALsInQueue(regionserver, queueId);
+Collections.sort(wals);
 if (!peerIds.contains(queueInfo.getPeerId())) {
   deletedQueues.add(regionserver + "/" + queueId);
   sb.append(formatQueue(regionserver, queueStorage, queueInfo, queueId, wals, true, hdfs));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java
new file mode 100644
index 000..f49f9b9
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Tests for DumpReplicationQueues tool
+ */
+@Category({ ReplicationTests.class, SmallTests.class})
+public class TestDumpReplicationQueues {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+  HBaseClassTestRule.forClass(TestDumpReplicationQueues.class);
+
+  /**
+   * Makes sure dumpQueues returns wals znodes ordered chronologically.
+   * @throws Exception if dumpqueues finds any error while handling list of znodes.
+   */
+  @Test
+  public void testDumpReplicationReturnsWalSorted() throws Exception {
+Configuration config = HBaseConfiguration.create();
+ZKWatcher zkWatcherMock = 

[hbase] branch branch-1.3 updated: HBASE-21784 Dump replication queue should show list of wal files ordered chronologically

2019-05-14 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-1.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1.3 by this push:
 new a4a6f79  HBASE-21784 Dump replication queue should show list of wal files ordered chronologically
a4a6f79 is described below

commit a4a6f79be82f03f2779cbf05302f1f17e58825f5
Author: Wellington Chevreuil 
AuthorDate: Mon May 13 20:30:01 2019 +0100

HBASE-21784 Dump replication queue should show list of wal files ordered chronologically

Change-Id: I18c372406290e2b1e2b5503e2c87adcb9bf6fe91

Signed-off-by: Andrew Purtell 
---
 .../hadoop/hbase/replication/regionserver/DumpReplicationQueues.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index c374541..1a51175 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.zookeeper.KeeperException;
-import org.mortbay.util.IO;
 
 import com.google.common.util.concurrent.AtomicLongMap;
 
@@ -318,6 +317,7 @@ public class DumpReplicationQueues extends Configured implements Tool {
 for (String queueId : queueIds) {
   ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
   List<String> wals = queuesClient.getLogsInQueue(regionserver, queueId);
+  Collections.sort(wals);
   if (!peerIds.contains(queueInfo.getPeerId())) {
 deletedQueues.add(regionserver + "/" + queueId);
 sb.append(formatQueue(regionserver, replicationQueues, queueInfo, queueId, wals, true, hdfs));
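
The single added line works because WAL znode names end with the WAL file's creation timestamp as their last dot-separated component, so within one region server's queue a plain lexicographic Collections.sort() is also a chronological sort. A minimal sketch with hypothetical znode names:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class WalSortSketch {
  public static void main(String[] args) {
    // Hypothetical WAL names: encoded server prefix + "." + creation timestamp.
    List<String> wals = new ArrayList<>();
    wals.add("rs1%2C16020%2C1557800000000.1557800300000");
    wals.add("rs1%2C16020%2C1557800000000.1557800100000");
    wals.add("rs1%2C16020%2C1557800000000.1557800200000");
    Collections.sort(wals); // the same call the patch adds in dumpQueues()
    for (String wal : wals) {
      System.out.println(wal); // prints oldest first
    }
  }
}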



[hbase] branch branch-2.2 updated: HBASE-21784 Dump replication queue should show list of wal files ordered chronologically

2019-05-14 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new f8278b4  HBASE-21784 Dump replication queue should show list of wal files ordered chronologically
f8278b4 is described below

commit f8278b4e90ea2b4f8ab7e1050c52c657b93ae07b
Author: Wellington Chevreuil 
AuthorDate: Mon May 13 20:30:01 2019 +0100

HBASE-21784 Dump replication queue should show list of wal files ordered chronologically

Change-Id: I18c372406290e2b1e2b5503e2c87adcb9bf6fe91

Signed-off-by: Andrew Purtell 
---
 .../regionserver/DumpReplicationQueues.java|  2 +
 .../regionserver/TestDumpReplicationQueues.java| 98 ++
 .../org/apache/hadoop/hbase/zookeeper/ZKUtil.java  |  2 +
 3 files changed, 102 insertions(+)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index a960c31..432dbcd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -21,6 +21,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -325,6 +326,7 @@ public class DumpReplicationQueues extends Configured implements Tool {
   for (String queueId : queueIds) {
 ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
 List wals = queueStorage.getWALsInQueue(regionserver, queueId);
+Collections.sort(wals);
 if (!peerIds.contains(queueInfo.getPeerId())) {
   deletedQueues.add(regionserver + "/" + queueId);
   sb.append(formatQueue(regionserver, queueStorage, queueInfo, 
queueId, wals, true, hdfs));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java
new file mode 100644
index 0000000..f49f9b9
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Tests for DumpReplicationQueues tool
+ */
+@Category({ ReplicationTests.class, SmallTests.class})
+public class TestDumpReplicationQueues {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+  HBaseClassTestRule.forClass(TestDumpReplicationQueues.class);
+
+  /**
+   * Makes sure dumpQueues returns wals znodes ordered chronologically.
+   * @throws Exception if dumpqueues finds any error while handling list of 
znodes.
+   */
+  @Test
+  public void testDumpReplicationReturnsWalSorted() throws Exception {
+Configuration config = HBaseConfiguration.create();
+ZKWatcher zkWatcherMock = 
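The message is truncated above, mid-statement. A rough sketch of the kind of mock wiring such a test would continue with; everything beyond the line shown is an assumption, not the committed test body:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

// Hypothetical continuation, not the committed test: stub the ZooKeeper
// layer so dumpQueues() can be exercised without a live quorum.
class MockWiringSketch {
  static ZKWatcher mockWatcher() {
    ZKWatcher zkWatcherMock = mock(ZKWatcher.class);
    RecoverableZooKeeper zkMock = mock(RecoverableZooKeeper.class);
    when(zkWatcherMock.getRecoverableZooKeeper()).thenReturn(zkMock);
    return zkWatcherMock;
  }
}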

[hbase] branch branch-1 updated: HBASE-21784 Dump replication queue should show list of wal files ordered chronologically

2019-05-14 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1 by this push:
 new 46eb56a  HBASE-21784 Dump replication queue should show list of wal 
files ordered chronologically
46eb56a is described below

commit 46eb56a51b508188da0e2aba19c45bb28abd5f49
Author: Wellington Chevreuil 

AuthorDate: Mon May 13 20:30:01 2019 +0100

HBASE-21784 Dump replication queue should show list of wal files ordered 
chronologically

Change-Id: I18c372406290e2b1e2b5503e2c87adcb9bf6fe91

Signed-off-by: Andrew Purtell 
---
 .../hadoop/hbase/replication/regionserver/DumpReplicationQueues.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index 631b6c8..6c78963 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.zookeeper.KeeperException;
-import org.mortbay.util.IO;
 
 import com.google.common.util.concurrent.AtomicLongMap;
 
@@ -319,6 +318,7 @@ public class DumpReplicationQueues extends Configured implements Tool {
     for (String queueId : queueIds) {
       ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
       List<String> wals = queuesClient.getLogsInQueue(regionserver, queueId);
+      Collections.sort(wals);
       if (!peerIds.contains(queueInfo.getPeerId())) {
         deletedQueues.add(regionserver + "/" + queueId);
         sb.append(formatQueue(regionserver, replicationQueues, queueInfo, queueId, wals, true, hdfs));



[hbase] branch branch-1.4 updated: HBASE-21784 Dump replication queue should show list of wal files ordered chronologically

2019-05-14 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-1.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1.4 by this push:
 new 61eb6fc  HBASE-21784 Dump replication queue should show list of wal 
files ordered chronologically
61eb6fc is described below

commit 61eb6fc79dab019aa074e14a9590ef4ffa25cf6a
Author: Wellington Chevreuil 

AuthorDate: Mon May 13 20:30:01 2019 +0100

HBASE-21784 Dump replication queue should show list of wal files ordered 
chronologically

Change-Id: I18c372406290e2b1e2b5503e2c87adcb9bf6fe91

Signed-off-by: Andrew Purtell 
---
 .../hadoop/hbase/replication/regionserver/DumpReplicationQueues.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index 631b6c8..6c78963 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.zookeeper.KeeperException;
-import org.mortbay.util.IO;
 
 import com.google.common.util.concurrent.AtomicLongMap;
 
@@ -319,6 +318,7 @@ public class DumpReplicationQueues extends Configured implements Tool {
     for (String queueId : queueIds) {
       ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
       List<String> wals = queuesClient.getLogsInQueue(regionserver, queueId);
+      Collections.sort(wals);
      if (!peerIds.contains(queueInfo.getPeerId())) {
         deletedQueues.add(regionserver + "/" + queueId);
         sb.append(formatQueue(regionserver, replicationQueues, queueInfo, queueId, wals, true, hdfs));



[hbase] branch branch-1.4 updated: Amend HBASE-21048 Get LogLevel is not working from console in secure environment

2019-05-14 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-1.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1.4 by this push:
 new 13d6777  Amend HBASE-21048 Get LogLevel is not working from console in 
secure environment
13d6777 is described below

commit 13d677786850bc108987fdf714fd1376e499a5fd
Author: Wei-Chiu Chuang 
AuthorDate: Tue May 14 10:49:25 2019 +0200

Amend HBASE-21048 Get LogLevel is not working from console in secure 
environment

Addendum fix for compile error.

Signed-off-by: Andrew Purtell 
---
 .../src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java | 1 -
 1 file changed, 1 deletion(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java
index 224bfcb..c00d13d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java
@@ -267,7 +267,6 @@ public class TestLogLevel {
 }
   });
 } finally {
-  clientUGI.logoutUserFromKeytab();
   server.stop();
 }
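This addendum (and its branch-1 twin below) drops the call outright, presumably because the UserGroupInformation bundled with the Hadoop versions those branches build against does not expose logoutUserFromKeytab(). A purely hypothetical alternative that keeps such cleanup version-tolerant via reflection:

import org.apache.hadoop.security.UserGroupInformation;

// Hypothetical sketch, not what the addendum does: invoke
// logoutUserFromKeytab() reflectively so the code still compiles against
// Hadoop versions whose UserGroupInformation lacks the method.
public class OptionalLogout {
  static void logoutQuietly(UserGroupInformation ugi) {
    try {
      UserGroupInformation.class.getMethod("logoutUserFromKeytab").invoke(ugi);
    } catch (ReflectiveOperationException e) {
      // Method absent (older Hadoop) or logout failed: nothing more to do.
    }
  }
}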
 



[hbase] branch branch-1 updated: Amend HBASE-21048 Get LogLevel is not working from console in secure environment

2019-05-14 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1 by this push:
 new 4d41846  Amend HBASE-21048 Get LogLevel is not working from console in 
secure environment
4d41846 is described below

commit 4d41846246cde7ead5165f07e8daa0f42ce895f1
Author: Wei-Chiu Chuang 
AuthorDate: Tue May 14 10:49:25 2019 +0200

Amend HBASE-21048 Get LogLevel is not working from console in secure 
environment

Addendum fix for compile error.

Signed-off-by: Andrew Purtell 
---
 .../src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java | 1 -
 1 file changed, 1 deletion(-)

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java
index 224bfcb..c00d13d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java
@@ -267,7 +267,6 @@ public class TestLogLevel {
 }
   });
 } finally {
-  clientUGI.logoutUserFromKeytab();
   server.stop();
 }
 



[hbase] branch branch-1 updated: Amend HBASE-22377 Provide API to check the existence of a namespace which does not require ADMIN permissions

2019-05-14 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1 by this push:
 new 860d7a7  Amend HBASE-22377 Provide API to check the existence of a 
namespace which does not require ADMIN permissions
860d7a7 is described below

commit 860d7a746e4840f2e69ae013317e32388a68f81b
Author: Andrew Purtell 
AuthorDate: Tue May 14 13:54:51 2019 -0700

Amend HBASE-22377 Provide API to check the existence of a namespace which 
does not require ADMIN permissions

Bring back late review feedback from branch-2 version: use toArray in 
listNamespaces
---
 .../src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java| 6 +-
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index a194e0b..499a8dc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -3148,11 +3148,7 @@ public class HBaseAdmin implements Admin {
 controller.setCallTimeout(callTimeout);
        List<String> list = master.listNamespaces(controller,
   ListNamespacesRequest.newBuilder().build()).getNamespaceNameList();
-String[] res = new String[list.size()];
-for(int i = 0; i < list.size(); i++) {
-  res[i] = list.get(i);
-}
-return res;
+return list.toArray(new String[list.size()]);
   }
 });
   }
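The replacement above is the standard idiom: List.toArray with a pre-sized array performs the element copy that the hand-written index loop did. A tiny self-contained illustration:

import java.util.Arrays;
import java.util.List;

public class ToArraySketch {
  public static void main(String[] args) {
    List<String> list = Arrays.asList("default", "hbase", "ns1");
    // One call replaces the manual loop; the pre-sized array lets toArray
    // fill it directly instead of allocating a second one.
    String[] res = list.toArray(new String[list.size()]);
    System.out.println(Arrays.toString(res));
  }
}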



[hbase] branch branch-1.3 updated: HBASE-22384 Fixed formatting issues in administration section of book

2019-05-14 Thread janh
This is an automated email from the ASF dual-hosted git repository.

janh pushed a commit to branch branch-1.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1.3 by this push:
 new c9d79a2  HBASE-22384 Fixed formatting issues in administration section 
of book
c9d79a2 is described below

commit c9d79a2119fef6cb11d5dac1c4c6dc50494718aa
Author: Jan Hentschel 
AuthorDate: Wed May 8 16:36:19 2019 +0200

HBASE-22384 Fixed formatting issues in administration section of book
---
 src/main/asciidoc/_chapters/security.adoc | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/main/asciidoc/_chapters/security.adoc b/src/main/asciidoc/_chapters/security.adoc
index 0ed9ba2..eb0c9a1 100644
--- a/src/main/asciidoc/_chapters/security.adoc
+++ b/src/main/asciidoc/_chapters/security.adoc
@@ -1270,15 +1270,15 @@ public static void addLabels() throws Exception {
 
 hbase> set_auths 'service', [ 'service' ]
 
-
++
 
 hbase> set_auths 'testuser', [ 'test' ]
 
-
++
 
 hbase> set_auths 'qa', [ 'test', 'developer' ]
 
-
++
 
 hbase> set_auths '@qagroup', [ 'test' ]
 
@@ -1310,15 +1310,15 @@ public void testSetAndGetUserAuths() throws Throwable {
 
 hbase> clear_auths 'service', [ 'service' ]
 
-
++
 
 hbase> clear_auths 'testuser', [ 'test' ]
 
-
++
 
 hbase> clear_auths 'qa', [ 'test', 'developer' ]
 
-
++
 
 hbase> clear_auths '@qagroup', [ 'test', 'developer' ]
 
@@ -1350,11 +1350,11 @@ The label is associated with a given version of the cell.
 
 hbase> set_visibility 'user', 'admin|service|developer', { COLUMNS => 'i' }
 
-
++
 
 hbase> set_visibility 'user', 'admin|service', { COLUMNS => 'pii' }
 
-
++
 
 hbase> set_visibility 'user', 'test', { COLUMNS => [ 'i', 'pii' ], FILTER => 
"(PrefixFilter ('test'))" }
 



[hbase] branch branch-1.4 updated: HBASE-22384 Fixed formatting issues in administration section of book

2019-05-14 Thread janh
This is an automated email from the ASF dual-hosted git repository.

janh pushed a commit to branch branch-1.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1.4 by this push:
 new 753aab1  HBASE-22384 Fixed formatting issues in administration section 
of book
753aab1 is described below

commit 753aab13f4828b1a9a154f1dee4e1a835b7f2dc1
Author: Jan Hentschel 
AuthorDate: Wed May 8 16:36:19 2019 +0200

HBASE-22384 Fixed formatting issues in administration section of book
---
 src/main/asciidoc/_chapters/security.adoc | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/main/asciidoc/_chapters/security.adoc b/src/main/asciidoc/_chapters/security.adoc
index d6e457f..02e33d5 100644
--- a/src/main/asciidoc/_chapters/security.adoc
+++ b/src/main/asciidoc/_chapters/security.adoc
@@ -1253,15 +1253,15 @@ public static void addLabels() throws Exception {
 
 hbase> set_auths 'service', [ 'service' ]
 
-
++
 
 hbase> set_auths 'testuser', [ 'test' ]
 
-
++
 
 hbase> set_auths 'qa', [ 'test', 'developer' ]
 
-
++
 
 hbase> set_auths '@qagroup', [ 'test' ]
 
@@ -1293,15 +1293,15 @@ public void testSetAndGetUserAuths() throws Throwable {
 
 hbase> clear_auths 'service', [ 'service' ]
 
-
++
 
 hbase> clear_auths 'testuser', [ 'test' ]
 
-
++
 
 hbase> clear_auths 'qa', [ 'test', 'developer' ]
 
-
++
 
 hbase> clear_auths '@qagroup', [ 'test', 'developer' ]
 
@@ -1333,11 +1333,11 @@ The label is associated with a given version of the cell.
 
 hbase> set_visibility 'user', 'admin|service|developer', { COLUMNS => 'i' }
 
-
++
 
 hbase> set_visibility 'user', 'admin|service', { COLUMNS => 'pii' }
 
-
++
 
 hbase> set_visibility 'user', 'test', { COLUMNS => [ 'i', 'pii' ], FILTER => 
"(PrefixFilter ('test'))" }
 



[hbase] branch master updated: Amend HBASE-22377 Provide API to check the existence of a namespace which does not require ADMIN permissions

2019-05-14 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 81953d3  Amend HBASE-22377 Provide API to check the existence of a 
namespace which does not require ADMIN permissions
81953d3 is described below

commit 81953d3e00908fc7389cca781e480cee8e3049c3
Author: Andrew Purtell 
AuthorDate: Tue May 14 12:59:10 2019 -0700

Amend HBASE-22377 Provide API to check the existence of a namespace which 
does not require ADMIN permissions

Sync Javadoc updates from branch-1 and branch-2 backports
---
 .../java/org/apache/hadoop/hbase/client/Admin.java | 63 +-
 1 file changed, 49 insertions(+), 14 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index c7d60c5..14abb6e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -212,6 +212,7 @@ public interface Admin extends Abortable, Closeable {
* @param startKey beginning of key range
* @param endKey end of key range
* @param numRegions the total number of regions to create
+   * @throws IOException if a remote or network exception occurs
* @throws IllegalArgumentException if the table name is reserved
* @throws org.apache.hadoop.hbase.MasterNotRunningException if master is 
not running
* @throws org.apache.hadoop.hbase.TableExistsException if table already 
exists (If concurrent
@@ -232,7 +233,7 @@ public interface Admin extends Abortable, Closeable {
* @throws org.apache.hadoop.hbase.MasterNotRunningException if master is 
not running
* @throws org.apache.hadoop.hbase.TableExistsException if table already 
exists (If concurrent
* threads, the table may have been created between test-for-existence and 
attempt-at-creation).
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
*/
   default void createTable(TableDescriptor desc, byte[][] splitKeys) throws 
IOException {
 get(createTableAsync(desc, splitKeys), getSyncWaitTimeout(), 
TimeUnit.MILLISECONDS);
@@ -321,7 +322,7 @@ public interface Admin extends Abortable, Closeable {
* and {@link #isTableEnabled(org.apache.hadoop.hbase.TableName)} instead. 
The table has to be in
* disabled state for it to be enabled.
* @param tableName name of the table
-   * @throws IOException if a remote or network exception occurs There could 
be couple types of
+   * @throws IOException There could be couple types of
*   IOException TableNotFoundException means the table doesn't 
exist.
*   TableNotDisabledException means the table isn't in disabled 
state.
* @see #isTableEnabled(org.apache.hadoop.hbase.TableName)
@@ -664,6 +665,7 @@ public interface Admin extends Abortable, Closeable {
* @param switchState Set to true to enable, 
false to disable.
* @param serverNamesList list of region servers.
* @return Previous compaction states for region servers
+   * @throws IOException if a remote or network exception occurs
*/
   Map<ServerName, Boolean> compactionSwitch(boolean switchState, List<String> serverNamesList)
       throws IOException;
@@ -734,6 +736,7 @@ public interface Admin extends Abortable, Closeable {
   /**
* Assign a Region.
* @param regionName Region name to assign.
+   * @throws IOException if a remote or network exception occurs
*/
   void assign(byte[] regionName) throws IOException;
 
@@ -745,6 +748,7 @@ public interface Admin extends Abortable, Closeable {
* @param regionName Region to unassign. Will clear any existing RegionPlan 
if one found.
* @param force If true, force unassign (Will remove region 
from regions-in-transition too if
* present. If results in double assignment use hbck -fix to resolve. To be 
used by experts).
+   * @throws IOException if a remote or network exception occurs
*/
   void unassign(byte[] regionName, boolean force)
   throws IOException;
@@ -757,7 +761,7 @@ public interface Admin extends Abortable, Closeable {
* experts or hbck.
*
* @param regionName Region to offline.
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
*/
   void offline(byte[] regionName) throws IOException;
 
@@ -767,15 +771,16 @@ public interface Admin extends Abortable, Closeable {
* @param synchronous If true, it waits until current balance() 
call, if outstanding,
*  to return.
* @return Previous balancer value
+   * @throws IOException if a remote or network exception occurs
*/
   boolean balancerSwitch(boolean onOrOff, boolean synchronous) throws 
IOException;
 
-
   /**
* Invoke the balancer.  Will run the balancer and if regions to move, it 
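The message is truncated above. A hedged usage sketch of the two switch methods whose javadoc this amendment touches (the empty server list for compactionSwitch is an assumption meaning all region servers; connection setup is the stock factory call):

import java.util.Collections;
import java.util.Map;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hedged sketch: both calls may throw IOException on a remote or network
// failure, which is exactly what the amended javadoc now spells out.
public class AdminSwitchSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = conn.getAdmin()) {
      boolean previousBalancer = admin.balancerSwitch(false, true);
      // Assumption: an empty list targets every region server.
      Map<ServerName, Boolean> previousCompaction =
          admin.compactionSwitch(false, Collections.emptyList());
      System.out.println(previousBalancer + " " + previousCompaction);
    }
  }
}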

[hbase] branch branch-1 updated: HBASE-22384 Fixed formatting issues in administration section of book

2019-05-14 Thread janh
This is an automated email from the ASF dual-hosted git repository.

janh pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1 by this push:
 new 869c39f  HBASE-22384 Fixed formatting issues in administration section 
of book
869c39f is described below

commit 869c39f5830f28e8c81c038c589ded1b0415f792
Author: Jan Hentschel 
AuthorDate: Wed May 8 16:36:19 2019 +0200

HBASE-22384 Fixed formatting issues in administration section of book
---
 src/main/asciidoc/_chapters/security.adoc | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/main/asciidoc/_chapters/security.adoc b/src/main/asciidoc/_chapters/security.adoc
index d6e457f..02e33d5 100644
--- a/src/main/asciidoc/_chapters/security.adoc
+++ b/src/main/asciidoc/_chapters/security.adoc
@@ -1253,15 +1253,15 @@ public static void addLabels() throws Exception {
 
 hbase> set_auths 'service', [ 'service' ]
 
-
++
 
 hbase> set_auths 'testuser', [ 'test' ]
 
-
++
 
 hbase> set_auths 'qa', [ 'test', 'developer' ]
 
-
++
 
 hbase> set_auths '@qagroup', [ 'test' ]
 
@@ -1293,15 +1293,15 @@ public void testSetAndGetUserAuths() throws Throwable {
 
 hbase> clear_auths 'service', [ 'service' ]
 
-
++
 
 hbase> clear_auths 'testuser', [ 'test' ]
 
-
++
 
 hbase> clear_auths 'qa', [ 'test', 'developer' ]
 
-
++
 
 hbase> clear_auths '@qagroup', [ 'test', 'developer' ]
 
@@ -1333,11 +1333,11 @@ The label is associated with a given version of the cell.
 
 hbase> set_visibility 'user', 'admin|service|developer', { COLUMNS => 'i' }
 
-
++
 
 hbase> set_visibility 'user', 'admin|service', { COLUMNS => 'pii' }
 
-
++
 
 hbase> set_visibility 'user', 'test', { COLUMNS => [ 'i', 'pii' ], FILTER => 
"(PrefixFilter ('test'))" }
 



[hbase] branch branch-2.0 updated: HBASE-22384 Fixed formatting issues in administration section of book

2019-05-14 Thread janh
This is an automated email from the ASF dual-hosted git repository.

janh pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.0 by this push:
 new 262f3f4  HBASE-22384 Fixed formatting issues in administration section 
of book
262f3f4 is described below

commit 262f3f43bb4cf8a31f6cb6e85ae6d258d9c46654
Author: Jan Hentschel 
AuthorDate: Wed May 8 16:36:19 2019 +0200

HBASE-22384 Fixed formatting issues in administration section of book
---
 src/main/asciidoc/_chapters/security.adoc | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/main/asciidoc/_chapters/security.adoc b/src/main/asciidoc/_chapters/security.adoc
index 56f6566..41a9101 100644
--- a/src/main/asciidoc/_chapters/security.adoc
+++ b/src/main/asciidoc/_chapters/security.adoc
@@ -1283,15 +1283,15 @@ public static void addLabels() throws Exception {
 
 hbase> set_auths 'service', [ 'service' ]
 
-
++
 
 hbase> set_auths 'testuser', [ 'test' ]
 
-
++
 
 hbase> set_auths 'qa', [ 'test', 'developer' ]
 
-
++
 
 hbase> set_auths '@qagroup', [ 'test' ]
 
@@ -1321,15 +1321,15 @@ public void testSetAndGetUserAuths() throws Throwable {
 
 hbase> clear_auths 'service', [ 'service' ]
 
-
++
 
 hbase> clear_auths 'testuser', [ 'test' ]
 
-
++
 
 hbase> clear_auths 'qa', [ 'test', 'developer' ]
 
-
++
 
 hbase> clear_auths '@qagroup', [ 'test', 'developer' ]
 
@@ -1359,11 +1359,11 @@ The label is associated with a given version of the cell.
 
 hbase> set_visibility 'user', 'admin|service|developer', { COLUMNS => 'i' }
 
-
++
 
 hbase> set_visibility 'user', 'admin|service', { COLUMNS => 'pii' }
 
-
++
 
 hbase> set_visibility 'user', 'test', { COLUMNS => [ 'i', 'pii' ], FILTER => 
"(PrefixFilter ('test'))" }
 



[hbase] branch branch-2.1 updated: HBASE-22384 Fixed formatting issues in administration section of book

2019-05-14 Thread janh
This is an automated email from the ASF dual-hosted git repository.

janh pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new 9f1eac2  HBASE-22384 Fixed formatting issues in administration section 
of book
9f1eac2 is described below

commit 9f1eac2268ffeb3ad1474667f69e636434268199
Author: Jan Hentschel 
AuthorDate: Wed May 8 16:36:19 2019 +0200

HBASE-22384 Fixed formatting issues in administration section of book
---
 src/main/asciidoc/_chapters/security.adoc | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/main/asciidoc/_chapters/security.adoc b/src/main/asciidoc/_chapters/security.adoc
index 56f6566..41a9101 100644
--- a/src/main/asciidoc/_chapters/security.adoc
+++ b/src/main/asciidoc/_chapters/security.adoc
@@ -1283,15 +1283,15 @@ public static void addLabels() throws Exception {
 
 hbase> set_auths 'service', [ 'service' ]
 
-
++
 
 hbase> set_auths 'testuser', [ 'test' ]
 
-
++
 
 hbase> set_auths 'qa', [ 'test', 'developer' ]
 
-
++
 
 hbase> set_auths '@qagroup', [ 'test' ]
 
@@ -1321,15 +1321,15 @@ public void testSetAndGetUserAuths() throws Throwable {
 
 hbase> clear_auths 'service', [ 'service' ]
 
-
++
 
 hbase> clear_auths 'testuser', [ 'test' ]
 
-
++
 
 hbase> clear_auths 'qa', [ 'test', 'developer' ]
 
-
++
 
 hbase> clear_auths '@qagroup', [ 'test', 'developer' ]
 
@@ -1359,11 +1359,11 @@ The label is associated with a given version of the cell.
 
 hbase> set_visibility 'user', 'admin|service|developer', { COLUMNS => 'i' }
 
-
++
 
 hbase> set_visibility 'user', 'admin|service', { COLUMNS => 'pii' }
 
-
++
 
 hbase> set_visibility 'user', 'test', { COLUMNS => [ 'i', 'pii' ], FILTER => 
"(PrefixFilter ('test'))" }
 



[hbase] branch branch-2.2 updated: HBASE-22384 Fixed formatting issues in administration section of book

2019-05-14 Thread janh
This is an automated email from the ASF dual-hosted git repository.

janh pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new af2fccf  HBASE-22384 Fixed formatting issues in administration section 
of book
af2fccf is described below

commit af2fccf5279353432dbf15d1389ec8a11e636425
Author: Jan Hentschel 
AuthorDate: Wed May 8 16:36:19 2019 +0200

HBASE-22384 Fixed formatting issues in administration section of book
---
 src/main/asciidoc/_chapters/security.adoc | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/main/asciidoc/_chapters/security.adoc b/src/main/asciidoc/_chapters/security.adoc
index 56f6566..41a9101 100644
--- a/src/main/asciidoc/_chapters/security.adoc
+++ b/src/main/asciidoc/_chapters/security.adoc
@@ -1283,15 +1283,15 @@ public static void addLabels() throws Exception {
 
 hbase> set_auths 'service', [ 'service' ]
 
-
++
 
 hbase> set_auths 'testuser', [ 'test' ]
 
-
++
 
 hbase> set_auths 'qa', [ 'test', 'developer' ]
 
-
++
 
 hbase> set_auths '@qagroup', [ 'test' ]
 
@@ -1321,15 +1321,15 @@ public void testSetAndGetUserAuths() throws Throwable {
 
 hbase> clear_auths 'service', [ 'service' ]
 
-
++
 
 hbase> clear_auths 'testuser', [ 'test' ]
 
-
++
 
 hbase> clear_auths 'qa', [ 'test', 'developer' ]
 
-
++
 
 hbase> clear_auths '@qagroup', [ 'test', 'developer' ]
 
@@ -1359,11 +1359,11 @@ The label is associated with a given version of the cell.
 
 hbase> set_visibility 'user', 'admin|service|developer', { COLUMNS => 'i' }
 
-
++
 
 hbase> set_visibility 'user', 'admin|service', { COLUMNS => 'pii' }
 
-
++
 
 hbase> set_visibility 'user', 'test', { COLUMNS => [ 'i', 'pii' ], FILTER => 
"(PrefixFilter ('test'))" }
 



[hbase] branch branch-2 updated: HBASE-22384 Fixed formatting issues in administration section of book

2019-05-14 Thread janh
This is an automated email from the ASF dual-hosted git repository.

janh pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 7ade627  HBASE-22384 Fixed formatting issues in administration section 
of book
7ade627 is described below

commit 7ade62759345fc17b7d56608bd988046d215e444
Author: Jan Hentschel 
AuthorDate: Wed May 8 16:36:19 2019 +0200

HBASE-22384 Fixed formatting issues in administration section of book
---
 src/main/asciidoc/_chapters/security.adoc | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/main/asciidoc/_chapters/security.adoc b/src/main/asciidoc/_chapters/security.adoc
index f10b55c..1d5b31f 100644
--- a/src/main/asciidoc/_chapters/security.adoc
+++ b/src/main/asciidoc/_chapters/security.adoc
@@ -1274,15 +1274,15 @@ public static void addLabels() throws Exception {
 
 hbase> set_auths 'service', [ 'service' ]
 
-
++
 
 hbase> set_auths 'testuser', [ 'test' ]
 
-
++
 
 hbase> set_auths 'qa', [ 'test', 'developer' ]
 
-
++
 
 hbase> set_auths '@qagroup', [ 'test' ]
 
@@ -1312,15 +1312,15 @@ public void testSetAndGetUserAuths() throws Throwable {
 
 hbase> clear_auths 'service', [ 'service' ]
 
-
++
 
 hbase> clear_auths 'testuser', [ 'test' ]
 
-
++
 
 hbase> clear_auths 'qa', [ 'test', 'developer' ]
 
-
++
 
 hbase> clear_auths '@qagroup', [ 'test', 'developer' ]
 
@@ -1350,11 +1350,11 @@ The label is associated with a given version of the cell.
 
 hbase> set_visibility 'user', 'admin|service|developer', { COLUMNS => 'i' }
 
-
++
 
 hbase> set_visibility 'user', 'admin|service', { COLUMNS => 'pii' }
 
-
++
 
 hbase> set_visibility 'user', 'test', { COLUMNS => [ 'i', 'pii' ], FILTER => 
"(PrefixFilter ('test'))" }
 



[hbase] branch master updated: HBASE-22384 Fixed formatting issues in administration section of book

2019-05-14 Thread janh
This is an automated email from the ASF dual-hosted git repository.

janh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new fde0251  HBASE-22384 Fixed formatting issues in administration section 
of book
fde0251 is described below

commit fde025112b94ee6c13f4fde76d0d36ca23683b01
Author: Jan Hentschel 
AuthorDate: Wed May 8 16:36:19 2019 +0200

HBASE-22384 Fixed formatting issues in administration section of book
---
 src/main/asciidoc/_chapters/security.adoc | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/main/asciidoc/_chapters/security.adoc b/src/main/asciidoc/_chapters/security.adoc
index 2c76285..ef5a321 100644
--- a/src/main/asciidoc/_chapters/security.adoc
+++ b/src/main/asciidoc/_chapters/security.adoc
@@ -1274,15 +1274,15 @@ public static void addLabels() throws Exception {
 
 hbase> set_auths 'service', [ 'service' ]
 
-
++
 
 hbase> set_auths 'testuser', [ 'test' ]
 
-
++
 
 hbase> set_auths 'qa', [ 'test', 'developer' ]
 
-
++
 
 hbase> set_auths '@qagroup', [ 'test' ]
 
@@ -1312,15 +1312,15 @@ public void testSetAndGetUserAuths() throws Throwable {
 
 hbase> clear_auths 'service', [ 'service' ]
 
-
++
 
 hbase> clear_auths 'testuser', [ 'test' ]
 
-
++
 
 hbase> clear_auths 'qa', [ 'test', 'developer' ]
 
-
++
 
 hbase> clear_auths '@qagroup', [ 'test', 'developer' ]
 
@@ -1350,11 +1350,11 @@ The label is associated with a given version of the cell.
 
 hbase> set_visibility 'user', 'admin|service|developer', { COLUMNS => 'i' }
 
-
++
 
 hbase> set_visibility 'user', 'admin|service', { COLUMNS => 'pii' }
 
-
++
 
 hbase> set_visibility 'user', 'test', { COLUMNS => [ 'i', 'pii' ], FILTER => 
"(PrefixFilter ('test'))" }
 



[hbase-filesystem] branch master updated: HBASE-22386 HBOSS: Limit depth that listing locks check for other locks

2019-05-14 Thread elserj
This is an automated email from the ASF dual-hosted git repository.

elserj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-filesystem.git


The following commit(s) were added to refs/heads/master by this push:
 new 074c852  HBASE-22386 HBOSS: Limit depth that listing locks check for 
other locks
074c852 is described below

commit 074c852bd590eec1646ee24ff39fcbc9ce4167ea
Author: Sean Mackrory 
AuthorDate: Tue May 14 11:36:18 2019 -0400

HBASE-22386 HBOSS: Limit depth that listing locks check for other locks

Signed-off-by: Josh Elser 
---
 .../hbase/oss/HBaseObjectStoreSemantics.java   |  18 +--
 .../hadoop/hbase/oss/sync/TreeLockManager.java |  44 ---
 .../hadoop/hbase/oss/sync/ZKTreeLockManager.java   |  57 +
 .../hbase/oss/sync/LocalTreeLockManager.java   |  41 ---
 .../hadoop/hbase/oss/sync/NullTreeLockManager.java |   7 +-
 .../hadoop/hbase/oss/sync/TestTreeLockManager.java | 133 +
 6 files changed, 233 insertions(+), 67 deletions(-)

diff --git a/hbase-oss/src/main/java/org/apache/hadoop/hbase/oss/HBaseObjectStoreSemantics.java b/hbase-oss/src/main/java/org/apache/hadoop/hbase/oss/HBaseObjectStoreSemantics.java
index f72d7e3..bebc37c 100644
--- a/hbase-oss/src/main/java/org/apache/hadoop/hbase/oss/HBaseObjectStoreSemantics.java
+++ b/hbase-oss/src/main/java/org/apache/hadoop/hbase/oss/HBaseObjectStoreSemantics.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.oss.sync.AutoLock;
 import org.apache.hadoop.hbase.oss.sync.AutoLock.LockedFSDataOutputStream;
 import org.apache.hadoop.hbase.oss.sync.AutoLock.LockedRemoteIterator;
 import org.apache.hadoop.hbase.oss.sync.TreeLockManager;
+import org.apache.hadoop.hbase.oss.sync.TreeLockManager.Depth;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
@@ -533,7 +534,7 @@ public class HBaseObjectStoreSemantics extends FileSystem {
 
   public FileStatus[] listStatus(Path f) throws FileNotFoundException,
 IOException {
-try (AutoLock l = sync.lockListing(f)) {
+try (AutoLock l = sync.lockListing(f, Depth.DIRECTORY)) {
   return fs.listStatus(f);
 }
   }
@@ -547,21 +548,21 @@ public class HBaseObjectStoreSemantics extends FileSystem {
 
   public FileStatus[] listStatus(Path f, PathFilter filter)
 throws FileNotFoundException, IOException {
-try (AutoLock l = sync.lockListing(f)) {
+try (AutoLock l = sync.lockListing(f, Depth.DIRECTORY)) {
   return fs.listStatus(f, filter);
 }
   }
 
   public FileStatus[] listStatus(Path[] files)
 throws FileNotFoundException, IOException {
-try (AutoLock l = sync.lockListings(files)) {
+try (AutoLock l = sync.lockListings(files, Depth.DIRECTORY)) {
   return fs.listStatus(files);
 }
   }
 
   public FileStatus[] listStatus(Path[] files, PathFilter filter)
 throws FileNotFoundException, IOException {
-try (AutoLock l = sync.lockListings(files)) {
+try (AutoLock l = sync.lockListings(files, Depth.DIRECTORY)) {
   return fs.listStatus(files, filter);
 }
   }
@@ -579,7 +580,7 @@ public class HBaseObjectStoreSemantics extends FileSystem {
 
  public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
 throws FileNotFoundException, IOException {
-AutoLock lock = sync.lockListing(f);
+AutoLock lock = sync.lockListing(f, Depth.DIRECTORY);
 try {
      RemoteIterator<LocatedFileStatus> iterator = fs.listLocatedStatus(f);
   return new LockedRemoteIterator(iterator, lock);
@@ -591,7 +592,7 @@ public class HBaseObjectStoreSemantics extends FileSystem {
 
  public RemoteIterator<FileStatus> listStatusIterator(final Path p)
 throws FileNotFoundException, IOException {
-AutoLock lock = sync.lockListing(p);
+AutoLock lock = sync.lockListing(p, Depth.DIRECTORY);
 try {
      RemoteIterator<FileStatus> iterator = fs.listStatusIterator(p);
   return new LockedRemoteIterator(iterator, lock);
@@ -604,7 +605,8 @@ public class HBaseObjectStoreSemantics extends FileSystem {
  public RemoteIterator<LocatedFileStatus> listFiles(
 final Path f, final boolean recursive)
 throws FileNotFoundException, IOException {
-AutoLock lock = sync.lockListing(f);
+Depth depth = recursive ? Depth.RECURSIVE : Depth.DIRECTORY;
+AutoLock lock = sync.lockListing(f, depth);
 try {
      RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(f, recursive);
   return new LockedRemoteIterator(iterator, lock);
@@ -847,7 +849,7 @@ public class HBaseObjectStoreSemantics extends FileSystem {
 
   public Path createSnapshot(Path path, String snapshotName)
 throws IOException {
-try (AutoLock l = sync.lockListing(path)) {
+try (AutoLock l = sync.lockListing(path, Depth.RECURSIVE)) {
   return fs.createSnapshot(path, snapshotName);
 }
   }
diff --git 
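The diff is truncated here, but the pattern it introduces is visible above: each listing takes a lock whose depth matches the operation, so a flat listStatus no longer contends with locks deep in the subtree. A hedged sketch of that shape, with sync and fs standing in for the wrapped lock manager and filesystem fields:

import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.oss.sync.AutoLock;
import org.apache.hadoop.hbase.oss.sync.TreeLockManager;
import org.apache.hadoop.hbase.oss.sync.TreeLockManager.Depth;

// Hedged sketch of the locking pattern above: a flat listing takes only a
// DIRECTORY-depth lock, mirroring what the patch changes listStatus to.
class ListingLockSketch {
  static FileStatus[] list(TreeLockManager sync, FileSystem fs, Path f) throws IOException {
    try (AutoLock l = sync.lockListing(f, Depth.DIRECTORY)) {
      return fs.listStatus(f);
    }
  }
}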

[hbase-site] branch asf-site updated: INFRA-10751 Empty commit

2019-05-14 Thread git-site-role
This is an automated email from the ASF dual-hosted git repository.

git-site-role pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/hbase-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 4237ace  INFRA-10751 Empty commit
4237ace is described below

commit 4237ace91217150b95783d08c0d4d943837a6d56
Author: jenkins 
AuthorDate: Tue May 14 14:51:54 2019 +

INFRA-10751 Empty commit



[hbase] branch branch-2.2 updated: HBASE-22399 Change default hadoop-two.version to 2.8.x and remove the 2.7.x hadoop checks

2019-05-14 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new 5fbbcb7  HBASE-22399 Change default hadoop-two.version to 2.8.x and 
remove the 2.7.x hadoop checks
5fbbcb7 is described below

commit 5fbbcb76b1dac45ddbd03f1727397bc6c4f1f651
Author: Duo Zhang 
AuthorDate: Mon May 13 10:30:13 2019 +0800

HBASE-22399 Change default hadoop-two.version to 2.8.x and remove the 2.7.x 
hadoop checks
---
 dev-support/Jenkinsfile  |  2 +-
 dev-support/hbase-personality.sh | 20 +---
 .../hbase/client/TestReversedScannerCallable.java|  5 +
 pom.xml  | 14 --
 4 files changed, 27 insertions(+), 14 deletions(-)

diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 9f43228..843506e 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -116,7 +116,7 @@ pipeline {
 }
 stage ('hadoop 2 cache') {
   environment {
-HADOOP2_VERSION="2.7.1"
+HADOOP2_VERSION="2.8.2"
   }
   steps {
        // directory must be unique for each parallel stage, because jenkins runs them in the same workspace :(
diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh
index 9cab756..4631a0a 100755
--- a/dev-support/hbase-personality.sh
+++ b/dev-support/hbase-personality.sh
@@ -503,27 +503,33 @@ function hadoopcheck_rebuild
 
   # All supported Hadoop versions that we want to test the compilation with
   # See the Hadoop section on prereqs in the HBase Reference Guide
-  hbase_common_hadoop2_versions="2.7.1 2.7.2 2.7.3 2.7.4"
   if [[ "${PATCH_BRANCH}" = branch-1.* ]] && [[ "${PATCH_BRANCH#branch-1.}" -lt "5" ]]; then
     yetus_info "Setting Hadoop 2 versions to test based on before-branch-1.5 rules."
     if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
-      hbase_hadoop2_versions="2.4.1 2.5.2 2.6.5 2.7.4"
+      hbase_hadoop2_versions="2.4.1 2.5.2 2.6.5 2.7.7"
     else
-      hbase_hadoop2_versions="2.4.0 2.4.1 2.5.0 2.5.1 2.5.2 2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 ${hbase_common_hadoop2_versions}"
+      hbase_hadoop2_versions="2.4.0 2.4.1 2.5.0 2.5.1 2.5.2 2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 2.7.1 2.7.2 2.7.3 2.7.4 2.7.5 2.7.6 2.7.7"
     fi
   elif [[ "${PATCH_BRANCH}" = branch-2.0 ]]; then
     yetus_info "Setting Hadoop 2 versions to test based on branch-2.0 rules."
     if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
-      hbase_hadoop2_versions="2.6.5 2.7.4"
+      hbase_hadoop2_versions="2.6.5 2.7.7 2.8.5"
     else
-      hbase_hadoop2_versions="2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 ${hbase_common_hadoop2_versions}"
+      hbase_hadoop2_versions="2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 2.7.1 2.7.2 2.7.3 2.7.4 2.7.5 2.7.6 2.7.7 2.8.2 2.8.3 2.8.4 2.8.5"
+    fi
+  elif [[ "${PATCH_BRANCH}" = branch-2.1 ]]; then
+    yetus_info "Setting Hadoop 2 versions to test based on branch-2.1 rules."
+    if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
+      hbase_hadoop2_versions="2.7.7 2.8.5"
+    else
+      hbase_hadoop2_versions="2.7.1 2.7.2 2.7.3 2.7.4 2.7.5 2.7.6 2.7.7 2.8.2 2.8.3 2.8.4 2.8.5"
     fi
   else
     yetus_info "Setting Hadoop 2 versions to test based on branch-1.5+/branch-2.1+/master/feature branch rules."
     if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
-      hbase_hadoop2_versions="2.7.4"
+      hbase_hadoop2_versions="2.8.5 2.9.2"
     else
-      hbase_hadoop2_versions="${hbase_common_hadoop2_versions}"
+      hbase_hadoop2_versions="2.8.2 2.8.3 2.8.4 2.8.5 2.9.1 2.9.2"
     fi
   fi
   hbase_hadoop3_versions="3.0.0"
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java
index 8cef45d..4432192 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java
@@ -57,13 +57,10 @@ public class TestReversedScannerCallable {
 
   @Before
   public void setUp() throws Exception {
-    byte[] ROW_BEFORE = ConnectionUtils.createCloseRowBefore(ROW);
-
-    Configuration conf = Mockito.mock(Configuration.class);
     HRegionLocation regionLocation = Mockito.mock(HRegionLocation.class);
     ServerName serverName = Mockito.mock(ServerName.class);
 
-    Mockito.when(connection.getConfiguration()).thenReturn(conf);
+    Mockito.when(connection.getConfiguration()).thenReturn(new Configuration());
     Mockito.when(regionLocations.size()).thenReturn(1);
     Mockito.when(regionLocations.getRegionLocation(0)).thenReturn(regionLocation);
     Mockito.when(regionLocation.getHostname()).thenReturn("localhost");
diff --git a/pom.xml 
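The remainder of the diff is cut off above. The test tweak it contains swaps a Mockito mock of Configuration for a real instance: an unstubbed mock answers null for every get(), while new Configuration() resolves the shipped defaults. A small illustrative contrast, assuming Mockito is on the classpath:

import static org.mockito.Mockito.mock;

import org.apache.hadoop.conf.Configuration;

// Illustrative contrast: the unstubbed mock returns null, while the real
// Configuration serves a default from core-default.xml.
public class ConfMockSketch {
  public static void main(String[] args) {
    Configuration mocked = mock(Configuration.class);
    Configuration real = new Configuration();
    System.out.println(mocked.get("io.file.buffer.size")); // null
    System.out.println(real.get("io.file.buffer.size"));   // 4096 by default
  }
}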

[hbase] branch branch-2 updated: HBASE-22399 Change default hadoop-two.version to 2.8.x and remove the 2.7.x hadoop checks

2019-05-14 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 6bd6a95  HBASE-22399 Change default hadoop-two.version to 2.8.x and 
remove the 2.7.x hadoop checks
6bd6a95 is described below

commit 6bd6a953a71ecfdd53700f2a6a37ebac285a8bfe
Author: Duo Zhang 
AuthorDate: Mon May 13 10:30:13 2019 +0800

HBASE-22399 Change default hadoop-two.version to 2.8.x and remove the 2.7.x 
hadoop checks
---
 dev-support/Jenkinsfile  |  2 +-
 dev-support/hbase-personality.sh | 20 +---
 .../hbase/client/TestReversedScannerCallable.java|  5 +
 pom.xml  | 14 --
 4 files changed, 27 insertions(+), 14 deletions(-)

diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 9f43228..843506e 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -116,7 +116,7 @@ pipeline {
 }
 stage ('hadoop 2 cache') {
   environment {
-HADOOP2_VERSION="2.7.1"
+HADOOP2_VERSION="2.8.2"
   }
   steps {
        // directory must be unique for each parallel stage, because jenkins runs them in the same workspace :(
diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh
index 9cab756..4631a0a 100755
--- a/dev-support/hbase-personality.sh
+++ b/dev-support/hbase-personality.sh
@@ -503,27 +503,33 @@ function hadoopcheck_rebuild
 
   # All supported Hadoop versions that we want to test the compilation with
   # See the Hadoop section on prereqs in the HBase Reference Guide
-  hbase_common_hadoop2_versions="2.7.1 2.7.2 2.7.3 2.7.4"
   if [[ "${PATCH_BRANCH}" = branch-1.* ]] && [[ "${PATCH_BRANCH#branch-1.}" -lt "5" ]]; then
     yetus_info "Setting Hadoop 2 versions to test based on before-branch-1.5 rules."
     if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
-      hbase_hadoop2_versions="2.4.1 2.5.2 2.6.5 2.7.4"
+      hbase_hadoop2_versions="2.4.1 2.5.2 2.6.5 2.7.7"
     else
-      hbase_hadoop2_versions="2.4.0 2.4.1 2.5.0 2.5.1 2.5.2 2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 ${hbase_common_hadoop2_versions}"
+      hbase_hadoop2_versions="2.4.0 2.4.1 2.5.0 2.5.1 2.5.2 2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 2.7.1 2.7.2 2.7.3 2.7.4 2.7.5 2.7.6 2.7.7"
     fi
   elif [[ "${PATCH_BRANCH}" = branch-2.0 ]]; then
     yetus_info "Setting Hadoop 2 versions to test based on branch-2.0 rules."
     if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
-      hbase_hadoop2_versions="2.6.5 2.7.4"
+      hbase_hadoop2_versions="2.6.5 2.7.7 2.8.5"
     else
-      hbase_hadoop2_versions="2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 ${hbase_common_hadoop2_versions}"
+      hbase_hadoop2_versions="2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 2.7.1 2.7.2 2.7.3 2.7.4 2.7.5 2.7.6 2.7.7 2.8.2 2.8.3 2.8.4 2.8.5"
+    fi
+  elif [[ "${PATCH_BRANCH}" = branch-2.1 ]]; then
+    yetus_info "Setting Hadoop 2 versions to test based on branch-2.1 rules."
+    if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
+      hbase_hadoop2_versions="2.7.7 2.8.5"
+    else
+      hbase_hadoop2_versions="2.7.1 2.7.2 2.7.3 2.7.4 2.7.5 2.7.6 2.7.7 2.8.2 2.8.3 2.8.4 2.8.5"
     fi
   else
     yetus_info "Setting Hadoop 2 versions to test based on branch-1.5+/branch-2.1+/master/feature branch rules."
     if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
-      hbase_hadoop2_versions="2.7.4"
+      hbase_hadoop2_versions="2.8.5 2.9.2"
     else
-      hbase_hadoop2_versions="${hbase_common_hadoop2_versions}"
+      hbase_hadoop2_versions="2.8.2 2.8.3 2.8.4 2.8.5 2.9.1 2.9.2"
     fi
   fi
   hbase_hadoop3_versions="3.0.0"
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java
index 8cef45d..4432192 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java
@@ -57,13 +57,10 @@ public class TestReversedScannerCallable {
 
   @Before
   public void setUp() throws Exception {
-    byte[] ROW_BEFORE = ConnectionUtils.createCloseRowBefore(ROW);
-
-    Configuration conf = Mockito.mock(Configuration.class);
     HRegionLocation regionLocation = Mockito.mock(HRegionLocation.class);
     ServerName serverName = Mockito.mock(ServerName.class);
 
-    Mockito.when(connection.getConfiguration()).thenReturn(conf);
+    Mockito.when(connection.getConfiguration()).thenReturn(new Configuration());
     Mockito.when(regionLocations.size()).thenReturn(1);
     Mockito.when(regionLocations.getRegionLocation(0)).thenReturn(regionLocation);
     Mockito.when(regionLocation.getHostname()).thenReturn("localhost");
diff --git a/pom.xml 

[hbase] branch master updated: HBASE-22399 Change default hadoop-two.version to 2.8.x and remove the 2.7.x hadoop checks

2019-05-14 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 083605d  HBASE-22399 Change default hadoop-two.version to 2.8.x and 
remove the 2.7.x hadoop checks
083605d is described below

commit 083605df8f68ba5bfb702c11f4bd152c9b259182
Author: Duo Zhang 
AuthorDate: Mon May 13 10:30:13 2019 +0800

HBASE-22399 Change default hadoop-two.version to 2.8.x and remove the 2.7.x 
hadoop checks
---
 dev-support/Jenkinsfile  |  2 +-
 dev-support/hbase-personality.sh | 20 +---
 .../hbase/client/TestReversedScannerCallable.java|  5 +
 pom.xml  | 14 --
 4 files changed, 27 insertions(+), 14 deletions(-)

diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 88532ed..4c8fd5d 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -116,7 +116,7 @@ pipeline {
 }
 stage ('hadoop 2 cache') {
   environment {
-HADOOP2_VERSION="2.7.1"
+HADOOP2_VERSION="2.8.2"
   }
   steps {
        // directory must be unique for each parallel stage, because jenkins runs them in the same workspace :(
diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh
index ceca596..1d59b28 100755
--- a/dev-support/hbase-personality.sh
+++ b/dev-support/hbase-personality.sh
@@ -504,27 +504,33 @@ function hadoopcheck_rebuild
 
   # All supported Hadoop versions that we want to test the compilation with
   # See the Hadoop section on prereqs in the HBase Reference Guide
-  hbase_common_hadoop2_versions="2.7.1 2.7.2 2.7.3 2.7.4"
   if [[ "${PATCH_BRANCH}" = branch-1.* ]] && [[ "${PATCH_BRANCH#branch-1.}" -lt "5" ]]; then
     yetus_info "Setting Hadoop 2 versions to test based on before-branch-1.5 rules."
     if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
-      hbase_hadoop2_versions="2.4.1 2.5.2 2.6.5 2.7.4"
+      hbase_hadoop2_versions="2.4.1 2.5.2 2.6.5 2.7.7"
     else
-      hbase_hadoop2_versions="2.4.0 2.4.1 2.5.0 2.5.1 2.5.2 2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 ${hbase_common_hadoop2_versions}"
+      hbase_hadoop2_versions="2.4.0 2.4.1 2.5.0 2.5.1 2.5.2 2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 2.7.1 2.7.2 2.7.3 2.7.4 2.7.5 2.7.6 2.7.7"
     fi
   elif [[ "${PATCH_BRANCH}" = branch-2.0 ]]; then
     yetus_info "Setting Hadoop 2 versions to test based on branch-2.0 rules."
     if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
-      hbase_hadoop2_versions="2.6.5 2.7.4"
+      hbase_hadoop2_versions="2.6.5 2.7.7 2.8.5"
     else
-      hbase_hadoop2_versions="2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 ${hbase_common_hadoop2_versions}"
+      hbase_hadoop2_versions="2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 2.7.1 2.7.2 2.7.3 2.7.4 2.7.5 2.7.6 2.7.7 2.8.2 2.8.3 2.8.4 2.8.5"
+    fi
+  elif [[ "${PATCH_BRANCH}" = branch-2.1 ]]; then
+    yetus_info "Setting Hadoop 2 versions to test based on branch-2.1 rules."
+    if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
+      hbase_hadoop2_versions="2.7.7 2.8.5"
+    else
+      hbase_hadoop2_versions="2.7.1 2.7.2 2.7.3 2.7.4 2.7.5 2.7.6 2.7.7 2.8.2 2.8.3 2.8.4 2.8.5"
     fi
   else
     yetus_info "Setting Hadoop 2 versions to test based on branch-1.5+/branch-2.1+/master/feature branch rules."
     if [[ "${QUICK_HADOOPCHECK}" == "true" ]]; then
-      hbase_hadoop2_versions="2.7.4"
+      hbase_hadoop2_versions="2.8.5 2.9.2"
     else
-      hbase_hadoop2_versions="${hbase_common_hadoop2_versions}"
+      hbase_hadoop2_versions="2.8.2 2.8.3 2.8.4 2.8.5 2.9.1 2.9.2"
     fi
   fi
   hbase_hadoop3_versions="3.0.0"
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java
index 8cef45d..4432192 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java
@@ -57,13 +57,10 @@ public class TestReversedScannerCallable {
 
   @Before
   public void setUp() throws Exception {
-    byte[] ROW_BEFORE = ConnectionUtils.createCloseRowBefore(ROW);
-
-    Configuration conf = Mockito.mock(Configuration.class);
     HRegionLocation regionLocation = Mockito.mock(HRegionLocation.class);
     ServerName serverName = Mockito.mock(ServerName.class);
 
-    Mockito.when(connection.getConfiguration()).thenReturn(conf);
+    Mockito.when(connection.getConfiguration()).thenReturn(new Configuration());
     Mockito.when(regionLocations.size()).thenReturn(1);
     Mockito.when(regionLocations.getRegionLocation(0)).thenReturn(regionLocation);
     Mockito.when(regionLocation.getHostname()).thenReturn("localhost");
diff --git a/pom.xml b/pom.xml