hbase git commit: HBASE-21242 [amv2] Miscellaneous minor log and assign procedure create improvements

2018-10-11 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 14ad3c365 -> 6601dcfbe


HBASE-21242 [amv2] Miscellaneous minor log and assign procedure create 
improvements

For RIT Duration, do better than print ms/seconds. Remove redundant UI
column dedicated to duration when we log it in the status field too.

Make bypass log at INFO level.

Make it so that on completion of a subprocedure, we note the count of
outstanding siblings so we have a clue how much further the parent has to go
before it is done (helpful when hundreds of servers are doing SCP).

Have the SCP run the AP preflight check before creating an AP; saves
creation of thousands of APs during fixup.

Don't log tablename three times when reporting remote call failed.

If the lock is held already, note who has it. Also, log after we get the lock,
or when we have to wait for it, rather than logging on entrance — at that point
we may still have to wait, or we may have just picked up the lock.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6601dcfb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6601dcfb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6601dcfb

Branch: refs/heads/branch-2.0
Commit: 6601dcfbe00c7e6b6b2274a4e444d821b3f10eca
Parents: 14ad3c3
Author: Michael Stack 
Authored: Wed Sep 26 21:22:46 2018 -0700
Committer: Michael Stack 
Committed: Thu Oct 11 22:07:44 2018 -0700

--
 .../apache/hadoop/hbase/master/RegionState.java |  6 +--
 .../hadoop/hbase/procedure2/Procedure.java  |  3 ++
 .../hbase/procedure2/ProcedureExecutor.java | 30 +--
 .../master/AssignmentManagerStatusTmpl.jamon|  3 +-
 .../master/assignment/AssignProcedure.java  | 40 
 .../assignment/RegionTransitionProcedure.java   |  3 +-
 .../master/assignment/UnassignProcedure.java|  2 +-
 .../procedure/MasterProcedureScheduler.java |  5 ++-
 .../master/procedure/ServerCrashProcedure.java  |  8 
 .../hbase/regionserver/TestHRegionInfo.java |  7 +++-
 10 files changed, 79 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6601dcfb/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
index 7289ce8..a1e2ca6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
@@ -371,9 +371,9 @@ public class RegionState {
   public String toDescriptiveString() {
 long relTime = System.currentTimeMillis() - stamp;
 return hri.getRegionNameAsString()
-  + " state=" + state
-  + ", ts=" + new Date(stamp) + " (" + (relTime/1000) + "s ago)"
-  + ", server=" + serverName;
+  + " state=" + state + ", ts=" + new Date(stamp) + " (" +
+java.time.Duration.ofMillis(relTime).toString() +
+" ago), server=" + serverName;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/6601dcfb/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index 249ae34..a1391a5 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -884,6 +884,9 @@ public abstract class Procedure implements 
Comparable 0;
   }
 
+  /**
+   * @return Count of children outstanding (Badly named).
+   */
   protected synchronized int getChildrenLatch() {
 return childrenLatch;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6601dcfb/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 924c94e..a20ae82 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -1053,21 +1053,21 @@ public class ProcedureExecutor {
   return false;
 }
 
-LOG.debug("Begin bypass {} with lockWait={}, override={}, recursive={}",
+LOG.info("Begin bypass {} with lockWait={}, override={}, recursive={}",
 procedure, 

hbase git commit: HBASE-21254 Need to find a way to limit the number of proc wal files

2018-10-11 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 c9c743648 -> c3401d432


HBASE-21254 Need to find a way to limit the number of proc wal files


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c3401d43
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c3401d43
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c3401d43

Branch: refs/heads/branch-2.1
Commit: c3401d43278c5b204cdbc60473a1b7d29c1daaf7
Parents: c9c7436
Author: Duo Zhang 
Authored: Thu Oct 11 15:43:11 2018 +0800
Committer: Duo Zhang 
Committed: Fri Oct 12 11:47:48 2018 +0800

--
 .../hbase/procedure2/ProcedureExecutor.java |  41 +++-
 .../hbase/procedure2/store/BitSetNode.java  |  23 ++
 .../hbase/procedure2/store/ProcedureStore.java  |  26 ++-
 .../procedure2/store/ProcedureStoreBase.java|  22 +-
 .../procedure2/store/ProcedureStoreTracker.java |  13 +-
 .../procedure2/store/wal/WALProcedureStore.java |  40 +++-
 .../store/TestProcedureStoreTracker.java|  64 ++
 .../store/wal/TestForceUpdateProcedure.java | 218 +++
 .../org/apache/hadoop/hbase/master/HMaster.java |   9 +-
 .../master/procedure/MasterProcedureEnv.java|  21 --
 .../master/assignment/MockMasterServices.java   |   9 +-
 11 files changed, 383 insertions(+), 103 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c3401d43/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index cb191aa..8e3aacf 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -30,6 +30,8 @@ import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Executor;
+import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -43,6 +45,7 @@ import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.procedure2.Procedure.LockState;
 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
 import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
+import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureStoreListener;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -55,6 +58,7 @@ import org.slf4j.LoggerFactory;
 
 import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 
@@ -339,6 +343,9 @@ public class ProcedureExecutor {
*/
   private final ProcedureScheduler scheduler;
 
+  private final Executor forceUpdateExecutor = 
Executors.newSingleThreadExecutor(
+new 
ThreadFactoryBuilder().setDaemon(true).setNameFormat("Force-Update-PEWorker-%d").build());
+
   private final AtomicLong lastProcId = new AtomicLong(-1);
   private final AtomicLong workerId = new AtomicLong(0);
   private final AtomicInteger activeExecutorCount = new AtomicInteger(0);
@@ -361,6 +368,25 @@ public class ProcedureExecutor {
 this(conf, environment, store, new SimpleProcedureScheduler());
   }
 
+  private void forceUpdateProcedure(long procId) throws IOException {
+IdLock.Entry lockEntry = procExecutionLock.getLockEntry(procId);
+try {
+  Procedure proc = procedures.get(procId);
+  if (proc == null) {
+LOG.debug("No pending procedure with id = {}, skip force updating.", 
procId);
+return;
+  }
+  if (proc.isFinished()) {
+LOG.debug("Procedure {} has already been finished, skip force 
updating.", proc);
+return;
+  }
+  LOG.debug("Force update procedure {}", proc);
+  store.update(proc);
+} finally {
+  procExecutionLock.releaseLockEntry(lockEntry);
+}
+  }
+
   public ProcedureExecutor(final Configuration conf, final TEnvironment 
environment,
   final ProcedureStore store, final ProcedureScheduler scheduler) {
 this.environment = environment;
@@ -369,7 +395,19 @@ public class ProcedureExecutor {
 this.conf = 

hbase git commit: HBASE-21254 Need to find a way to limit the number of proc wal files

2018-10-11 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 5755f4a9f -> 14ad3c365


HBASE-21254 Need to find a way to limit the number of proc wal files


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/14ad3c36
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/14ad3c36
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/14ad3c36

Branch: refs/heads/branch-2.0
Commit: 14ad3c365241632315bddb47bb823e7abe957361
Parents: 5755f4a
Author: Duo Zhang 
Authored: Thu Oct 11 15:43:11 2018 +0800
Committer: Duo Zhang 
Committed: Fri Oct 12 11:23:09 2018 +0800

--
 .../hbase/procedure2/ProcedureExecutor.java |  41 +++-
 .../hbase/procedure2/store/BitSetNode.java  |  23 ++
 .../hbase/procedure2/store/ProcedureStore.java  |  26 ++-
 .../procedure2/store/ProcedureStoreBase.java|  22 +-
 .../procedure2/store/ProcedureStoreTracker.java |  13 +-
 .../procedure2/store/wal/WALProcedureStore.java |  43 ++--
 .../store/TestProcedureStoreTracker.java|  64 ++
 .../store/wal/TestForceUpdateProcedure.java | 218 +++
 .../org/apache/hadoop/hbase/master/HMaster.java |   9 +-
 .../master/procedure/MasterProcedureEnv.java|  21 --
 .../master/assignment/MockMasterServices.java   |   9 +-
 11 files changed, 383 insertions(+), 106 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/14ad3c36/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index cc73379..924c94e 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -30,6 +30,8 @@ import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Executor;
+import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -43,6 +45,7 @@ import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.procedure2.Procedure.LockState;
 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
 import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
+import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureStoreListener;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -55,6 +58,7 @@ import org.slf4j.LoggerFactory;
 
 import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 
@@ -339,6 +343,9 @@ public class ProcedureExecutor {
*/
   private final ProcedureScheduler scheduler;
 
+  private final Executor forceUpdateExecutor = 
Executors.newSingleThreadExecutor(
+new 
ThreadFactoryBuilder().setDaemon(true).setNameFormat("Force-Update-PEWorker-%d").build());
+
   private final AtomicLong lastProcId = new AtomicLong(-1);
   private final AtomicLong workerId = new AtomicLong(0);
   private final AtomicInteger activeExecutorCount = new AtomicInteger(0);
@@ -361,6 +368,25 @@ public class ProcedureExecutor {
 this(conf, environment, store, new SimpleProcedureScheduler());
   }
 
+  private void forceUpdateProcedure(long procId) throws IOException {
+IdLock.Entry lockEntry = procExecutionLock.getLockEntry(procId);
+try {
+  Procedure proc = procedures.get(procId);
+  if (proc == null) {
+LOG.debug("No pending procedure with id = {}, skip force updating.", 
procId);
+return;
+  }
+  if (proc.isFinished()) {
+LOG.debug("Procedure {} has already been finished, skip force 
updating.", proc);
+return;
+  }
+  LOG.debug("Force update procedure {}", proc);
+  store.update(proc);
+} finally {
+  procExecutionLock.releaseLockEntry(lockEntry);
+}
+  }
+
   public ProcedureExecutor(final Configuration conf, final TEnvironment 
environment,
   final ProcedureStore store, final ProcedureScheduler scheduler) {
 this.environment = environment;
@@ -369,7 +395,19 @@ public class ProcedureExecutor {
 this.conf = 

hbase git commit: HBASE-21254 Need to find a way to limit the number of proc wal files

2018-10-11 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 2b1716fd8 -> 9da4c1393


HBASE-21254 Need to find a way to limit the number of proc wal files


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9da4c139
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9da4c139
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9da4c139

Branch: refs/heads/branch-2
Commit: 9da4c1393d293835608e88ef44f9de505a33b4b8
Parents: 2b1716f
Author: Duo Zhang 
Authored: Thu Oct 11 15:43:11 2018 +0800
Committer: Duo Zhang 
Committed: Fri Oct 12 11:05:21 2018 +0800

--
 .../hbase/procedure2/ProcedureExecutor.java |  41 +++-
 .../hbase/procedure2/store/BitSetNode.java  |  23 ++
 .../hbase/procedure2/store/ProcedureStore.java  |  26 ++-
 .../procedure2/store/ProcedureStoreBase.java|  22 +-
 .../procedure2/store/ProcedureStoreTracker.java |  13 +-
 .../procedure2/store/wal/WALProcedureStore.java |  40 +++-
 .../store/TestProcedureStoreTracker.java|  64 ++
 .../store/wal/TestForceUpdateProcedure.java | 217 +++
 .../org/apache/hadoop/hbase/master/HMaster.java |   9 +-
 .../master/procedure/MasterProcedureEnv.java|  21 --
 .../master/assignment/MockMasterServices.java   |   9 +-
 11 files changed, 382 insertions(+), 103 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9da4c139/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 12520d6..b7c1ac8 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -31,6 +31,8 @@ import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Executor;
+import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -44,6 +46,7 @@ import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.procedure2.Procedure.LockState;
 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
 import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
+import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureStoreListener;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -56,6 +59,7 @@ import org.slf4j.LoggerFactory;
 
 import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 
@@ -347,6 +351,9 @@ public class ProcedureExecutor {
*/
   private final ProcedureScheduler scheduler;
 
+  private final Executor forceUpdateExecutor = 
Executors.newSingleThreadExecutor(
+new 
ThreadFactoryBuilder().setDaemon(true).setNameFormat("Force-Update-PEWorker-%d").build());
+
   private final AtomicLong lastProcId = new AtomicLong(-1);
   private final AtomicLong workerId = new AtomicLong(0);
   private final AtomicInteger activeExecutorCount = new AtomicInteger(0);
@@ -369,6 +376,25 @@ public class ProcedureExecutor {
 this(conf, environment, store, new SimpleProcedureScheduler());
   }
 
+  private void forceUpdateProcedure(long procId) throws IOException {
+IdLock.Entry lockEntry = procExecutionLock.getLockEntry(procId);
+try {
+  Procedure proc = procedures.get(procId);
+  if (proc == null) {
+LOG.debug("No pending procedure with id = {}, skip force updating.", 
procId);
+return;
+  }
+  if (proc.isFinished()) {
+LOG.debug("Procedure {} has already been finished, skip force 
updating.", proc);
+return;
+  }
+  LOG.debug("Force update procedure {}", proc);
+  store.update(proc);
+} finally {
+  procExecutionLock.releaseLockEntry(lockEntry);
+}
+  }
+
   public ProcedureExecutor(final Configuration conf, final TEnvironment 
environment,
   final ProcedureStore store, final ProcedureScheduler scheduler) {
 this.environment = environment;
@@ -377,7 +403,19 @@ public class ProcedureExecutor {
 this.conf = 

hbase git commit: HBASE-21254 Need to find a way to limit the number of proc wal files

2018-10-11 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master da63ebb2c -> 9e9a1e0f0


HBASE-21254 Need to find a way to limit the number of proc wal files


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9e9a1e0f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9e9a1e0f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9e9a1e0f

Branch: refs/heads/master
Commit: 9e9a1e0f0d58d6c276bf5ad4df96285e87854d54
Parents: da63ebb
Author: Duo Zhang 
Authored: Thu Oct 11 15:43:11 2018 +0800
Committer: Duo Zhang 
Committed: Fri Oct 12 11:05:13 2018 +0800

--
 .../hbase/procedure2/ProcedureExecutor.java |  41 +++-
 .../hbase/procedure2/store/BitSetNode.java  |  23 ++
 .../hbase/procedure2/store/ProcedureStore.java  |  26 ++-
 .../procedure2/store/ProcedureStoreBase.java|  22 +-
 .../procedure2/store/ProcedureStoreTracker.java |  13 +-
 .../procedure2/store/wal/WALProcedureStore.java |  40 +++-
 .../store/TestProcedureStoreTracker.java|  64 ++
 .../store/wal/TestForceUpdateProcedure.java | 217 +++
 .../org/apache/hadoop/hbase/master/HMaster.java |   9 +-
 .../master/procedure/MasterProcedureEnv.java|  21 --
 .../master/assignment/MockMasterServices.java   |   9 +-
 11 files changed, 382 insertions(+), 103 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9e9a1e0f/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 85d7e0b..7b5ab73 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -31,6 +31,8 @@ import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Executor;
+import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -44,6 +46,7 @@ import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.procedure2.Procedure.LockState;
 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
 import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
+import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureStoreListener;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -56,6 +59,7 @@ import org.slf4j.LoggerFactory;
 
 import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 
@@ -347,6 +351,9 @@ public class ProcedureExecutor {
*/
   private final ProcedureScheduler scheduler;
 
+  private final Executor forceUpdateExecutor = 
Executors.newSingleThreadExecutor(
+new 
ThreadFactoryBuilder().setDaemon(true).setNameFormat("Force-Update-PEWorker-%d").build());
+
   private final AtomicLong lastProcId = new AtomicLong(-1);
   private final AtomicLong workerId = new AtomicLong(0);
   private final AtomicInteger activeExecutorCount = new AtomicInteger(0);
@@ -369,6 +376,25 @@ public class ProcedureExecutor {
 this(conf, environment, store, new SimpleProcedureScheduler());
   }
 
+  private void forceUpdateProcedure(long procId) throws IOException {
+IdLock.Entry lockEntry = procExecutionLock.getLockEntry(procId);
+try {
+  Procedure proc = procedures.get(procId);
+  if (proc == null) {
+LOG.debug("No pending procedure with id = {}, skip force updating.", 
procId);
+return;
+  }
+  if (proc.isFinished()) {
+LOG.debug("Procedure {} has already been finished, skip force 
updating.", proc);
+return;
+  }
+  LOG.debug("Force update procedure {}", proc);
+  store.update(proc);
+} finally {
+  procExecutionLock.releaseLockEntry(lockEntry);
+}
+  }
+
   public ProcedureExecutor(final Configuration conf, final TEnvironment 
environment,
   final ProcedureStore store, final ProcedureScheduler scheduler) {
 this.environment = environment;
@@ -377,7 +403,19 @@ public class ProcedureExecutor {
 this.conf = conf;
   

hbase git commit: HBASE-21256 Improve IntegrationTestBigLinkedList for testing huge data

2018-10-11 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 924d183ba -> da63ebb2c


HBASE-21256 Improve IntegrationTestBigLinkedList for testing huge data

Signed-off-by: Duo Zhang 
Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/da63ebb2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/da63ebb2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/da63ebb2

Branch: refs/heads/master
Commit: da63ebb2c034f3cce25b390377e9dd9f70d9acae
Parents: 924d183
Author: Zephyr Guo 
Authored: Fri Oct 12 10:59:13 2018 +0800
Committer: Duo Zhang 
Committed: Fri Oct 12 10:59:35 2018 +0800

--
 .../org/apache/hadoop/hbase/util/Random64.java  | 149 +++
 .../hadoop/hbase/chaos/actions/Action.java  |  15 +-
 .../test/IntegrationTestBigLinkedList.java  |  66 ++--
 3 files changed, 209 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/da63ebb2/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java
new file mode 100644
index 000..f337b5f
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java
@@ -0,0 +1,149 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.util;
+
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+
+/**
+ *
+ * An instance of this class is used to generate a stream of
+ * pseudorandom numbers. The class uses a 64-bit seed, which is
+ * modified using a linear congruential formula.
+ *
+ * see https://en.wikipedia.org/wiki/Linear_congruential_generator
+ */
+@InterfaceAudience.Private
+public class Random64 {
+
+  private static final long multiplier = 6364136223846793005L;
+  private static final long addend = 1442695040888963407L;
+
+  private static final AtomicLong seedUniquifier
+= new AtomicLong(8682522807148012L);
+
+  private long seed;
+
+  /**
+   * Copy from {@link Random#seedUniquifier()}
+   */
+  private static long seedUniquifier() {
+for (; ; ) {
+  long current = seedUniquifier.get();
+  long next = current * 181783497276652981L;
+  if (seedUniquifier.compareAndSet(current, next)) {
+return next;
+  }
+}
+  }
+
+  public Random64() {
+this(seedUniquifier() ^ System.nanoTime());
+  }
+
+  public Random64(long seed) {
+this.seed = seed;
+  }
+
+  public long nextLong() {
+return next64(64);
+  }
+
+  public void nextBytes(byte[] bytes) {
+for (int i = 0, len = bytes.length; i < len;) {
+  // We regard seed as unsigned long, therefore used '>>>' instead of '>>'.
+  for (long rnd = nextLong(), n = Math.min(len - i, Long.SIZE / Byte.SIZE);
+   n-- > 0; rnd >>>= Byte.SIZE) {
+bytes[i++] = (byte) rnd;
+  }
+}
+  }
+
+  private long next64(int bits) {
+seed = seed * multiplier + addend;
+return seed >>> (64 - bits);
+  }
+
+
+  /**
+   * Random64 is a pseudorandom algorithm(LCG). Therefore, we will get same 
sequence
+   * if seeds are the same. This main will test how many calls nextLong() it 
will
+   * get the same seed.
+   *
+   * We do not need to save all numbers (that is too large). We could save
+   * once every 10 calls nextLong(). If it get a same seed, we can
+   * detect this by calling nextLong() 10 times continuously.
+   *
+   */
+  public static void main(String[] args) {
+long defaultTotalTestCnt = 1L; // 1 trillion
+
+if (args.length == 1) {
+  defaultTotalTestCnt = Long.parseLong(args[0]);
+}
+
+Preconditions.checkArgument(defaultTotalTestCnt > 0, "totalTestCnt <= 0");
+
+

hbase git commit: HBASE-21256 Improve IntegrationTestBigLinkedList for testing huge data

2018-10-11 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 a074269ee -> 2b1716fd8


HBASE-21256 Improve IntegrationTestBigLinkedList for testing huge data

Signed-off-by: Duo Zhang 
Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2b1716fd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2b1716fd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2b1716fd

Branch: refs/heads/branch-2
Commit: 2b1716fd8e7398035a32d6266d3a37511c399b1c
Parents: a074269
Author: Zephyr Guo 
Authored: Fri Oct 12 10:59:13 2018 +0800
Committer: Duo Zhang 
Committed: Fri Oct 12 11:00:03 2018 +0800

--
 .../org/apache/hadoop/hbase/util/Random64.java  | 149 +++
 .../hadoop/hbase/chaos/actions/Action.java  |  15 +-
 .../test/IntegrationTestBigLinkedList.java  |  66 ++--
 3 files changed, 209 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2b1716fd/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java
new file mode 100644
index 000..f337b5f
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java
@@ -0,0 +1,149 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.util;
+
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+
+/**
+ *
+ * An instance of this class is used to generate a stream of
+ * pseudorandom numbers. The class uses a 64-bit seed, which is
+ * modified using a linear congruential formula.
+ *
+ * see https://en.wikipedia.org/wiki/Linear_congruential_generator
+ */
+@InterfaceAudience.Private
+public class Random64 {
+
+  private static final long multiplier = 6364136223846793005L;
+  private static final long addend = 1442695040888963407L;
+
+  private static final AtomicLong seedUniquifier
+= new AtomicLong(8682522807148012L);
+
+  private long seed;
+
+  /**
+   * Copy from {@link Random#seedUniquifier()}
+   */
+  private static long seedUniquifier() {
+for (; ; ) {
+  long current = seedUniquifier.get();
+  long next = current * 181783497276652981L;
+  if (seedUniquifier.compareAndSet(current, next)) {
+return next;
+  }
+}
+  }
+
+  public Random64() {
+this(seedUniquifier() ^ System.nanoTime());
+  }
+
+  public Random64(long seed) {
+this.seed = seed;
+  }
+
+  public long nextLong() {
+return next64(64);
+  }
+
+  public void nextBytes(byte[] bytes) {
+for (int i = 0, len = bytes.length; i < len;) {
+  // We regard seed as unsigned long, therefore used '>>>' instead of '>>'.
+  for (long rnd = nextLong(), n = Math.min(len - i, Long.SIZE / Byte.SIZE);
+   n-- > 0; rnd >>>= Byte.SIZE) {
+bytes[i++] = (byte) rnd;
+  }
+}
+  }
+
+  private long next64(int bits) {
+seed = seed * multiplier + addend;
+return seed >>> (64 - bits);
+  }
+
+
+  /**
+   * Random64 is a pseudorandom algorithm(LCG). Therefore, we will get same 
sequence
+   * if seeds are the same. This main will test how many calls nextLong() it 
will
+   * get the same seed.
+   *
+   * We do not need to save all numbers (that is too large). We could save
+   * once every 10 calls nextLong(). If it get a same seed, we can
+   * detect this by calling nextLong() 10 times continuously.
+   *
+   */
+  public static void main(String[] args) {
+long defaultTotalTestCnt = 1000000000000L; // 1 trillion
+
+if (args.length == 1) {
+  defaultTotalTestCnt = Long.parseLong(args[0]);
+}
+
+Preconditions.checkArgument(defaultTotalTestCnt > 0, "totalTestCnt <= 0");
+

hbase git commit: HBASE-21185 - WALPrettyPrinter: Additional useful info to be printed by wal printer tool, for debuggability purposes

2018-10-11 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 6b9dfb9aa -> 6165d4b10


HBASE-21185 - WALPrettyPrinter: Additional useful info to be printed by wal 
printer tool, for debuggability purposes


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6165d4b1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6165d4b1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6165d4b1

Branch: refs/heads/branch-1.2
Commit: 6165d4b107a860421a3ffe28bce7e610359d93d6
Parents: 6b9dfb9
Author: wellington 
Authored: Sat Oct 6 02:19:57 2018 +0100
Committer: Michael Stack 
Committed: Thu Oct 11 14:44:42 2018 -0700

--
 .../hadoop/hbase/wal/WALPrettyPrinter.java  | 24 ++--
 1 file changed, 22 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6165d4b1/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
index be83ae0..33522a8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
@@ -81,6 +81,8 @@ public class WALPrettyPrinter {
   // for JSON encoding
   private static final ObjectMapper MAPPER = new ObjectMapper();
 
+  private long position;
+
   /**
* Basic constructor that simply initializes values to reasonable defaults.
*/
@@ -121,7 +123,8 @@ public class WALPrettyPrinter {
*  PrettyPrinter's output.
*/
   public WALPrettyPrinter(boolean outputValues, boolean outputJSON,
-  long sequence, String region, String row, boolean persistentOutput,
+  long sequence, String region, String row, boolean
+  persistentOutput,
   PrintStream out) {
 this.outputValues = outputValues;
 this.outputJSON = outputJSON;
@@ -269,7 +272,11 @@ public class WALPrettyPrinter {
   out.print("[");
   firstTxn = true;
 }
-
+
+if (position > 0) {
+  log.seek(position);
+}
+
 try {
   WAL.Entry entry;
   while ((entry = log.next()) != null) {
@@ -293,6 +300,8 @@ public class WALPrettyPrinter {
   if (row == null || ((String) op.get("row")).equals(row)) {
 actions.add(op);
   }
+  op.put("total_size_sum", CellUtil.estimatedHeapSizeOf(cell));
+
 }
 if (actions.size() == 0)
   continue;
@@ -317,8 +326,11 @@ public class WALPrettyPrinter {
   out.println("tag: " + op.get("tag"));
 }
 if (outputValues) out.println("value: " + op.get("value"));
+out.println("cell total size sum: " + op.get("total_size_sum"));
   }
 }
+out.println("edit heap size: " + entry.getEdit().heapSize());
+out.println("position: " + log.getPosition());
   }
 } finally {
   log.close();
@@ -377,6 +389,7 @@ public class WALPrettyPrinter {
 options.addOption("s", "sequence", true,
 "Sequence to filter by. Pass sequence number.");
 options.addOption("w", "row", true, "Row to filter by. Pass row name.");
+options.addOption("g", "goto", true, "Position to seek to in the file");
 
 WALPrettyPrinter printer = new WALPrettyPrinter();
 CommandLineParser parser = new PosixParser();
@@ -400,6 +413,9 @@ public class WALPrettyPrinter {
 printer.setSequenceFilter(Long.parseLong(cmd.getOptionValue("s")));
   if (cmd.hasOption("w"))
 printer.setRowFilter(cmd.getOptionValue("w"));
+  if (cmd.hasOption("g")) {
+printer.setPosition(Long.parseLong(cmd.getOptionValue("g")));
+  }
 } catch (ParseException e) {
   e.printStackTrace();
   HelpFormatter formatter = new HelpFormatter();
@@ -423,4 +439,8 @@ public class WALPrettyPrinter {
 }
 printer.endPersistentOutput();
   }
+
+  public void setPosition(long position) {
+this.position = position;
+  }
 }



hbase git commit: HBASE-21185 - WALPrettyPrinter: Additional useful info to be printed by wal printer tool, for debuggability purposes

2018-10-11 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 bd690be7a -> 59cb65aa3


HBASE-21185 - WALPrettyPrinter: Additional useful info to be printed by wal 
printer tool, for debuggability purposes


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/59cb65aa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/59cb65aa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/59cb65aa

Branch: refs/heads/branch-1.4
Commit: 59cb65aa30be53dfbf9ff56ec6adef1255cd7e34
Parents: bd690be
Author: wellington 
Authored: Sat Oct 6 02:19:57 2018 +0100
Committer: Michael Stack 
Committed: Thu Oct 11 14:43:06 2018 -0700

--
 .../hadoop/hbase/wal/WALPrettyPrinter.java  | 24 ++--
 1 file changed, 22 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/59cb65aa/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
index be83ae0..33522a8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
@@ -81,6 +81,8 @@ public class WALPrettyPrinter {
   // for JSON encoding
   private static final ObjectMapper MAPPER = new ObjectMapper();
 
+  private long position;
+
   /**
* Basic constructor that simply initializes values to reasonable defaults.
*/
@@ -121,7 +123,8 @@ public class WALPrettyPrinter {
*  PrettyPrinter's output.
*/
   public WALPrettyPrinter(boolean outputValues, boolean outputJSON,
-  long sequence, String region, String row, boolean persistentOutput,
+  long sequence, String region, String row, boolean
+  persistentOutput,
   PrintStream out) {
 this.outputValues = outputValues;
 this.outputJSON = outputJSON;
@@ -269,7 +272,11 @@ public class WALPrettyPrinter {
   out.print("[");
   firstTxn = true;
 }
-
+
+if (position > 0) {
+  log.seek(position);
+}
+
 try {
   WAL.Entry entry;
   while ((entry = log.next()) != null) {
@@ -293,6 +300,8 @@ public class WALPrettyPrinter {
   if (row == null || ((String) op.get("row")).equals(row)) {
 actions.add(op);
   }
+  op.put("total_size_sum", CellUtil.estimatedHeapSizeOf(cell));
+
 }
 if (actions.size() == 0)
   continue;
@@ -317,8 +326,11 @@ public class WALPrettyPrinter {
   out.println("tag: " + op.get("tag"));
 }
 if (outputValues) out.println("value: " + op.get("value"));
+out.println("cell total size sum: " + op.get("total_size_sum"));
   }
 }
+out.println("edit heap size: " + entry.getEdit().heapSize());
+out.println("position: " + log.getPosition());
   }
 } finally {
   log.close();
@@ -377,6 +389,7 @@ public class WALPrettyPrinter {
 options.addOption("s", "sequence", true,
 "Sequence to filter by. Pass sequence number.");
 options.addOption("w", "row", true, "Row to filter by. Pass row name.");
+options.addOption("g", "goto", true, "Position to seek to in the file");
 
 WALPrettyPrinter printer = new WALPrettyPrinter();
 CommandLineParser parser = new PosixParser();
@@ -400,6 +413,9 @@ public class WALPrettyPrinter {
 printer.setSequenceFilter(Long.parseLong(cmd.getOptionValue("s")));
   if (cmd.hasOption("w"))
 printer.setRowFilter(cmd.getOptionValue("w"));
+  if (cmd.hasOption("g")) {
+printer.setPosition(Long.parseLong(cmd.getOptionValue("g")));
+  }
 } catch (ParseException e) {
   e.printStackTrace();
   HelpFormatter formatter = new HelpFormatter();
@@ -423,4 +439,8 @@ public class WALPrettyPrinter {
 }
 printer.endPersistentOutput();
   }
+
+  public void setPosition(long position) {
+this.position = position;
+  }
 }



hbase git commit: HBASE-21185 - WALPrettyPrinter: Additional useful info to be printed by wal printer tool, for debuggability purposes

2018-10-11 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1 9050402ff -> b73aab8bc


HBASE-21185 - WALPrettyPrinter: Additional useful info to be printed by wal 
printer tool, for debuggability purposes


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b73aab8b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b73aab8b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b73aab8b

Branch: refs/heads/branch-1
Commit: b73aab8bc12bc0b9379fb6cef64e2b44e458be3a
Parents: 9050402
Author: wellington 
Authored: Sat Oct 6 02:19:57 2018 +0100
Committer: Michael Stack 
Committed: Thu Oct 11 14:39:14 2018 -0700

--
 .../hadoop/hbase/wal/WALPrettyPrinter.java  | 24 ++--
 1 file changed, 22 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b73aab8b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
index be83ae0..33522a8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
@@ -81,6 +81,8 @@ public class WALPrettyPrinter {
   // for JSON encoding
   private static final ObjectMapper MAPPER = new ObjectMapper();
 
+  private long position;
+
   /**
* Basic constructor that simply initializes values to reasonable defaults.
*/
@@ -121,7 +123,8 @@ public class WALPrettyPrinter {
*  PrettyPrinter's output.
*/
   public WALPrettyPrinter(boolean outputValues, boolean outputJSON,
-  long sequence, String region, String row, boolean persistentOutput,
+  long sequence, String region, String row, boolean
+  persistentOutput,
   PrintStream out) {
 this.outputValues = outputValues;
 this.outputJSON = outputJSON;
@@ -269,7 +272,11 @@ public class WALPrettyPrinter {
   out.print("[");
   firstTxn = true;
 }
-
+
+if (position > 0) {
+  log.seek(position);
+}
+
 try {
   WAL.Entry entry;
   while ((entry = log.next()) != null) {
@@ -293,6 +300,8 @@ public class WALPrettyPrinter {
   if (row == null || ((String) op.get("row")).equals(row)) {
 actions.add(op);
   }
+  op.put("total_size_sum", CellUtil.estimatedHeapSizeOf(cell));
+
 }
 if (actions.size() == 0)
   continue;
@@ -317,8 +326,11 @@ public class WALPrettyPrinter {
   out.println("tag: " + op.get("tag"));
 }
 if (outputValues) out.println("value: " + op.get("value"));
+out.println("cell total size sum: " + op.get("total_size_sum"));
   }
 }
+out.println("edit heap size: " + entry.getEdit().heapSize());
+out.println("position: " + log.getPosition());
   }
 } finally {
   log.close();
@@ -377,6 +389,7 @@ public class WALPrettyPrinter {
 options.addOption("s", "sequence", true,
 "Sequence to filter by. Pass sequence number.");
 options.addOption("w", "row", true, "Row to filter by. Pass row name.");
+options.addOption("g", "goto", true, "Position to seek to in the file");
 
 WALPrettyPrinter printer = new WALPrettyPrinter();
 CommandLineParser parser = new PosixParser();
@@ -400,6 +413,9 @@ public class WALPrettyPrinter {
 printer.setSequenceFilter(Long.parseLong(cmd.getOptionValue("s")));
   if (cmd.hasOption("w"))
 printer.setRowFilter(cmd.getOptionValue("w"));
+  if (cmd.hasOption("g")) {
+printer.setPosition(Long.parseLong(cmd.getOptionValue("g")));
+  }
 } catch (ParseException e) {
   e.printStackTrace();
   HelpFormatter formatter = new HelpFormatter();
@@ -423,4 +439,8 @@ public class WALPrettyPrinter {
 }
 printer.endPersistentOutput();
   }
+
+  public void setPosition(long position) {
+this.position = position;
+  }
 }



[1/2] hbase git commit: HBASE-21268 Backport to branch-2.0 " HBASE-21213 [hbck2] bypass leaves behind state in RegionStates when assign/unassign"

2018-10-11 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 5b6b85aed -> 5755f4a9f


http://git-wip-us.apache.org/repos/asf/hbase/blob/5755f4a9/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionBypass.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionBypass.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionBypass.java
new file mode 100644
index 000..4da60e2
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionBypass.java
@@ -0,0 +1,181 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.assignment;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+
+/**
+ * Tests bypass on a region assign/unassign
+ */
+@Category({LargeTests.class})
+public class TestRegionBypass {
+  private final static Logger LOG = 
LoggerFactory.getLogger(TestRegionBypass.class);
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+  HBaseClassTestRule.forClass(TestRegionBypass.class);
+
+  @Rule
+  public TestName name = new TestName();
+
+  private static final HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
+  private TableName tableName;
+
+  @BeforeClass
+  public static void startCluster() throws Exception {
+TEST_UTIL.startMiniCluster(2);
+  }
+
+  @AfterClass
+  public static void stopCluster() throws Exception {
+TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Before
+  public void before() throws IOException {
+this.tableName = TableName.valueOf(this.name.getMethodName());
+// Create a table. Has one region at least.
+TEST_UTIL.createTable(this.tableName, Bytes.toBytes("cf"));
+
+  }
+
+  @Test
+  public void testBypass() throws IOException {
+Admin admin = TEST_UTIL.getAdmin();
+List regions = admin.getRegions(this.tableName);
+for (RegionInfo ri: regions) {
+  admin.unassign(ri.getRegionName(), false);
+}
+List pids = new ArrayList<>(regions.size());
+for (RegionInfo ri: regions) {
+  Procedure p = new StallingAssignProcedure(ri);
+  
pids.add(TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().
+  submitProcedure(p));
+}
+for (Long pid: pids) {
+  while 
(!TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().isStarted(pid))
 {
+Thread.currentThread().yield();
+  }
+}
+// Call bypass on all. We should be stuck in the dispatch at this stage.
+List> ps =
+
TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().getProcedures();
+for (Procedure p: ps) {
+  if (p instanceof StallingAssignProcedure) {
+List bs = TEST_UTIL.getHbck().
+bypassProcedure(Arrays.asList(p.getProcId()), 0, false, 
false);
+for (Boolean b: bs) {
+  LOG.info("BYPASSED {} {}", p.getProcId(), b);
+}
+  }
+}
+// Countdown the latch so its not hanging out.
+for (Procedure p: ps) {
+  if (p instanceof StallingAssignProcedure) {
+

[2/2] hbase git commit: HBASE-21268 Backport to branch-2.0 " HBASE-21213 [hbck2] bypass leaves behind state in RegionStates when assign/unassign"

2018-10-11 Thread stack
HBASE-21268 Backport to branch-2.0 " HBASE-21213 [hbck2] bypass leaves behind 
state in RegionStates when assign/unassign"

Below is a comment on HBASE-21213. This backport includes a good bit
of HBASE-21156 needed because this patch modifies the API it adds
for hbck2.

HBASE-21213 [hbck2] bypass leaves behind state in RegionStates when 
assign/unassign

Adds override to assigns and unassigns. Changes bypass 'force'
to align calling the param 'override' instead.

Adds recursive to 'bypass', a means of calling bypass on
parent and its subprocedures (usually bypass works on
leaf nodes rippling the bypass up to parent -- recursive
has us work in the opposite direction): EXPERIMENTAL.

bypass on an assign/unassign leaves region in RIT and the
RegionStateNode loaded with the bypassed procedure. First
implementation had assign/unassign cleanup leftover state.
Second implementation, on feedback, keeps the state in place
as a fence against other Procedures assuming the region entity,
and instead adds an 'override' function that hbck2 can set on
assigns/unassigns to override the fencing.

Note that the below also converts ProcedureExceptions that
come out of the Pv2 system into DoNotRetryIOEs. It is a
little awkward because DNRIOE is in client-module, not
in procedure module. Previous, we'd just keep retrying
the bypass, etc.

M 
hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 Have bypass take an environment like all other methods so subclasses.
 Fix javadoc issues.

M 
hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 Javadoc issues. Pass environment when we invoke bypass.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
 Rename waitUntilNamespace... etc. to align with how these method types
 are named elsewhere .. i.e. waitFor rather than waitUntil..

M 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
 Cleanup message we emit when we find an existing procedure working
 against this entity.
 Add support for a force function which allows Assigns/Unassigns force
 ownership of the Region entity.

A 
hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionBypass.java
 Test bypass and force.

M hbase-shell/src/main/ruby/shell/commands/list_procedures.rb
 Minor cleanup of the json output... do iso8601 timestamps.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5755f4a9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5755f4a9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5755f4a9

Branch: refs/heads/branch-2.0
Commit: 5755f4a9f3d0814d8b6141189d59f43f04e2197b
Parents: 5b6b85a
Author: Michael Stack 
Authored: Thu Sep 20 16:53:58 2018 -0700
Committer: Michael Stack 
Committed: Thu Oct 11 14:31:59 2018 -0700

--
 .../apache/hadoop/hbase/client/HBaseHbck.java   |  71 +++-
 .../org/apache/hadoop/hbase/client/Hbck.java|  65 ++-
 .../hbase/shaded/protobuf/RequestConverter.java |  24 +++
 .../hadoop/hbase/util/RetryCounterFactory.java  |   4 +
 .../hadoop/hbase/procedure2/Procedure.java  |  30 +--
 .../hbase/procedure2/ProcedureExecutor.java |  66 +--
 .../hadoop/hbase/procedure2/ProcedureUtil.java  |   2 +-
 .../procedure2/RemoteProcedureException.java|   6 +-
 .../hbase/procedure2/TestProcedureBypass.java   |  23 ++-
 .../src/main/protobuf/Master.proto  |  71 
 .../src/main/protobuf/MasterProcedure.proto |   2 +
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |   3 +-
 .../balancer/TestRSGroupBasedLoadBalancer.java  |   3 +
 .../org/apache/hadoop/hbase/master/HMaster.java |  86 -
 .../hadoop/hbase/master/MasterRpcServices.java  | 124 -
 .../master/assignment/AssignProcedure.java  |  14 +-
 .../master/assignment/AssignmentManager.java|  32 +++-
 .../assignment/RegionTransitionProcedure.java   |  78 ++--
 .../master/assignment/UnassignProcedure.java|  14 +-
 .../master/procedure/MasterProcedureUtil.java   |  16 ++
 .../master/procedure/ProcedureDescriber.java|   3 +-
 .../master/procedure/ProcedurePrepareLatch.java |   2 +-
 .../master/procedure/ProcedureSyncWait.java |   5 +-
 .../hadoop/hbase/TestMetaTableAccessor.java |  16 ++
 .../apache/hadoop/hbase/client/TestHbck.java| 128 ++---
 .../hadoop/hbase/client/TestMultiParallel.java  |  29 ++-
 .../master/assignment/TestRegionBypass.java | 181 +++
 .../main/ruby/shell/commands/list_procedures.rb |   3 +-
 .../apache/hadoop/hbase/client/TestShell.java   |   1 -
 hbase-shell/src/test/ruby/shell/shell_test.rb   |   6 +-
 30 files changed, 976 insertions(+), 132 deletions(-)
--



hbase git commit: HBASE-21247 Allow WAL Provider to be specified by configuration without explicit enum in Providers - revert

2018-10-11 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 42d7ddc67 -> 924d183ba


HBASE-21247 Allow WAL Provider to be specified by configuration without 
explicit enum in Providers - revert


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/924d183b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/924d183b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/924d183b

Branch: refs/heads/master
Commit: 924d183ba0e67b975e998f6006c993f457e03c20
Parents: 42d7ddc
Author: tedyu 
Authored: Thu Oct 11 09:00:24 2018 -0700
Committer: tedyu 
Committed: Thu Oct 11 09:00:24 2018 -0700

--
 .../hbase/wal/RegionGroupingProvider.java   |  5 +--
 .../org/apache/hadoop/hbase/wal/WALFactory.java | 34 +++-
 .../apache/hadoop/hbase/wal/IOTestProvider.java |  5 +--
 .../apache/hadoop/hbase/wal/TestWALFactory.java | 43 
 4 files changed, 15 insertions(+), 72 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/924d183b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
index e4390c9..0b7b8da 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
@@ -121,8 +121,6 @@ public class RegionGroupingProvider implements WALProvider {
 
   /** delegate provider for WAL creation/roll/close */
   public static final String DELEGATE_PROVIDER = 
"hbase.wal.regiongrouping.delegate.provider";
-  public static final String DELEGATE_PROVIDER_CLASS =
-"hbase.wal.regiongrouping.delegate.provider.class";
   public static final String DEFAULT_DELEGATE_PROVIDER = 
WALFactory.Providers.defaultProvider
   .name();
 
@@ -157,8 +155,7 @@ public class RegionGroupingProvider implements WALProvider {
 }
 this.providerId = sb.toString();
 this.strategy = getStrategy(conf, REGION_GROUPING_STRATEGY, 
DEFAULT_REGION_GROUPING_STRATEGY);
-this.providerClass = factory.getProviderClass(DELEGATE_PROVIDER_CLASS, 
DELEGATE_PROVIDER,
-DEFAULT_DELEGATE_PROVIDER);
+this.providerClass = factory.getProviderClass(DELEGATE_PROVIDER, 
DEFAULT_DELEGATE_PROVIDER);
   }
 
   private WALProvider createProvider(String group) throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/924d183b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
index 964d049..0e6e365 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
@@ -80,11 +80,8 @@ public class WALFactory {
 
   public static final String WAL_PROVIDER = "hbase.wal.provider";
   static final String DEFAULT_WAL_PROVIDER = Providers.defaultProvider.name();
-  public static final String WAL_PROVIDER_CLASS = "hbase.wal.provider.class";
-  static final Class DEFAULT_WAL_PROVIDER_CLASS = 
AsyncFSWALProvider.class;
 
   public static final String META_WAL_PROVIDER = "hbase.wal.meta_provider";
-  public static final String META_WAL_PROVIDER_CLASS = 
"hbase.wal.meta_provider.class";
 
   final String factoryId;
   private final WALProvider provider;
@@ -128,25 +125,7 @@ public class WALFactory {
   }
 
   @VisibleForTesting
-  /*
-   * @param clsKey config key for provider classname
-   * @param key config key for provider enum
-   * @param defaultValue default value for provider enum
-   * @return Class which extends WALProvider
-   */
-  public Class getProviderClass(String clsKey, String 
key,
-  String defaultValue) {
-String clsName = conf.get(clsKey);
-if (clsName == null || clsName.isEmpty()) {
-  clsName = conf.get(key, defaultValue);
-}
-if (clsName != null && !clsName.isEmpty()) {
-  try {
-return (Class) Class.forName(clsName);
-  } catch (ClassNotFoundException exception) {
-// try with enum key next
-  }
-}
+  public Class getProviderClass(String key, String 
defaultValue) {
 try {
   Providers provider = Providers.valueOf(conf.get(key, defaultValue));
 
@@ -170,7 +149,7 @@ public class WALFactory {
   // Fall back to them specifying a class name
   // Note that the passed default class shouldn't actually be used, since 
the above only fails
   // when there is a config 

[hbase] Git Push Summary

2018-10-11 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2-HBASE-21103 [deleted] c521632a1


[5/8] hbase git commit: HBASE-21103 nightly job should make sure cached yetus will run.

2018-10-11 Thread busbey
HBASE-21103 nightly job should make sure cached yetus will run.

Signed-off-by: Mike Drob 
(cherry picked from commit 5b6b85aed657edc0e476e6c7ddb4d8dcb0283d54)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9050402f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9050402f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9050402f

Branch: refs/heads/branch-1
Commit: 9050402ff2093b7c5425db5c640314fd91df16fb
Parents: ed7beac
Author: Sean Busbey 
Authored: Thu Oct 11 10:32:12 2018 -0500
Committer: Sean Busbey 
Committed: Thu Oct 11 10:33:03 2018 -0500

--
 dev-support/Jenkinsfile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9050402f/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 0abeae0..c2313e2 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -64,8 +64,9 @@ echo "Ensure we have a copy of Apache Yetus."
 if [[ true !=  "${USE_YETUS_PRERELEASE}" ]]; then
   YETUS_DIR="${WORKSPACE}/yetus-${YETUS_RELEASE}"
   echo "Checking for Yetus ${YETUS_RELEASE} in '${YETUS_DIR}'"
-  if [ ! -d "${YETUS_DIR}" ]; then
+  if ! "${YETUS_DIR}/bin/test-patch" --version >/dev/null 2>&1 ; then
 echo "New download of Apache Yetus version ${YETUS_RELEASE}."
+rm -rf "${YETUS_DIR}"
 rm -rf "${WORKSPACE}/.gpg"
 mkdir -p "${WORKSPACE}/.gpg"
 chmod -R 700 "${WORKSPACE}/.gpg"



[4/8] hbase git commit: HBASE-21103 nightly job should make sure cached yetus will run.

2018-10-11 Thread busbey
HBASE-21103 nightly job should make sure cached yetus will run.

Signed-off-by: Mike Drob 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5b6b85ae
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5b6b85ae
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5b6b85ae

Branch: refs/heads/branch-2.0
Commit: 5b6b85aed657edc0e476e6c7ddb4d8dcb0283d54
Parents: a060008
Author: Sean Busbey 
Authored: Thu Oct 11 10:32:12 2018 -0500
Committer: Sean Busbey 
Committed: Thu Oct 11 10:32:12 2018 -0500

--
 dev-support/Jenkinsfile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5b6b85ae/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index bc445a1..b89961f 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -64,8 +64,9 @@ echo "Ensure we have a copy of Apache Yetus."
 if [[ true !=  "${USE_YETUS_PRERELEASE}" ]]; then
   YETUS_DIR="${WORKSPACE}/yetus-${YETUS_RELEASE}"
   echo "Checking for Yetus ${YETUS_RELEASE} in '${YETUS_DIR}'"
-  if [ ! -d "${YETUS_DIR}" ]; then
+  if ! "${YETUS_DIR}/bin/test-patch" --version >/dev/null 2>&1 ; then
 echo "New download of Apache Yetus version ${YETUS_RELEASE}."
+rm -rf "${YETUS_DIR}"
 rm -rf "${WORKSPACE}/.gpg"
 mkdir -p "${WORKSPACE}/.gpg"
 chmod -R 700 "${WORKSPACE}/.gpg"



[7/8] hbase git commit: HBASE-21103 nightly job should make sure cached yetus will run.

2018-10-11 Thread busbey
HBASE-21103 nightly job should make sure cached yetus will run.

Signed-off-by: Mike Drob 
(cherry picked from commit 5b6b85aed657edc0e476e6c7ddb4d8dcb0283d54)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1e1836c2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1e1836c2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1e1836c2

Branch: refs/heads/branch-1.3
Commit: 1e1836c2f07eb440f3dc4568f3194826b9a69b6d
Parents: 171f8f0
Author: Sean Busbey 
Authored: Thu Oct 11 10:32:12 2018 -0500
Committer: Sean Busbey 
Committed: Thu Oct 11 10:34:14 2018 -0500

--
 dev-support/Jenkinsfile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1e1836c2/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 0abeae0..c2313e2 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -64,8 +64,9 @@ echo "Ensure we have a copy of Apache Yetus."
 if [[ true !=  "${USE_YETUS_PRERELEASE}" ]]; then
   YETUS_DIR="${WORKSPACE}/yetus-${YETUS_RELEASE}"
   echo "Checking for Yetus ${YETUS_RELEASE} in '${YETUS_DIR}'"
-  if [ ! -d "${YETUS_DIR}" ]; then
+  if ! "${YETUS_DIR}/bin/test-patch" --version >/dev/null 2>&1 ; then
 echo "New download of Apache Yetus version ${YETUS_RELEASE}."
+rm -rf "${YETUS_DIR}"
 rm -rf "${WORKSPACE}/.gpg"
 mkdir -p "${WORKSPACE}/.gpg"
 chmod -R 700 "${WORKSPACE}/.gpg"



[3/8] hbase git commit: HBASE-21103 nightly job should make sure cached yetus will run.

2018-10-11 Thread busbey
HBASE-21103 nightly job should make sure cached yetus will run.

Signed-off-by: Mike Drob 
(cherry picked from commit 42d7ddc6780e7b875069c3703fc68619708e1614)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c9c74364
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c9c74364
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c9c74364

Branch: refs/heads/branch-2.1
Commit: c9c7436482099ff29ad7f765094fdc3b1e69ac55
Parents: e726a89
Author: Sean Busbey 
Authored: Thu Oct 11 09:11:32 2018 -0500
Committer: Sean Busbey 
Committed: Thu Oct 11 10:30:33 2018 -0500

--
 dev-support/Jenkinsfile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c9c74364/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 58d414e..0abb366 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -80,7 +80,8 @@ pipeline {
   if [[ true !=  "${USE_YETUS_PRERELEASE}" ]]; then
 YETUS_DIR="${WORKSPACE}/yetus-${YETUS_RELEASE}"
 echo "Checking for Yetus ${YETUS_RELEASE} in '${YETUS_DIR}'"
-if [ ! -d "${YETUS_DIR}" ]; then
+if ! "${YETUS_DIR}/bin/test-patch" --version >/dev/null 2>&1 ; 
then
+  rm -rf "${YETUS_DIR}"
   
"${WORKSPACE}/component/dev-support/jenkins-scripts/cache-apache-project-artifact.sh"
 \
   --working-dir "${WORKSPACE}/downloads-yetus" \
   --keys 'https://www.apache.org/dist/yetus/KEYS' \



[8/8] hbase git commit: HBASE-21103 nightly job should make sure cached yetus will run.

2018-10-11 Thread busbey
HBASE-21103 nightly job should make sure cached yetus will run.

Signed-off-by: Mike Drob 
(cherry picked from commit 5b6b85aed657edc0e476e6c7ddb4d8dcb0283d54)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6b9dfb9a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6b9dfb9a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6b9dfb9a

Branch: refs/heads/branch-1.2
Commit: 6b9dfb9aaeaf69843c17294378c6660963499155
Parents: ff29edc
Author: Sean Busbey 
Authored: Thu Oct 11 10:32:12 2018 -0500
Committer: Sean Busbey 
Committed: Thu Oct 11 10:34:40 2018 -0500

--
 dev-support/Jenkinsfile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6b9dfb9a/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index a32137a..606ff8c 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -64,8 +64,9 @@ echo "Ensure we have a copy of Apache Yetus."
 if [[ true !=  "${USE_YETUS_PRERELEASE}" ]]; then
   YETUS_DIR="${WORKSPACE}/yetus-${YETUS_RELEASE}"
   echo "Checking for Yetus ${YETUS_RELEASE} in '${YETUS_DIR}'"
-  if [ ! -d "${YETUS_DIR}" ]; then
+  if ! "${YETUS_DIR}/bin/test-patch" --version >/dev/null 2>&1 ; then
 echo "New download of Apache Yetus version ${YETUS_RELEASE}."
+rm -rf "${YETUS_DIR}"
 rm -rf "${WORKSPACE}/.gpg"
 mkdir -p "${WORKSPACE}/.gpg"
 chmod -R 700 "${WORKSPACE}/.gpg"



[1/8] hbase git commit: HBASE-21103 nightly job should make sure cached yetus will run.

2018-10-11 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/branch-1 ed7beacf9 -> 9050402ff
  refs/heads/branch-1.2 ff29edc85 -> 6b9dfb9aa
  refs/heads/branch-1.3 171f8f066 -> 1e1836c2f
  refs/heads/branch-1.4 0b1ae1c53 -> bd690be7a
  refs/heads/branch-2 c84c39902 -> a074269ee
  refs/heads/branch-2.0 a06000895 -> 5b6b85aed
  refs/heads/branch-2.1 e726a89f5 -> c9c743648
  refs/heads/master eec1479f7 -> 42d7ddc67


HBASE-21103 nightly job should make sure cached yetus will run.

Signed-off-by: Mike Drob 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/42d7ddc6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/42d7ddc6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/42d7ddc6

Branch: refs/heads/master
Commit: 42d7ddc6780e7b875069c3703fc68619708e1614
Parents: eec1479
Author: Sean Busbey 
Authored: Thu Oct 11 09:11:32 2018 -0500
Committer: Sean Busbey 
Committed: Thu Oct 11 10:29:07 2018 -0500

--
 dev-support/Jenkinsfile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/42d7ddc6/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index bbff87c..b333afb 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -80,7 +80,8 @@ pipeline {
   if [[ true !=  "${USE_YETUS_PRERELEASE}" ]]; then
 YETUS_DIR="${WORKSPACE}/yetus-${YETUS_RELEASE}"
 echo "Checking for Yetus ${YETUS_RELEASE} in '${YETUS_DIR}'"
-if [ ! -d "${YETUS_DIR}" ]; then
+if ! "${YETUS_DIR}/bin/test-patch" --version >/dev/null 2>&1 ; 
then
+  rm -rf "${YETUS_DIR}"
   
"${WORKSPACE}/component/dev-support/jenkins-scripts/cache-apache-project-artifact.sh"
 \
   --working-dir "${WORKSPACE}/downloads-yetus" \
   --keys 'https://www.apache.org/dist/yetus/KEYS' \



[6/8] hbase git commit: HBASE-21103 nightly job should make sure cached yetus will run.

2018-10-11 Thread busbey
HBASE-21103 nightly job should make sure cached yetus will run.

Signed-off-by: Mike Drob 
(cherry picked from commit 5b6b85aed657edc0e476e6c7ddb4d8dcb0283d54)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bd690be7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bd690be7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bd690be7

Branch: refs/heads/branch-1.4
Commit: bd690be7a1f2ac979a2a9cca3e08aa2ec630c79e
Parents: 0b1ae1c
Author: Sean Busbey 
Authored: Thu Oct 11 10:32:12 2018 -0500
Committer: Sean Busbey 
Committed: Thu Oct 11 10:33:45 2018 -0500

--
 dev-support/Jenkinsfile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bd690be7/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 0abeae0..c2313e2 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -64,8 +64,9 @@ echo "Ensure we have a copy of Apache Yetus."
 if [[ true !=  "${USE_YETUS_PRERELEASE}" ]]; then
   YETUS_DIR="${WORKSPACE}/yetus-${YETUS_RELEASE}"
   echo "Checking for Yetus ${YETUS_RELEASE} in '${YETUS_DIR}'"
-  if [ ! -d "${YETUS_DIR}" ]; then
+  if ! "${YETUS_DIR}/bin/test-patch" --version >/dev/null 2>&1 ; then
 echo "New download of Apache Yetus version ${YETUS_RELEASE}."
+rm -rf "${YETUS_DIR}"
 rm -rf "${WORKSPACE}/.gpg"
 mkdir -p "${WORKSPACE}/.gpg"
 chmod -R 700 "${WORKSPACE}/.gpg"



[2/8] hbase git commit: HBASE-21103 nightly job should make sure cached yetus will run.

2018-10-11 Thread busbey
HBASE-21103 nightly job should make sure cached yetus will run.

Signed-off-by: Mike Drob 
(cherry picked from commit 42d7ddc6780e7b875069c3703fc68619708e1614)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a074269e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a074269e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a074269e

Branch: refs/heads/branch-2
Commit: a074269ee07ad08d68d53c10c946f826f8de7fe0
Parents: c84c399
Author: Sean Busbey 
Authored: Thu Oct 11 09:11:32 2018 -0500
Committer: Sean Busbey 
Committed: Thu Oct 11 10:29:53 2018 -0500

--
 dev-support/Jenkinsfile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a074269e/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 58d414e..0abb366 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -80,7 +80,8 @@ pipeline {
   if [[ true !=  "${USE_YETUS_PRERELEASE}" ]]; then
 YETUS_DIR="${WORKSPACE}/yetus-${YETUS_RELEASE}"
 echo "Checking for Yetus ${YETUS_RELEASE} in '${YETUS_DIR}'"
-if [ ! -d "${YETUS_DIR}" ]; then
+if ! "${YETUS_DIR}/bin/test-patch" --version >/dev/null 2>&1 ; 
then
+  rm -rf "${YETUS_DIR}"
   
"${WORKSPACE}/component/dev-support/jenkins-scripts/cache-apache-project-artifact.sh"
 \
   --working-dir "${WORKSPACE}/downloads-yetus" \
   --keys 'https://www.apache.org/dist/yetus/KEYS' \



[2/2] hbase git commit: HBASE-21282 Upgrade to latest jetty 9.3 versions

2018-10-11 Thread elserj
HBASE-21282 Upgrade to latest jetty 9.3 versions

Signed-off-by: Mike Drob 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/eec1479f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/eec1479f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/eec1479f

Branch: refs/heads/master
Commit: eec1479f710a6ae9b7ec6d67a1721ab540e02185
Parents: 8b66dea
Author: Josh Elser 
Authored: Thu Oct 11 11:05:48 2018 -0400
Committer: Josh Elser 
Committed: Thu Oct 11 11:28:24 2018 -0400

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/eec1479f/pom.xml
--
diff --git a/pom.xml b/pom.xml
index a345088..c26ab69 100755
--- a/pom.xml
+++ b/pom.xml
@@ -1492,7 +1492,7 @@
 3.2.1
 2.9.2
 2.2.12
-9.3.19.v20170502
+9.3.25.v20180904
 3.1.0
 2.0.1
 



[1/2] hbase git commit: HBASE-21282 Upgrade to latest jetty 9.3 versions

2018-10-11 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/branch-2 33d72e4db -> c84c39902
  refs/heads/master 8b66dea2f -> eec1479f7


HBASE-21282 Upgrade to latest jetty 9.3 versions

Signed-off-by: Mike Drob 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c84c3990
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c84c3990
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c84c3990

Branch: refs/heads/branch-2
Commit: c84c3990205c7d28ac6fb127c49692fe82387aa2
Parents: 33d72e4
Author: Josh Elser 
Authored: Thu Oct 11 11:05:48 2018 -0400
Committer: Josh Elser 
Committed: Thu Oct 11 11:28:13 2018 -0400

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c84c3990/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 490e562..acfeb4c 100755
--- a/pom.xml
+++ b/pom.xml
@@ -1350,7 +1350,7 @@
 3.2.1
 2.9.2
 2.2.12
-9.3.19.v20170502
+9.3.25.v20180904
 3.1.0
 2.0.1
 



hbase git commit: HBASE-21103 nightly job should make sure cached yetus will run.

2018-10-11 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/HBASE-21103 [deleted] c521632a1
  refs/heads/branch-1.2-HBASE-21103 [created] c521632a1


HBASE-21103 nightly job should make sure cached yetus will run.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c521632a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c521632a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c521632a

Branch: refs/heads/branch-1.2-HBASE-21103
Commit: c521632a1eac92b4e243b6f1f5c031681eedf6eb
Parents: ff29edc
Author: Sean Busbey 
Authored: Thu Oct 11 00:02:09 2018 -0500
Committer: Sean Busbey 
Committed: Thu Oct 11 00:02:09 2018 -0500

--
 dev-support/Jenkinsfile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c521632a/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index a32137a..606ff8c 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -64,8 +64,9 @@ echo "Ensure we have a copy of Apache Yetus."
 if [[ true !=  "${USE_YETUS_PRERELEASE}" ]]; then
   YETUS_DIR="${WORKSPACE}/yetus-${YETUS_RELEASE}"
   echo "Checking for Yetus ${YETUS_RELEASE} in '${YETUS_DIR}'"
-  if [ ! -d "${YETUS_DIR}" ]; then
+  if ! "${YETUS_DIR}/bin/test-patch" --version >/dev/null 2>&1 ; then
 echo "New download of Apache Yetus version ${YETUS_RELEASE}."
+rm -rf "${YETUS_DIR}"
 rm -rf "${WORKSPACE}/.gpg"
 mkdir -p "${WORKSPACE}/.gpg"
 chmod -R 700 "${WORKSPACE}/.gpg"



[2/3] hbase git commit: HBASE-21281 Upgrade bouncycastle to latest

2018-10-11 Thread elserj
HBASE-21281 Upgrade bouncycastle to latest

BC 1.47 introduced some incompatible API changes which came in via
a new Maven artifact. We don't use any changed API in HBase. This
also removes some unnecessary dependencies on bcprov in other
modules (presumably, they are vestiges)

Signed-off-by: Mike Drob 
Signed-off-by: Ted Yu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a0600089
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a0600089
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a0600089

Branch: refs/heads/branch-2.0
Commit: a06000895fe8682495e5808e3f694cba184cfb3a
Parents: aca3509
Author: Josh Elser 
Authored: Tue Oct 9 13:06:10 2018 -0400
Committer: Josh Elser 
Committed: Thu Oct 11 10:58:23 2018 -0400

--
 hbase-endpoint/pom.xml | 6 --
 hbase-http/pom.xml | 2 +-
 hbase-server/pom.xml   | 5 -
 pom.xml| 4 ++--
 4 files changed, 3 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a0600089/hbase-endpoint/pom.xml
--
diff --git a/hbase-endpoint/pom.xml b/hbase-endpoint/pom.xml
index 0633ce9..1f201ed 100644
--- a/hbase-endpoint/pom.xml
+++ b/hbase-endpoint/pom.xml
@@ -229,12 +229,6 @@
   mockito-core
   test
 
-
-
-  org.bouncycastle
-  bcprov-jdk16
-  test
-
   
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0600089/hbase-http/pom.xml
--
diff --git a/hbase-http/pom.xml b/hbase-http/pom.xml
index 761ac69..0f2842c 100644
--- a/hbase-http/pom.xml
+++ b/hbase-http/pom.xml
@@ -263,7 +263,7 @@
 
 
   org.bouncycastle
-  bcprov-jdk16
+  bcprov-jdk15on
   test
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0600089/hbase-server/pom.xml
--
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index eef86aa..2fb433f 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -532,11 +532,6 @@
   test
 
 
-  org.bouncycastle
-  bcprov-jdk16
-  test
-
-
   org.apache.kerby
   kerb-client
   test

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0600089/pom.xml
--
diff --git a/pom.xml b/pom.xml
index a8d..b0dfb48 100755
--- a/pom.xml
+++ b/pom.xml
@@ -1377,7 +1377,7 @@
 2.1.11
 1.0.18
 2.12.2
-1.46
+1.60
 1.0.1
 1.0.0
 4.0.0
@@ -1993,7 +1993,7 @@
   
   
 org.bouncycastle
-bcprov-jdk16
+bcprov-jdk15on
 ${bouncycastle.version}
 test
   



[1/3] hbase git commit: HBASE-21281 Upgrade bouncycastle to latest

2018-10-11 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/branch-2 81adb704c -> 33d72e4db
  refs/heads/branch-2.0 aca3509fd -> a06000895
  refs/heads/master db9a5b7da -> 8b66dea2f


HBASE-21281 Upgrade bouncycastle to latest

BC 1.47 introduced some incompatible API changes which came in via
a new Maven artifact. We don't use any changed API in HBase. This
also removes some unnecessary dependencies on bcprov in other
modules (presumably, they are vestiges)

Signed-off-by: Mike Drob 
Signed-off-by: Ted Yu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/33d72e4d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/33d72e4d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/33d72e4d

Branch: refs/heads/branch-2
Commit: 33d72e4db4a011841d81c2bd767ac438cb05b0c2
Parents: 81adb70
Author: Josh Elser 
Authored: Tue Oct 9 13:06:10 2018 -0400
Committer: Josh Elser 
Committed: Thu Oct 11 10:53:02 2018 -0400

--
 hbase-endpoint/pom.xml | 6 --
 hbase-http/pom.xml | 2 +-
 hbase-server/pom.xml   | 5 -
 pom.xml| 4 ++--
 4 files changed, 3 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/33d72e4d/hbase-endpoint/pom.xml
--
diff --git a/hbase-endpoint/pom.xml b/hbase-endpoint/pom.xml
index 2e1d9f8..e91ea86 100644
--- a/hbase-endpoint/pom.xml
+++ b/hbase-endpoint/pom.xml
@@ -229,12 +229,6 @@
   mockito-core
   test
 
-
-
-  org.bouncycastle
-  bcprov-jdk16
-  test
-
   
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/33d72e4d/hbase-http/pom.xml
--
diff --git a/hbase-http/pom.xml b/hbase-http/pom.xml
index ca68c50..87036ac 100644
--- a/hbase-http/pom.xml
+++ b/hbase-http/pom.xml
@@ -263,7 +263,7 @@
 
 
   org.bouncycastle
-  bcprov-jdk16
+  bcprov-jdk15on
   test
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/33d72e4d/hbase-server/pom.xml
--
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index aced859..8df8988 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -538,11 +538,6 @@
   test
 
 
-  org.bouncycastle
-  bcprov-jdk16
-  test
-
-
   org.apache.kerby
   kerb-client
   test

http://git-wip-us.apache.org/repos/asf/hbase/blob/33d72e4d/pom.xml
--
diff --git a/pom.xml b/pom.xml
index b2f66b5..490e562 100755
--- a/pom.xml
+++ b/pom.xml
@@ -1380,7 +1380,7 @@
 2.1.11
 1.0.18
 2.12.2
-1.46
+1.60
 1.0.1
 1.0.0
 4.0.0
@@ -2032,7 +2032,7 @@
   
   
 org.bouncycastle
-bcprov-jdk16
+bcprov-jdk15on
 ${bouncycastle.version}
 test
   



[3/3] hbase git commit: HBASE-21281 Upgrade bouncycastle to latest

2018-10-11 Thread elserj
HBASE-21281 Upgrade bouncycastle to latest

BC 1.47 introduced some incompatible API changes which came in via
a new Maven artifact. We don't use any changed API in HBase. This
also removes some unnecessary dependencies on bcprov in other
modules (presumably, they are vestiges)

Signed-off-by: Mike Drob 
Signed-off-by: Ted Yu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8b66dea2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8b66dea2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8b66dea2

Branch: refs/heads/master
Commit: 8b66dea2ff034315336d7b768f0b7667413e6e3d
Parents: db9a5b7
Author: Josh Elser 
Authored: Tue Oct 9 13:06:10 2018 -0400
Committer: Josh Elser 
Committed: Thu Oct 11 11:02:32 2018 -0400

--
 hbase-endpoint/pom.xml | 6 --
 hbase-http/pom.xml | 2 +-
 hbase-server/pom.xml   | 5 -
 pom.xml| 4 ++--
 4 files changed, 3 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8b66dea2/hbase-endpoint/pom.xml
--
diff --git a/hbase-endpoint/pom.xml b/hbase-endpoint/pom.xml
index 8950de1..554a01f 100644
--- a/hbase-endpoint/pom.xml
+++ b/hbase-endpoint/pom.xml
@@ -229,12 +229,6 @@
   mockito-core
   test
 
-
-
-  org.bouncycastle
-  bcprov-jdk16
-  test
-
   
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b66dea2/hbase-http/pom.xml
--
diff --git a/hbase-http/pom.xml b/hbase-http/pom.xml
index 667b3f5..fb70451 100644
--- a/hbase-http/pom.xml
+++ b/hbase-http/pom.xml
@@ -263,7 +263,7 @@
 
 
   org.bouncycastle
-  bcprov-jdk16
+  bcprov-jdk15on
   test
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b66dea2/hbase-server/pom.xml
--
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index 473b740..9f1454b 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -538,11 +538,6 @@
   test
 
 
-  org.bouncycastle
-  bcprov-jdk16
-  test
-
-
   org.apache.kerby
   kerb-client
   test

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b66dea2/pom.xml
--
diff --git a/pom.xml b/pom.xml
index b4ae6a8..a345088 100755
--- a/pom.xml
+++ b/pom.xml
@@ -1522,7 +1522,7 @@
 2.1.11
 1.0.18
 2.12.2
-1.46
+1.60
 1.0.1
 1.0.0
 4.0.0
@@ -2199,7 +2199,7 @@
   
   
 org.bouncycastle
-bcprov-jdk16
+bcprov-jdk15on
 ${bouncycastle.version}
 test
   



hbase-site git commit: INFRA-10751 Empty commit

2018-10-11 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 6b0020b16 -> 9ab80c17b


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/9ab80c17
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/9ab80c17
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/9ab80c17

Branch: refs/heads/asf-site
Commit: 9ab80c17b419b91829bf1e51b7ed52737870c087
Parents: 6b0020b
Author: jenkins 
Authored: Thu Oct 11 14:52:58 2018 +
Committer: jenkins 
Committed: Thu Oct 11 14:52:58 2018 +

--

--




[06/11] hbase-site git commit: Published site at 72552301ab772122dc4dd39941ad1acbe831f1ff.

2018-10-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
index 92967f2..d69bb8c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
@@ -88,404 +88,428 @@
 080
 081  public static final String WAL_PROVIDER 
= "hbase.wal.provider";
 082  static final String 
DEFAULT_WAL_PROVIDER = Providers.defaultProvider.name();
-083
-084  public static final String 
META_WAL_PROVIDER = "hbase.wal.meta_provider";
+083  public static final String 
WAL_PROVIDER_CLASS = "hbase.wal.provider.class";
+084  static final Class? extends 
WALProvider DEFAULT_WAL_PROVIDER_CLASS = AsyncFSWALProvider.class;
 085
-086  final String factoryId;
-087  private final WALProvider provider;
-088  // The meta updates are written to a 
different wal. If this
-089  // regionserver holds meta regions, 
then this ref will be non-null.
-090  // lazily intialized; most 
RegionServers don't deal with META
-091  private final 
AtomicReferenceWALProvider metaProvider = new 
AtomicReference();
-092
-093  /**
-094   * Configuration-specified WAL Reader 
used when a custom reader is requested
-095   */
-096  private final Class? extends 
AbstractFSWALProvider.Reader logReaderClass;
-097
-098  /**
-099   * How long to attempt opening 
in-recovery wals
-100   */
-101  private final int timeoutMillis;
-102
-103  private final Configuration conf;
-104
-105  // Used for the singleton WALFactory, 
see below.
-106  private WALFactory(Configuration conf) 
{
-107// this code is duplicated here so we 
can keep our members final.
-108// until we've moved reader/writer 
construction down into providers, this initialization must
-109// happen prior to provider 
initialization, in case they need to instantiate a reader/writer.
-110timeoutMillis = 
conf.getInt("hbase.hlog.open.timeout", 30);
-111/* TODO Both of these are probably 
specific to the fs wal provider */
-112logReaderClass = 
conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class,
-113  
AbstractFSWALProvider.Reader.class);
-114this.conf = conf;
-115// end required early 
initialization
-116
-117// this instance can't create wals, 
just reader/writers.
-118provider = null;
-119factoryId = SINGLETON_ID;
-120  }
-121
-122  @VisibleForTesting
-123  Providers getDefaultProvider() {
-124return Providers.defaultProvider;
-125  }
-126
-127  @VisibleForTesting
-128  public Class? extends 
WALProvider getProviderClass(String key, String defaultValue) {
-129try {
-130  Providers provider = 
Providers.valueOf(conf.get(key, defaultValue));
-131
-132  // AsyncFSWALProvider is not 
guaranteed to work on all Hadoop versions, when it's chosen as
-133  // the default and we can't use it, 
we want to fall back to FSHLog which we know works on
-134  // all versions.
-135  if (provider == 
getDefaultProvider()  provider.clazz == AsyncFSWALProvider.class
-136   
!AsyncFSWALProvider.load()) {
-137// AsyncFSWAL has better 
performance in most cases, and also uses less resources, we will
-138// try to use it if possible. It 
deeply hacks into the internal of DFSClient so will be
-139// easily broken when upgrading 
hadoop.
-140LOG.warn("Failed to load 
AsyncFSWALProvider, falling back to FSHLogProvider");
-141return FSHLogProvider.class;
-142  }
-143
-144  // N.b. If the user specifically 
requested AsyncFSWALProvider but their environment doesn't
-145  // support using it (e.g. 
AsyncFSWALProvider.load() == false), we should let this fail and
-146  // not fall back to 
FSHLogProvider.
-147  return provider.clazz;
-148} catch (IllegalArgumentException 
exception) {
-149  // Fall back to them specifying a 
class name
-150  // Note that the passed default 
class shouldn't actually be used, since the above only fails
-151  // when there is a config value 
present.
-152  return conf.getClass(key, 
Providers.defaultProvider.clazz, WALProvider.class);
-153}
-154  }
-155
-156  static WALProvider 
createProvider(Class? extends WALProvider clazz) throws IOException {
-157LOG.info("Instantiating WALProvider 
of type {}", clazz);
-158try {
-159  return 
clazz.getDeclaredConstructor().newInstance();
-160} catch (Exception e) {
-161  LOG.error("couldn't set up 
WALProvider, the configured class is " + clazz);
-162  LOG.debug("Exception details for 
failure to load WALProvider.", e);
-163  throw new IOException("couldn't set 
up WALProvider", e);
-164}
-165  }
-166
-167  /**
-168   * @param conf must not be null, will 
keep a reference to read params 

[01/11] hbase-site git commit: Published site at 72552301ab772122dc4dd39941ad1acbe831f1ff.

2018-10-11 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 76087e738 -> 6b0020b16


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.html 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.html
index 080823f..1b350c9 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.html
@@ -35,265 +35,268 @@
 027import java.util.Collection;
 028import java.util.Collections;
 029import java.util.List;
-030import 
org.apache.hadoop.conf.Configuration;
-031import org.apache.hadoop.fs.FileSystem;
-032import org.apache.hadoop.fs.Path;
-033import 
org.apache.hadoop.hbase.HConstants;
-034import 
org.apache.hadoop.hbase.client.RegionInfo;
-035// imports for things that haven't moved 
from regionserver.wal yet.
-036import 
org.apache.hadoop.hbase.regionserver.wal.FSHLog;
-037import 
org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter;
-038import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-039import 
org.apache.hadoop.hbase.util.CommonFSUtils;
-040import 
org.apache.hadoop.hbase.wal.WAL.Entry;
-041import 
org.apache.yetus.audience.InterfaceAudience;
-042import org.slf4j.Logger;
-043import org.slf4j.LoggerFactory;
-044
-045/**
-046 * A WAL Provider that returns a single 
thread safe WAL that optionally can skip parts of our normal
-047 * interactions with HDFS.
-048 * p
-049 * This implementation picks a directory 
in HDFS based on the same mechanisms as the
-050 * {@link FSHLogProvider}. Users can 
configure how much interaction we have with HDFS with the
-051 * configuration property 
"hbase.wal.iotestprovider.operations". The value should be a comma
-052 * separated list of allowed 
operations:
-053 * ul
-054 * liemappend/em 
: edits will be written to the underlying filesystem/li
-055 * liemsync/em : 
wal syncs will result in hflush calls/li
-056 * 
liemfileroll/em : roll requests will result in creating 
a new file on the underlying
-057 * filesystem./li
-058 * /ul
-059 * Additionally, the special cases "all" 
and "none" are recognized. If ommited, the value defaults
-060 * to "all." Behavior is undefined if 
"all" or "none" are paired with additional values. Behavior is
-061 * also undefined if values not listed 
above are included.
-062 * p
-063 * Only those operations listed will 
occur between the returned WAL and HDFS. All others will be
-064 * no-ops.
-065 * p
-066 * Note that in the case of allowing 
"append" operations but not allowing "fileroll", the returned
-067 * WAL will just keep writing to the same 
file. This won't avoid all costs associated with file
-068 * management over time, becaue the data 
set size may result in additional HDFS block allocations.
-069 */
-070@InterfaceAudience.Private
-071public class IOTestProvider implements 
WALProvider {
-072  private static final Logger LOG = 
LoggerFactory.getLogger(IOTestProvider.class);
-073
-074  private static final String 
ALLOWED_OPERATIONS = "hbase.wal.iotestprovider.operations";
-075  private enum AllowedOperations {
-076all,
-077append,
-078sync,
-079fileroll,
-080none
-081  }
-082
-083  private WALFactory factory;
+030import 
java.util.concurrent.atomic.AtomicBoolean;
+031
+032import 
org.apache.hadoop.conf.Configuration;
+033import org.apache.hadoop.fs.FileSystem;
+034import org.apache.hadoop.fs.Path;
+035import 
org.apache.hadoop.hbase.HConstants;
+036import 
org.apache.hadoop.hbase.client.RegionInfo;
+037// imports for things that haven't moved 
from regionserver.wal yet.
+038import 
org.apache.hadoop.hbase.regionserver.wal.FSHLog;
+039import 
org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter;
+040import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
+041import 
org.apache.hadoop.hbase.util.CommonFSUtils;
+042import 
org.apache.hadoop.hbase.wal.WAL.Entry;
+043import 
org.apache.yetus.audience.InterfaceAudience;
+044import org.slf4j.Logger;
+045import org.slf4j.LoggerFactory;
+046
+047/**
+048 * A WAL Provider that returns a single 
thread safe WAL that optionally can skip parts of our normal
+049 * interactions with HDFS.
+050 * p
+051 * This implementation picks a directory 
in HDFS based on the same mechanisms as the
+052 * {@link FSHLogProvider}. Users can 
configure how much interaction we have with HDFS with the
+053 * configuration property 
"hbase.wal.iotestprovider.operations". The value should be a comma
+054 * separated list of allowed 
operations:
+055 * ul
+056 * liemappend/em 
: edits will be written to the underlying filesystem/li
+057 * liemsync/em : 
wal syncs will result in hflush calls/li
+058 * 
liemfileroll/em : roll requests will result in creating 
a new file on the underlying
+059 * filesystem./li

[11/11] hbase-site git commit: Published site at 72552301ab772122dc4dd39941ad1acbe831f1ff.

2018-10-11 Thread git-site-role
Published site at 72552301ab772122dc4dd39941ad1acbe831f1ff.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/6b0020b1
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/6b0020b1
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/6b0020b1

Branch: refs/heads/asf-site
Commit: 6b0020b160a54417788b0ab36d1f12a413125e26
Parents: 76087e7
Author: jenkins 
Authored: Thu Oct 11 14:52:39 2018 +
Committer: jenkins 
Committed: Thu Oct 11 14:52:39 2018 +

--
 acid-semantics.html |   4 +-
 apache_hbase_reference_guide.pdf|   4 +-
 book.html   |   2 +-
 bulk-loads.html |   4 +-
 checkstyle-aggregate.html   |  22 +-
 coc.html|   4 +-
 dependencies.html   |   4 +-
 dependency-convergence.html |   4 +-
 dependency-info.html|   4 +-
 dependency-management.html  |   4 +-
 devapidocs/constant-values.html |  27 +-
 devapidocs/index-all.html   |  10 +-
 .../hadoop/hbase/backup/package-tree.html   |   2 +-
 .../hadoop/hbase/client/package-tree.html   |  24 +-
 .../hadoop/hbase/coprocessor/package-tree.html  |   2 +-
 .../hadoop/hbase/filter/package-tree.html   |   8 +-
 .../hadoop/hbase/io/hfile/package-tree.html |   4 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   |   4 +-
 .../hadoop/hbase/mapreduce/package-tree.html|   2 +-
 .../hbase/master/balancer/package-tree.html |   2 +-
 .../hadoop/hbase/master/package-tree.html   |   2 +-
 .../hbase/master/procedure/package-tree.html|   4 +-
 .../org/apache/hadoop/hbase/package-tree.html   |  14 +-
 .../hadoop/hbase/procedure2/package-tree.html   |   6 +-
 .../hadoop/hbase/quotas/package-tree.html   |   6 +-
 .../hadoop/hbase/regionserver/package-tree.html |  20 +-
 .../regionserver/querymatcher/package-tree.html |   2 +-
 .../hbase/regionserver/wal/package-tree.html|   2 +-
 .../hadoop/hbase/replication/package-tree.html  |   2 +-
 .../hbase/security/access/package-tree.html |   4 +-
 .../hadoop/hbase/security/package-tree.html |   4 +-
 .../apache/hadoop/hbase/util/package-tree.html  |   8 +-
 ...oupingProvider.IdentityGroupingStrategy.html |   8 +-
 .../hbase/wal/RegionGroupingProvider.html   |  71 +-
 .../org/apache/hadoop/hbase/wal/WALFactory.html | 125 ++-
 .../hadoop/hbase/wal/class-use/WALProvider.html |   9 +-
 .../apache/hadoop/hbase/wal/package-tree.html   |   2 +-
 .../org/apache/hadoop/hbase/Version.html|   4 +-
 ...oupingProvider.IdentityGroupingStrategy.html | 325 
 ...GroupingProvider.RegionGroupingStrategy.html | 325 
 .../wal/RegionGroupingProvider.Strategies.html  | 325 
 .../hbase/wal/RegionGroupingProvider.html   | 325 
 .../hadoop/hbase/wal/WALFactory.Providers.html  | 816 ++-
 .../org/apache/hadoop/hbase/wal/WALFactory.html | 816 ++-
 downloads.html  |   4 +-
 export_control.html |   4 +-
 index.html  |   4 +-
 integration.html|   4 +-
 issue-tracking.html |   4 +-
 license.html|   4 +-
 mail-lists.html |   4 +-
 metrics.html|   4 +-
 old_news.html   |   4 +-
 plugin-management.html  |   4 +-
 plugins.html|   4 +-
 poweredbyhbase.html |   4 +-
 project-info.html   |   4 +-
 project-reports.html|   4 +-
 project-summary.html|   4 +-
 pseudo-distributed.html |   4 +-
 replication.html|   4 +-
 resources.html  |   4 +-
 source-repository.html  |   4 +-
 sponsors.html   |   4 +-
 supportingprojects.html |   4 +-
 team-list.html  |   4 +-
 testdevapidocs/index-all.html   |   6 +
 .../hadoop/hbase/backup/package-tree.html   |   2 +-
 .../hadoop/hbase/io/hfile/package-tree.html |   2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |   8 +-
 .../hadoop/hbase/procedure/package-tree.html|   8 +-
 .../hadoop/hbase/procedure2/package-tree.html   |   2 +-
 .../hadoop/hbase/regionserver/package-tree.html |   4 +-
 .../apache/hadoop/hbase/test/package-tree.html  |   4 +-
 

[03/11] hbase-site git commit: Published site at 72552301ab772122dc4dd39941ad1acbe831f1ff.

2018-10-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWAL.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWAL.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWAL.html
index 080823f..1b350c9 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWAL.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWAL.html
@@ -35,265 +35,268 @@
 027import java.util.Collection;
 028import java.util.Collections;
 029import java.util.List;
-030import 
org.apache.hadoop.conf.Configuration;
-031import org.apache.hadoop.fs.FileSystem;
-032import org.apache.hadoop.fs.Path;
-033import 
org.apache.hadoop.hbase.HConstants;
-034import 
org.apache.hadoop.hbase.client.RegionInfo;
-035// imports for things that haven't moved 
from regionserver.wal yet.
-036import 
org.apache.hadoop.hbase.regionserver.wal.FSHLog;
-037import 
org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter;
-038import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-039import 
org.apache.hadoop.hbase.util.CommonFSUtils;
-040import 
org.apache.hadoop.hbase.wal.WAL.Entry;
-041import 
org.apache.yetus.audience.InterfaceAudience;
-042import org.slf4j.Logger;
-043import org.slf4j.LoggerFactory;
-044
-045/**
-046 * A WAL Provider that returns a single 
thread safe WAL that optionally can skip parts of our normal
-047 * interactions with HDFS.
-048 * p
-049 * This implementation picks a directory 
in HDFS based on the same mechanisms as the
-050 * {@link FSHLogProvider}. Users can 
configure how much interaction we have with HDFS with the
-051 * configuration property 
"hbase.wal.iotestprovider.operations". The value should be a comma
-052 * separated list of allowed 
operations:
-053 * ul
-054 * liemappend/em 
: edits will be written to the underlying filesystem/li
-055 * liemsync/em : 
wal syncs will result in hflush calls/li
-056 * 
liemfileroll/em : roll requests will result in creating 
a new file on the underlying
-057 * filesystem./li
-058 * /ul
-059 * Additionally, the special cases "all" 
and "none" are recognized. If ommited, the value defaults
-060 * to "all." Behavior is undefined if 
"all" or "none" are paired with additional values. Behavior is
-061 * also undefined if values not listed 
above are included.
-062 * p
-063 * Only those operations listed will 
occur between the returned WAL and HDFS. All others will be
-064 * no-ops.
-065 * p
-066 * Note that in the case of allowing 
"append" operations but not allowing "fileroll", the returned
-067 * WAL will just keep writing to the same 
file. This won't avoid all costs associated with file
-068 * management over time, becaue the data 
set size may result in additional HDFS block allocations.
-069 */
-070@InterfaceAudience.Private
-071public class IOTestProvider implements 
WALProvider {
-072  private static final Logger LOG = 
LoggerFactory.getLogger(IOTestProvider.class);
-073
-074  private static final String 
ALLOWED_OPERATIONS = "hbase.wal.iotestprovider.operations";
-075  private enum AllowedOperations {
-076all,
-077append,
-078sync,
-079fileroll,
-080none
-081  }
-082
-083  private WALFactory factory;
+030import 
java.util.concurrent.atomic.AtomicBoolean;
+031
+032import 
org.apache.hadoop.conf.Configuration;
+033import org.apache.hadoop.fs.FileSystem;
+034import org.apache.hadoop.fs.Path;
+035import 
org.apache.hadoop.hbase.HConstants;
+036import 
org.apache.hadoop.hbase.client.RegionInfo;
+037// imports for things that haven't moved 
from regionserver.wal yet.
+038import 
org.apache.hadoop.hbase.regionserver.wal.FSHLog;
+039import 
org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter;
+040import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
+041import 
org.apache.hadoop.hbase.util.CommonFSUtils;
+042import 
org.apache.hadoop.hbase.wal.WAL.Entry;
+043import 
org.apache.yetus.audience.InterfaceAudience;
+044import org.slf4j.Logger;
+045import org.slf4j.LoggerFactory;
+046
+047/**
+048 * A WAL Provider that returns a single 
thread safe WAL that optionally can skip parts of our normal
+049 * interactions with HDFS.
+050 * p
+051 * This implementation picks a directory 
in HDFS based on the same mechanisms as the
+052 * {@link FSHLogProvider}. Users can 
configure how much interaction we have with HDFS with the
+053 * configuration property 
"hbase.wal.iotestprovider.operations". The value should be a comma
+054 * separated list of allowed 
operations:
+055 * ul
+056 * liemappend/em 
: edits will be written to the underlying filesystem/li
+057 * liemsync/em : 
wal syncs will result in hflush calls/li
+058 * 
liemfileroll/em : roll requests will result in creating 
a new file on the underlying
+059 * filesystem./li
+060 * /ul
+061 * Additionally, the 

[10/11] hbase-site git commit: Published site at 72552301ab772122dc4dd39941ad1acbe831f1ff.

2018-10-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
index eec8680..8051208 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
@@ -142,9 +142,9 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.security.access.AccessControlFilter.Strategy
-org.apache.hadoop.hbase.security.access.Permission.Action
 org.apache.hadoop.hbase.security.access.AccessController.OpType
+org.apache.hadoop.hbase.security.access.Permission.Action
+org.apache.hadoop.hbase.security.access.AccessControlFilter.Strategy
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/security/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
index 6ef281d..7ba3a64 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
@@ -191,9 +191,9 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection
-org.apache.hadoop.hbase.security.SaslStatus
 org.apache.hadoop.hbase.security.AuthMethod
+org.apache.hadoop.hbase.security.SaslStatus
+org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
index f9292f6..64bc1d5 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
@@ -518,14 +518,14 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.util.PrettyPrinter.Unit
-org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.PureJavaComparer
 (implements org.apache.hadoop.hbase.util.Bytes.ComparerT)
+org.apache.hadoop.hbase.util.PoolMap.PoolType
 org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE
+org.apache.hadoop.hbase.util.ChecksumType
+org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.PureJavaComparer
 (implements org.apache.hadoop.hbase.util.Bytes.ComparerT)
 org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.UnsafeComparer
 (implements org.apache.hadoop.hbase.util.Bytes.ComparerT)
 org.apache.hadoop.hbase.util.Order
 org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType
-org.apache.hadoop.hbase.util.PoolMap.PoolType
-org.apache.hadoop.hbase.util.ChecksumType
+org.apache.hadoop.hbase.util.PrettyPrinter.Unit
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/devapidocs/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html
 
b/devapidocs/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html
index 288bb00..7114b2c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html
+++ 

[07/11] hbase-site git commit: Published site at 72552301ab772122dc4dd39941ad1acbe831f1ff.

2018-10-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html
index 92967f2..d69bb8c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html
@@ -88,404 +88,428 @@
 080
 081  public static final String WAL_PROVIDER 
= "hbase.wal.provider";
 082  static final String 
DEFAULT_WAL_PROVIDER = Providers.defaultProvider.name();
-083
-084  public static final String 
META_WAL_PROVIDER = "hbase.wal.meta_provider";
+083  public static final String 
WAL_PROVIDER_CLASS = "hbase.wal.provider.class";
+084  static final Class? extends 
WALProvider DEFAULT_WAL_PROVIDER_CLASS = AsyncFSWALProvider.class;
 085
-086  final String factoryId;
-087  private final WALProvider provider;
-088  // The meta updates are written to a 
different wal. If this
-089  // regionserver holds meta regions, 
then this ref will be non-null.
-090  // lazily intialized; most 
RegionServers don't deal with META
-091  private final 
AtomicReferenceWALProvider metaProvider = new 
AtomicReference();
-092
-093  /**
-094   * Configuration-specified WAL Reader 
used when a custom reader is requested
-095   */
-096  private final Class? extends 
AbstractFSWALProvider.Reader logReaderClass;
-097
-098  /**
-099   * How long to attempt opening 
in-recovery wals
-100   */
-101  private final int timeoutMillis;
-102
-103  private final Configuration conf;
-104
-105  // Used for the singleton WALFactory, 
see below.
-106  private WALFactory(Configuration conf) 
{
-107// this code is duplicated here so we 
can keep our members final.
-108// until we've moved reader/writer 
construction down into providers, this initialization must
-109// happen prior to provider 
initialization, in case they need to instantiate a reader/writer.
-110timeoutMillis = 
conf.getInt("hbase.hlog.open.timeout", 30);
-111/* TODO Both of these are probably 
specific to the fs wal provider */
-112logReaderClass = 
conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class,
-113  
AbstractFSWALProvider.Reader.class);
-114this.conf = conf;
-115// end required early 
initialization
-116
-117// this instance can't create wals, 
just reader/writers.
-118provider = null;
-119factoryId = SINGLETON_ID;
-120  }
-121
-122  @VisibleForTesting
-123  Providers getDefaultProvider() {
-124return Providers.defaultProvider;
-125  }
-126
-127  @VisibleForTesting
-128  public Class? extends 
WALProvider getProviderClass(String key, String defaultValue) {
-129try {
-130  Providers provider = 
Providers.valueOf(conf.get(key, defaultValue));
-131
-132  // AsyncFSWALProvider is not 
guaranteed to work on all Hadoop versions, when it's chosen as
-133  // the default and we can't use it, 
we want to fall back to FSHLog which we know works on
-134  // all versions.
-135  if (provider == 
getDefaultProvider()  provider.clazz == AsyncFSWALProvider.class
-136   
!AsyncFSWALProvider.load()) {
-137// AsyncFSWAL has better 
performance in most cases, and also uses less resources, we will
-138// try to use it if possible. It 
deeply hacks into the internal of DFSClient so will be
-139// easily broken when upgrading 
hadoop.
-140LOG.warn("Failed to load 
AsyncFSWALProvider, falling back to FSHLogProvider");
-141return FSHLogProvider.class;
-142  }
-143
-144  // N.b. If the user specifically 
requested AsyncFSWALProvider but their environment doesn't
-145  // support using it (e.g. 
AsyncFSWALProvider.load() == false), we should let this fail and
-146  // not fall back to 
FSHLogProvider.
-147  return provider.clazz;
-148} catch (IllegalArgumentException 
exception) {
-149  // Fall back to them specifying a 
class name
-150  // Note that the passed default 
class shouldn't actually be used, since the above only fails
-151  // when there is a config value 
present.
-152  return conf.getClass(key, 
Providers.defaultProvider.clazz, WALProvider.class);
-153}
-154  }
-155
-156  static WALProvider 
createProvider(Class? extends WALProvider clazz) throws IOException {
-157LOG.info("Instantiating WALProvider 
of type {}", clazz);
-158try {
-159  return 
clazz.getDeclaredConstructor().newInstance();
-160} catch (Exception e) {
-161  LOG.error("couldn't set up 
WALProvider, the configured class is " + clazz);
-162  LOG.debug("Exception details for 
failure to load WALProvider.", e);
-163  throw new IOException("couldn't set 
up WALProvider", e);
-164}
-165  }
-166
-167  /**
-168   * @param conf must 

[02/11] hbase-site git commit: Published site at 72552301ab772122dc4dd39941ad1acbe831f1ff.

2018-10-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWriter.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWriter.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWriter.html
index 080823f..1b350c9 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWriter.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWriter.html
@@ -35,265 +35,268 @@
 027import java.util.Collection;
 028import java.util.Collections;
 029import java.util.List;
-030import 
org.apache.hadoop.conf.Configuration;
-031import org.apache.hadoop.fs.FileSystem;
-032import org.apache.hadoop.fs.Path;
-033import 
org.apache.hadoop.hbase.HConstants;
-034import 
org.apache.hadoop.hbase.client.RegionInfo;
-035// imports for things that haven't moved 
from regionserver.wal yet.
-036import 
org.apache.hadoop.hbase.regionserver.wal.FSHLog;
-037import 
org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter;
-038import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-039import 
org.apache.hadoop.hbase.util.CommonFSUtils;
-040import 
org.apache.hadoop.hbase.wal.WAL.Entry;
-041import 
org.apache.yetus.audience.InterfaceAudience;
-042import org.slf4j.Logger;
-043import org.slf4j.LoggerFactory;
-044
-045/**
-046 * A WAL Provider that returns a single 
thread safe WAL that optionally can skip parts of our normal
-047 * interactions with HDFS.
-048 * p
-049 * This implementation picks a directory 
in HDFS based on the same mechanisms as the
-050 * {@link FSHLogProvider}. Users can 
configure how much interaction we have with HDFS with the
-051 * configuration property 
"hbase.wal.iotestprovider.operations". The value should be a comma
-052 * separated list of allowed 
operations:
-053 * ul
-054 * liemappend/em 
: edits will be written to the underlying filesystem/li
-055 * liemsync/em : 
wal syncs will result in hflush calls/li
-056 * 
liemfileroll/em : roll requests will result in creating 
a new file on the underlying
-057 * filesystem./li
-058 * /ul
-059 * Additionally, the special cases "all" 
and "none" are recognized. If ommited, the value defaults
-060 * to "all." Behavior is undefined if 
"all" or "none" are paired with additional values. Behavior is
-061 * also undefined if values not listed 
above are included.
-062 * p
-063 * Only those operations listed will 
occur between the returned WAL and HDFS. All others will be
-064 * no-ops.
-065 * p
-066 * Note that in the case of allowing 
"append" operations but not allowing "fileroll", the returned
-067 * WAL will just keep writing to the same 
file. This won't avoid all costs associated with file
-068 * management over time, becaue the data 
set size may result in additional HDFS block allocations.
-069 */
-070@InterfaceAudience.Private
-071public class IOTestProvider implements 
WALProvider {
-072  private static final Logger LOG = 
LoggerFactory.getLogger(IOTestProvider.class);
-073
-074  private static final String 
ALLOWED_OPERATIONS = "hbase.wal.iotestprovider.operations";
-075  private enum AllowedOperations {
-076all,
-077append,
-078sync,
-079fileroll,
-080none
-081  }
-082
-083  private WALFactory factory;
+030import 
java.util.concurrent.atomic.AtomicBoolean;
+031
+032import 
org.apache.hadoop.conf.Configuration;
+033import org.apache.hadoop.fs.FileSystem;
+034import org.apache.hadoop.fs.Path;
+035import 
org.apache.hadoop.hbase.HConstants;
+036import 
org.apache.hadoop.hbase.client.RegionInfo;
+037// imports for things that haven't moved 
from regionserver.wal yet.
+038import 
org.apache.hadoop.hbase.regionserver.wal.FSHLog;
+039import 
org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter;
+040import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
+041import 
org.apache.hadoop.hbase.util.CommonFSUtils;
+042import 
org.apache.hadoop.hbase.wal.WAL.Entry;
+043import 
org.apache.yetus.audience.InterfaceAudience;
+044import org.slf4j.Logger;
+045import org.slf4j.LoggerFactory;
+046
+047/**
+048 * A WAL Provider that returns a single 
thread safe WAL that optionally can skip parts of our normal
+049 * interactions with HDFS.
+050 * p
+051 * This implementation picks a directory 
in HDFS based on the same mechanisms as the
+052 * {@link FSHLogProvider}. Users can 
configure how much interaction we have with HDFS with the
+053 * configuration property 
"hbase.wal.iotestprovider.operations". The value should be a comma
+054 * separated list of allowed 
operations:
+055 * ul
+056 * liemappend/em 
: edits will be written to the underlying filesystem/li
+057 * liemsync/em : 
wal syncs will result in hflush calls/li
+058 * 
liemfileroll/em : roll requests will result in creating 
a new file on the underlying
+059 * filesystem./li
+060 * /ul
+061 * 

[08/11] hbase-site git commit: Published site at 72552301ab772122dc4dd39941ad1acbe831f1ff.

2018-10-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.Strategies.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.Strategies.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.Strategies.html
index be2a512..2142742 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.Strategies.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.Strategies.html
@@ -129,171 +129,174 @@
 121
 122  /** delegate provider for WAL 
creation/roll/close */
 123  public static final String 
DELEGATE_PROVIDER = "hbase.wal.regiongrouping.delegate.provider";
-124  public static final String 
DEFAULT_DELEGATE_PROVIDER = WALFactory.Providers.defaultProvider
-125  .name();
-126
-127  private static final String 
META_WAL_GROUP_NAME = "meta";
+124  public static final String 
DELEGATE_PROVIDER_CLASS =
+125
"hbase.wal.regiongrouping.delegate.provider.class";
+126  public static final String 
DEFAULT_DELEGATE_PROVIDER = WALFactory.Providers.defaultProvider
+127  .name();
 128
-129  /** A group-provider mapping, make sure 
one-one rather than many-one mapping */
-130  private final ConcurrentMapString, 
WALProvider cached = new ConcurrentHashMap();
-131
-132  private final KeyLockerString 
createLock = new KeyLocker();
+129  private static final String 
META_WAL_GROUP_NAME = "meta";
+130
+131  /** A group-provider mapping, make sure 
one-one rather than many-one mapping */
+132  private final ConcurrentMapString, 
WALProvider cached = new ConcurrentHashMap();
 133
-134  private RegionGroupingStrategy 
strategy;
-135  private WALFactory factory;
-136  private Configuration conf;
-137  private ListWALActionsListener 
listeners = new ArrayList();
-138  private String providerId;
-139  private Class? extends 
WALProvider providerClass;
-140
-141  @Override
-142  public void init(WALFactory factory, 
Configuration conf, String providerId) throws IOException {
-143if (null != strategy) {
-144  throw new 
IllegalStateException("WALProvider.init should only be called once.");
-145}
-146this.conf = conf;
-147this.factory = factory;
-148StringBuilder sb = new 
StringBuilder().append(factory.factoryId);
-149if (providerId != null) {
-150  if 
(providerId.startsWith(WAL_FILE_NAME_DELIMITER)) {
-151sb.append(providerId);
-152  } else {
-153
sb.append(WAL_FILE_NAME_DELIMITER).append(providerId);
-154  }
-155}
-156this.providerId = sb.toString();
-157this.strategy = getStrategy(conf, 
REGION_GROUPING_STRATEGY, DEFAULT_REGION_GROUPING_STRATEGY);
-158this.providerClass = 
factory.getProviderClass(DELEGATE_PROVIDER, DEFAULT_DELEGATE_PROVIDER);
-159  }
-160
-161  private WALProvider 
createProvider(String group) throws IOException {
-162WALProvider provider = 
WALFactory.createProvider(providerClass);
-163provider.init(factory, conf,
-164  
META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : group);
-165provider.addWALActionsListener(new 
MetricsWAL());
-166return provider;
-167  }
-168
-169  @Override
-170  public ListWAL getWALs() {
-171return 
cached.values().stream().flatMap(p - 
p.getWALs().stream()).collect(Collectors.toList());
-172  }
-173
-174  private WAL getWAL(String group) throws 
IOException {
-175WALProvider provider = 
cached.get(group);
-176if (provider == null) {
-177  Lock lock = 
createLock.acquireLock(group);
-178  try {
-179provider = cached.get(group);
-180if (provider == null) {
-181  provider = 
createProvider(group);
-182  
listeners.forEach(provider::addWALActionsListener);
-183  cached.put(group, provider);
-184}
-185  } finally {
-186lock.unlock();
-187  }
-188}
-189return provider.getWAL(null);
-190  }
-191
-192  @Override
-193  public WAL getWAL(RegionInfo region) 
throws IOException {
-194String group;
-195if 
(META_WAL_PROVIDER_ID.equals(this.providerId)) {
-196  group = META_WAL_GROUP_NAME;
-197} else {
-198  byte[] id;
-199  byte[] namespace;
-200  if (region != null) {
-201id = 
region.getEncodedNameAsBytes();
-202namespace = 
region.getTable().getNamespace();
-203  } else {
-204id = 
HConstants.EMPTY_BYTE_ARRAY;
-205namespace = null;
-206  }
-207  group = strategy.group(id, 
namespace);
-208}
-209return getWAL(group);
-210  }
-211
-212  @Override
-213  public void shutdown() throws 
IOException {
-214// save the last exception and 
rethrow
-215IOException failure = null;
-216for (WALProvider provider: 
cached.values()) {
-217  try {
-218provider.shutdown();
-219  } catch (IOException e) {
-220LOG.error("Problem 

[05/11] hbase-site git commit: Published site at 72552301ab772122dc4dd39941ad1acbe831f1ff.

2018-10-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/license.html
--
diff --git a/license.html b/license.html
index a548abd..22567b5 100644
--- a/license.html
+++ b/license.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Licenses
 
@@ -491,7 +491,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-10-10
+  Last Published: 
2018-10-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/mail-lists.html
--
diff --git a/mail-lists.html b/mail-lists.html
index c047626..2fee6c0 100644
--- a/mail-lists.html
+++ b/mail-lists.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Mailing Lists
 
@@ -341,7 +341,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-10-10
+  Last Published: 
2018-10-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/metrics.html
--
diff --git a/metrics.html b/metrics.html
index 6c0cea6..0938acd 100644
--- a/metrics.html
+++ b/metrics.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase   
   Apache HBase (TM) Metrics
@@ -459,7 +459,7 @@ export HBASE_REGIONSERVER_OPTS=$HBASE_JMX_OPTS 
-Dcom.sun.management.jmxrem
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-10-10
+  Last Published: 
2018-10-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/old_news.html
--
diff --git a/old_news.html b/old_news.html
index cf4597f..45acdd2 100644
--- a/old_news.html
+++ b/old_news.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Old Apache HBase (TM) News
@@ -440,7 +440,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-10-10
+  Last Published: 
2018-10-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/plugin-management.html
--
diff --git a/plugin-management.html b/plugin-management.html
index 2402e86..a745e35 100644
--- a/plugin-management.html
+++ b/plugin-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Plugin Management
 
@@ -440,7 +440,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-10-10
+  Last Published: 
2018-10-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/plugins.html
--
diff --git a/plugins.html b/plugins.html
index 9251ffc..f87797f 100644
--- a/plugins.html
+++ b/plugins.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Plugins
 
@@ -375,7 +375,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-10-10
+  Last Published: 
2018-10-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/poweredbyhbase.html
--
diff --git a/poweredbyhbase.html b/poweredbyhbase.html
index 4e82f53..6016ce6 100644
--- a/poweredbyhbase.html
+++ b/poweredbyhbase.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Powered By Apache HBase™
 
@@ -769,7 +769,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-10-10
+  Last Published: 
2018-10-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/project-info.html
--
diff --git a/project-info.html b/project-info.html
index 

[09/11] hbase-site git commit: Published site at 72552301ab772122dc4dd39941ad1acbe831f1ff.

2018-10-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html
index be2a512..2142742 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html
@@ -129,171 +129,174 @@
 121
 122  /** delegate provider for WAL 
creation/roll/close */
 123  public static final String 
DELEGATE_PROVIDER = "hbase.wal.regiongrouping.delegate.provider";
-124  public static final String 
DEFAULT_DELEGATE_PROVIDER = WALFactory.Providers.defaultProvider
-125  .name();
-126
-127  private static final String 
META_WAL_GROUP_NAME = "meta";
+124  public static final String 
DELEGATE_PROVIDER_CLASS =
+125
"hbase.wal.regiongrouping.delegate.provider.class";
+126  public static final String 
DEFAULT_DELEGATE_PROVIDER = WALFactory.Providers.defaultProvider
+127  .name();
 128
-129  /** A group-provider mapping, make sure 
one-one rather than many-one mapping */
-130  private final ConcurrentMapString, 
WALProvider cached = new ConcurrentHashMap();
-131
-132  private final KeyLockerString 
createLock = new KeyLocker();
+129  private static final String 
META_WAL_GROUP_NAME = "meta";
+130
+131  /** A group-provider mapping, make sure 
one-one rather than many-one mapping */
+132  private final ConcurrentMapString, 
WALProvider cached = new ConcurrentHashMap();
 133
-134  private RegionGroupingStrategy 
strategy;
-135  private WALFactory factory;
-136  private Configuration conf;
-137  private ListWALActionsListener 
listeners = new ArrayList();
-138  private String providerId;
-139  private Class? extends 
WALProvider providerClass;
-140
-141  @Override
-142  public void init(WALFactory factory, 
Configuration conf, String providerId) throws IOException {
-143if (null != strategy) {
-144  throw new 
IllegalStateException("WALProvider.init should only be called once.");
-145}
-146this.conf = conf;
-147this.factory = factory;
-148StringBuilder sb = new 
StringBuilder().append(factory.factoryId);
-149if (providerId != null) {
-150  if 
(providerId.startsWith(WAL_FILE_NAME_DELIMITER)) {
-151sb.append(providerId);
-152  } else {
-153
sb.append(WAL_FILE_NAME_DELIMITER).append(providerId);
-154  }
-155}
-156this.providerId = sb.toString();
-157this.strategy = getStrategy(conf, 
REGION_GROUPING_STRATEGY, DEFAULT_REGION_GROUPING_STRATEGY);
-158this.providerClass = 
factory.getProviderClass(DELEGATE_PROVIDER, DEFAULT_DELEGATE_PROVIDER);
-159  }
-160
-161  private WALProvider 
createProvider(String group) throws IOException {
-162WALProvider provider = 
WALFactory.createProvider(providerClass);
-163provider.init(factory, conf,
-164  
META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : group);
-165provider.addWALActionsListener(new 
MetricsWAL());
-166return provider;
-167  }
-168
-169  @Override
-170  public ListWAL getWALs() {
-171return 
cached.values().stream().flatMap(p - 
p.getWALs().stream()).collect(Collectors.toList());
-172  }
-173
-174  private WAL getWAL(String group) throws 
IOException {
-175WALProvider provider = 
cached.get(group);
-176if (provider == null) {
-177  Lock lock = 
createLock.acquireLock(group);
-178  try {
-179provider = cached.get(group);
-180if (provider == null) {
-181  provider = 
createProvider(group);
-182  
listeners.forEach(provider::addWALActionsListener);
-183  cached.put(group, provider);
-184}
-185  } finally {
-186lock.unlock();
-187  }
-188}
-189return provider.getWAL(null);
-190  }
-191
-192  @Override
-193  public WAL getWAL(RegionInfo region) 
throws IOException {
-194String group;
-195if 
(META_WAL_PROVIDER_ID.equals(this.providerId)) {
-196  group = META_WAL_GROUP_NAME;
-197} else {
-198  byte[] id;
-199  byte[] namespace;
-200  if (region != null) {
-201id = 
region.getEncodedNameAsBytes();
-202namespace = 
region.getTable().getNamespace();
-203  } else {
-204id = 
HConstants.EMPTY_BYTE_ARRAY;
-205namespace = null;
-206  }
-207  group = strategy.group(id, 
namespace);
-208}
-209return getWAL(group);
-210  }
-211
-212  @Override
-213  public void shutdown() throws 
IOException {
-214// save the last exception and 
rethrow
-215IOException failure = null;
-216for (WALProvider provider: 
cached.values()) {
-217  try {
-218provider.shutdown();

[04/11] hbase-site git commit: Published site at 72552301ab772122dc4dd39941ad1acbe831f1ff.

2018-10-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b0020b1/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.AllowedOperations.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.AllowedOperations.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.AllowedOperations.html
index 080823f..1b350c9 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.AllowedOperations.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/IOTestProvider.AllowedOperations.html
@@ -35,265 +35,268 @@
 027import java.util.Collection;
 028import java.util.Collections;
 029import java.util.List;
-030import 
org.apache.hadoop.conf.Configuration;
-031import org.apache.hadoop.fs.FileSystem;
-032import org.apache.hadoop.fs.Path;
-033import 
org.apache.hadoop.hbase.HConstants;
-034import 
org.apache.hadoop.hbase.client.RegionInfo;
-035// imports for things that haven't moved 
from regionserver.wal yet.
-036import 
org.apache.hadoop.hbase.regionserver.wal.FSHLog;
-037import 
org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter;
-038import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-039import 
org.apache.hadoop.hbase.util.CommonFSUtils;
-040import 
org.apache.hadoop.hbase.wal.WAL.Entry;
-041import 
org.apache.yetus.audience.InterfaceAudience;
-042import org.slf4j.Logger;
-043import org.slf4j.LoggerFactory;
-044
-045/**
-046 * A WAL Provider that returns a single 
thread safe WAL that optionally can skip parts of our normal
-047 * interactions with HDFS.
-048 * p
-049 * This implementation picks a directory 
in HDFS based on the same mechanisms as the
-050 * {@link FSHLogProvider}. Users can 
configure how much interaction we have with HDFS with the
-051 * configuration property 
"hbase.wal.iotestprovider.operations". The value should be a comma
-052 * separated list of allowed 
operations:
-053 * ul
-054 * liemappend/em 
: edits will be written to the underlying filesystem/li
-055 * liemsync/em : 
wal syncs will result in hflush calls/li
-056 * 
liemfileroll/em : roll requests will result in creating 
a new file on the underlying
-057 * filesystem./li
-058 * /ul
-059 * Additionally, the special cases "all" 
and "none" are recognized. If ommited, the value defaults
-060 * to "all." Behavior is undefined if 
"all" or "none" are paired with additional values. Behavior is
-061 * also undefined if values not listed 
above are included.
-062 * p
-063 * Only those operations listed will 
occur between the returned WAL and HDFS. All others will be
-064 * no-ops.
-065 * p
-066 * Note that in the case of allowing 
"append" operations but not allowing "fileroll", the returned
-067 * WAL will just keep writing to the same 
file. This won't avoid all costs associated with file
-068 * management over time, becaue the data 
set size may result in additional HDFS block allocations.
-069 */
-070@InterfaceAudience.Private
-071public class IOTestProvider implements 
WALProvider {
-072  private static final Logger LOG = 
LoggerFactory.getLogger(IOTestProvider.class);
-073
-074  private static final String 
ALLOWED_OPERATIONS = "hbase.wal.iotestprovider.operations";
-075  private enum AllowedOperations {
-076all,
-077append,
-078sync,
-079fileroll,
-080none
-081  }
-082
-083  private WALFactory factory;
+030import 
java.util.concurrent.atomic.AtomicBoolean;
+031
+032import 
org.apache.hadoop.conf.Configuration;
+033import org.apache.hadoop.fs.FileSystem;
+034import org.apache.hadoop.fs.Path;
+035import 
org.apache.hadoop.hbase.HConstants;
+036import 
org.apache.hadoop.hbase.client.RegionInfo;
+037// imports for things that haven't moved 
from regionserver.wal yet.
+038import 
org.apache.hadoop.hbase.regionserver.wal.FSHLog;
+039import 
org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter;
+040import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
+041import 
org.apache.hadoop.hbase.util.CommonFSUtils;
+042import 
org.apache.hadoop.hbase.wal.WAL.Entry;
+043import 
org.apache.yetus.audience.InterfaceAudience;
+044import org.slf4j.Logger;
+045import org.slf4j.LoggerFactory;
+046
+047/**
+048 * A WAL Provider that returns a single 
thread safe WAL that optionally can skip parts of our normal
+049 * interactions with HDFS.
+050 * p
+051 * This implementation picks a directory 
in HDFS based on the same mechanisms as the
+052 * {@link FSHLogProvider}. Users can 
configure how much interaction we have with HDFS with the
+053 * configuration property 
"hbase.wal.iotestprovider.operations". The value should be a comma
+054 * separated list of allowed 
operations:
+055 * ul
+056 * liemappend/em 
: edits will be written to the underlying filesystem/li
+057 * liemsync/em : 
wal syncs will result in hflush calls/li
+058 * 
liemfileroll/em : roll requests will result in creating 
a new file on the underlying
+059 * 

hbase git commit: HBASE-21287 Allow configuring test master initialization wait time.

2018-10-11 Thread mdrob
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 ed80fc5d6 -> aca3509fd


HBASE-21287 Allow configuring test master initialization wait time.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/aca3509f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/aca3509f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/aca3509f

Branch: refs/heads/branch-2.0
Commit: aca3509fd7aa1b11e9e68304ab99430ef098fee7
Parents: ed80fc5
Author: Mike Drob 
Authored: Wed Oct 10 17:49:54 2018 -0500
Committer: Mike Drob 
Committed: Thu Oct 11 09:51:13 2018 -0500

--
 .../hadoop/hbase/util/JVMClusterUtil.java   | 69 +++-
 1 file changed, 39 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/aca3509f/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
index ee7ecf3..8c92f66 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
@@ -23,12 +23,13 @@ import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
 
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 
@@ -162,6 +163,8 @@ public class JVMClusterUtil {
*/
   public static String startup(final List masters,
   final List regionservers) throws 
IOException {
+// Implementation note: This method relies on timed sleeps in a loop. It's 
not great, and
+// should probably be re-written to use actual synchronization objects, 
but it's ok for now
 
 Configuration configuration = null;
 
@@ -177,21 +180,9 @@ public class JVMClusterUtil {
 // Wait for an active master
 //  having an active master before starting the region threads allows
 //  then to succeed on their connection to master
-long startTime = System.currentTimeMillis();
-while (findActiveMaster(masters) == null) {
-  try {
-Thread.sleep(100);
-  } catch (InterruptedException e) {
-throw (InterruptedIOException)new 
InterruptedIOException().initCause(e);
-  }
-  int startTimeout = configuration != null ? Integer.parseInt(
+final int startTimeout = configuration != null ? Integer.parseInt(
 configuration.get("hbase.master.start.timeout.localHBaseCluster", 
"3")) : 3;
-  if (System.currentTimeMillis() > startTime + startTimeout) {
-String msg = "Master not active after " + startTimeout + "ms";
-Threads.printThreadInfo(System.out, "Thread dump because: " + msg);
-throw new RuntimeException(msg);
-  }
-}
+waitForEvent(startTimeout, "active", () -> findActiveMaster(masters) != 
null);
 
 if (regionservers != null) {
   for (JVMClusterUtil.RegionServerThread t: regionservers) {
@@ -201,32 +192,50 @@ public class JVMClusterUtil {
 
 // Wait for an active master to be initialized (implies being master)
 //  with this, when we return the cluster is complete
-startTime = System.currentTimeMillis();
-final int maxwait = 20;
-while (true) {
-  JVMClusterUtil.MasterThread t = findActiveMaster(masters);
-  if (t != null && t.master.isInitialized()) {
-return t.master.getServerName().toString();
+final int initTimeout = configuration != null ? Integer.parseInt(
+configuration.get("hbase.master.init.timeout.localHBaseCluster", 
"20")) : 20;
+waitForEvent(initTimeout, "initialized", () -> {
+JVMClusterUtil.MasterThread t = findActiveMaster(masters);
+// master thread should never be null at this point, but let's keep 
the check anyway
+return t != null && t.master.isInitialized();
   }
-  // REMOVE
-  if (System.currentTimeMillis() > startTime + 1) {
-try {
-  Thread.sleep(1000);
-} catch (InterruptedException e) {
-  throw (InterruptedIOException)new 
InterruptedIOException().initCause(e);
-}
+);
+
+return findActiveMaster(masters).master.getServerName().toString();
+  }
+
+  /**
+   * Utility method to wait some time for an event to occur, and then return 
control to the 

hbase git commit: HBASE-21287 Allow configuring test master initialization wait time.

2018-10-11 Thread mdrob
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 e28396353 -> e726a89f5


HBASE-21287 Allow configuring test master initialization wait time.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e726a89f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e726a89f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e726a89f

Branch: refs/heads/branch-2.1
Commit: e726a89f5fbcc68c06713d973125e74bfd700840
Parents: e283963
Author: Mike Drob 
Authored: Wed Oct 10 17:49:54 2018 -0500
Committer: Mike Drob 
Committed: Thu Oct 11 09:50:57 2018 -0500

--
 .../hadoop/hbase/util/JVMClusterUtil.java   | 69 +++-
 1 file changed, 39 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e726a89f/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
index ee7ecf3..8c92f66 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
@@ -23,12 +23,13 @@ import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
 
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 
@@ -162,6 +163,8 @@ public class JVMClusterUtil {
*/
   public static String startup(final List masters,
   final List regionservers) throws 
IOException {
+// Implementation note: This method relies on timed sleeps in a loop. It's 
not great, and
+// should probably be re-written to use actual synchronization objects, 
but it's ok for now
 
 Configuration configuration = null;
 
@@ -177,21 +180,9 @@ public class JVMClusterUtil {
 // Wait for an active master
 //  having an active master before starting the region threads allows
 //  then to succeed on their connection to master
-long startTime = System.currentTimeMillis();
-while (findActiveMaster(masters) == null) {
-  try {
-Thread.sleep(100);
-  } catch (InterruptedException e) {
-throw (InterruptedIOException)new 
InterruptedIOException().initCause(e);
-  }
-  int startTimeout = configuration != null ? Integer.parseInt(
+final int startTimeout = configuration != null ? Integer.parseInt(
 configuration.get("hbase.master.start.timeout.localHBaseCluster", 
"3")) : 3;
-  if (System.currentTimeMillis() > startTime + startTimeout) {
-String msg = "Master not active after " + startTimeout + "ms";
-Threads.printThreadInfo(System.out, "Thread dump because: " + msg);
-throw new RuntimeException(msg);
-  }
-}
+waitForEvent(startTimeout, "active", () -> findActiveMaster(masters) != 
null);
 
 if (regionservers != null) {
   for (JVMClusterUtil.RegionServerThread t: regionservers) {
@@ -201,32 +192,50 @@ public class JVMClusterUtil {
 
 // Wait for an active master to be initialized (implies being master)
 //  with this, when we return the cluster is complete
-startTime = System.currentTimeMillis();
-final int maxwait = 20;
-while (true) {
-  JVMClusterUtil.MasterThread t = findActiveMaster(masters);
-  if (t != null && t.master.isInitialized()) {
-return t.master.getServerName().toString();
+final int initTimeout = configuration != null ? Integer.parseInt(
+configuration.get("hbase.master.init.timeout.localHBaseCluster", 
"20")) : 20;
+waitForEvent(initTimeout, "initialized", () -> {
+JVMClusterUtil.MasterThread t = findActiveMaster(masters);
+// master thread should never be null at this point, but let's keep 
the check anyway
+return t != null && t.master.isInitialized();
   }
-  // REMOVE
-  if (System.currentTimeMillis() > startTime + 1) {
-try {
-  Thread.sleep(1000);
-} catch (InterruptedException e) {
-  throw (InterruptedIOException)new 
InterruptedIOException().initCause(e);
-}
+);
+
+return findActiveMaster(masters).master.getServerName().toString();
+  }
+
+  /**
+   * Utility method to wait some time for an event to occur, and then return 
control to the 

hbase git commit: HBASE-21287 Allow configuring test master initialization wait time.

2018-10-11 Thread mdrob
Repository: hbase
Updated Branches:
  refs/heads/branch-2 4b93f95af -> 81adb704c


HBASE-21287 Allow configuring test master initialization wait time.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/81adb704
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/81adb704
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/81adb704

Branch: refs/heads/branch-2
Commit: 81adb704c8e578db82e15245ea48a3efe732965d
Parents: 4b93f95
Author: Mike Drob 
Authored: Wed Oct 10 17:49:54 2018 -0500
Committer: Mike Drob 
Committed: Thu Oct 11 09:50:37 2018 -0500

--
 .../hadoop/hbase/util/JVMClusterUtil.java   | 69 +++-
 1 file changed, 39 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/81adb704/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
index ee7ecf3..8c92f66 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
@@ -23,12 +23,13 @@ import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
 
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 
@@ -162,6 +163,8 @@ public class JVMClusterUtil {
*/
   public static String startup(final List masters,
   final List regionservers) throws 
IOException {
+// Implementation note: This method relies on timed sleeps in a loop. It's 
not great, and
+// should probably be re-written to use actual synchronization objects, 
but it's ok for now
 
 Configuration configuration = null;
 
@@ -177,21 +180,9 @@ public class JVMClusterUtil {
 // Wait for an active master
 //  having an active master before starting the region threads allows
 //  then to succeed on their connection to master
-long startTime = System.currentTimeMillis();
-while (findActiveMaster(masters) == null) {
-  try {
-Thread.sleep(100);
-  } catch (InterruptedException e) {
-throw (InterruptedIOException)new 
InterruptedIOException().initCause(e);
-  }
-  int startTimeout = configuration != null ? Integer.parseInt(
+final int startTimeout = configuration != null ? Integer.parseInt(
 configuration.get("hbase.master.start.timeout.localHBaseCluster", 
"3")) : 3;
-  if (System.currentTimeMillis() > startTime + startTimeout) {
-String msg = "Master not active after " + startTimeout + "ms";
-Threads.printThreadInfo(System.out, "Thread dump because: " + msg);
-throw new RuntimeException(msg);
-  }
-}
+waitForEvent(startTimeout, "active", () -> findActiveMaster(masters) != 
null);
 
 if (regionservers != null) {
   for (JVMClusterUtil.RegionServerThread t: regionservers) {
@@ -201,32 +192,50 @@ public class JVMClusterUtil {
 
 // Wait for an active master to be initialized (implies being master)
 //  with this, when we return the cluster is complete
-startTime = System.currentTimeMillis();
-final int maxwait = 20;
-while (true) {
-  JVMClusterUtil.MasterThread t = findActiveMaster(masters);
-  if (t != null && t.master.isInitialized()) {
-return t.master.getServerName().toString();
+final int initTimeout = configuration != null ? Integer.parseInt(
+configuration.get("hbase.master.init.timeout.localHBaseCluster", 
"20")) : 20;
+waitForEvent(initTimeout, "initialized", () -> {
+JVMClusterUtil.MasterThread t = findActiveMaster(masters);
+// master thread should never be null at this point, but let's keep 
the check anyway
+return t != null && t.master.isInitialized();
   }
-  // REMOVE
-  if (System.currentTimeMillis() > startTime + 1) {
-try {
-  Thread.sleep(1000);
-} catch (InterruptedException e) {
-  throw (InterruptedIOException)new 
InterruptedIOException().initCause(e);
-}
+);
+
+return findActiveMaster(masters).master.getServerName().toString();
+  }
+
+  /**
+   * Utility method to wait some time for an event to occur, and then return 
control to the caller.
+  

hbase git commit: HBASE-21287 Allow configuring test master initialization wait time.

2018-10-11 Thread mdrob
Repository: hbase
Updated Branches:
  refs/heads/master 72552301a -> db9a5b7da


HBASE-21287 Allow configuring test master initialization wait time.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/db9a5b7d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/db9a5b7d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/db9a5b7d

Branch: refs/heads/master
Commit: db9a5b7da76d0876c9515720a8a60acb299ab217
Parents: 7255230
Author: Mike Drob 
Authored: Wed Oct 10 17:49:54 2018 -0500
Committer: Mike Drob 
Committed: Thu Oct 11 09:43:04 2018 -0500

--
 .../hadoop/hbase/util/JVMClusterUtil.java   | 69 +++-
 1 file changed, 39 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/db9a5b7d/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
index ee7ecf3..8c92f66 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
@@ -23,12 +23,13 @@ import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
 
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 
@@ -162,6 +163,8 @@ public class JVMClusterUtil {
*/
   public static String startup(final List masters,
   final List regionservers) throws 
IOException {
+// Implementation note: This method relies on timed sleeps in a loop. It's 
not great, and
+// should probably be re-written to use actual synchronization objects, 
but it's ok for now
 
 Configuration configuration = null;
 
@@ -177,21 +180,9 @@ public class JVMClusterUtil {
 // Wait for an active master
 //  having an active master before starting the region threads allows
 //  then to succeed on their connection to master
-long startTime = System.currentTimeMillis();
-while (findActiveMaster(masters) == null) {
-  try {
-Thread.sleep(100);
-  } catch (InterruptedException e) {
-throw (InterruptedIOException)new 
InterruptedIOException().initCause(e);
-  }
-  int startTimeout = configuration != null ? Integer.parseInt(
+final int startTimeout = configuration != null ? Integer.parseInt(
 configuration.get("hbase.master.start.timeout.localHBaseCluster", 
"3")) : 3;
-  if (System.currentTimeMillis() > startTime + startTimeout) {
-String msg = "Master not active after " + startTimeout + "ms";
-Threads.printThreadInfo(System.out, "Thread dump because: " + msg);
-throw new RuntimeException(msg);
-  }
-}
+waitForEvent(startTimeout, "active", () -> findActiveMaster(masters) != 
null);
 
 if (regionservers != null) {
   for (JVMClusterUtil.RegionServerThread t: regionservers) {
@@ -201,32 +192,50 @@ public class JVMClusterUtil {
 
 // Wait for an active master to be initialized (implies being master)
 //  with this, when we return the cluster is complete
-startTime = System.currentTimeMillis();
-final int maxwait = 20;
-while (true) {
-  JVMClusterUtil.MasterThread t = findActiveMaster(masters);
-  if (t != null && t.master.isInitialized()) {
-return t.master.getServerName().toString();
+final int initTimeout = configuration != null ? Integer.parseInt(
+configuration.get("hbase.master.init.timeout.localHBaseCluster", 
"20")) : 20;
+waitForEvent(initTimeout, "initialized", () -> {
+JVMClusterUtil.MasterThread t = findActiveMaster(masters);
+// master thread should never be null at this point, but let's keep 
the check anyway
+return t != null && t.master.isInitialized();
   }
-  // REMOVE
-  if (System.currentTimeMillis() > startTime + 1) {
-try {
-  Thread.sleep(1000);
-} catch (InterruptedException e) {
-  throw (InterruptedIOException)new 
InterruptedIOException().initCause(e);
-}
+);
+
+return findActiveMaster(masters).master.getServerName().toString();
+  }
+
+  /**
+   * Utility method to wait some time for an event to occur, and then return 
control to the caller.
+   *