hbase git commit: HBASE-21363 Rewrite the buildingHoldCleanupTracker method in WALProcedureStore

2018-10-23 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 169e3bafc -> 847d5d14d


HBASE-21363 Rewrite the buildingHoldCleanupTracker method in WALProcedureStore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/847d5d14
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/847d5d14
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/847d5d14

Branch: refs/heads/branch-2.0
Commit: 847d5d14d7fe1ea70b104d19ca17dec0a4fa614a
Parents: 169e3ba
Author: Duo Zhang 
Authored: Wed Oct 24 14:13:25 2018 +0800
Committer: Duo Zhang 
Committed: Wed Oct 24 14:37:49 2018 +0800

--
 .../hbase/procedure2/store/BitSetNode.java  |   6 +-
 .../procedure2/store/ProcedureStoreTracker.java |  60 
 .../store/wal/ProcedureWALFormat.java   |  11 ++
 .../store/wal/ProcedureWALFormatReader.java |   5 +-
 .../procedure2/store/wal/WALProcedureStore.java |  41 +++--
 .../hbase/procedure2/TestProcedureCleanup.java  | 148 ---
 6 files changed, 163 insertions(+), 108 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/847d5d14/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
index 2030c8b..3102bde 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
@@ -131,9 +131,11 @@ class BitSetNode {
 
   public BitSetNode(BitSetNode other, boolean resetDelete) {
 this.start = other.start;
-this.partial = other.partial;
-this.modified = other.modified.clone();
 // The resetDelete will be set to true when building cleanup tracker.
+// as we will reset deleted flags for all the unmodified bits to 1, the 
partial flag is useless
+// so set it to false for not confusing the developers when debugging.
+this.partial = resetDelete ? false : other.partial;
+this.modified = other.modified.clone();
 // The intention here is that, if a procedure is not modified in this 
tracker, then we do not
 // need to take care of it, so we will set deleted to true for these bits, 
i.e, if modified is
 // 0, then we set deleted to 1, otherwise keep it as is. So here, the 
equation is

http://git-wip-us.apache.org/repos/asf/hbase/blob/847d5d14/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index 9f99e26..a0978e1 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -23,6 +23,7 @@ import java.util.Arrays;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.TreeMap;
+import java.util.function.BiFunction;
 import java.util.stream.LongStream;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -87,7 +88,10 @@ public class ProcedureStoreTracker {
*/
   public void resetTo(ProcedureStoreTracker tracker, boolean resetDelete) {
 reset();
-this.partial = tracker.partial;
+// resetDelete will be true if we are building the cleanup tracker, as we 
will reset deleted flags
+// for all the unmodified bits to 1, the partial flag is useless so set it 
to false for not
+// confusing the developers when debugging.
+this.partial = resetDelete ? false : tracker.partial;
 this.minModifiedProcId = tracker.minModifiedProcId;
 this.maxModifiedProcId = tracker.maxModifiedProcId;
 this.keepDeletes = tracker.keepDeletes;
@@ -197,43 +201,19 @@ public class ProcedureStoreTracker {
 }
   }
 
-  /**
-   * Similar with {@link #setDeletedIfModified(long...)}, but here the {@code 
procId} are given by
-   * the {@code tracker}. If a procedure is modified by us, and also by the 
given {@code tracker},
-   * then we mark it as deleted.
-   * @see #setDeletedIfModified(long...)
-   */
-  public void setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker, 
boolean globalTracker) {
+  private void setDeleteIf(ProcedureStoreTracker tracker,
+  BiFunction func) {
 BitSetNode trackerNode = null;
 for (BitSetNode node : map.values()) {
-  final long min

hbase git commit: HBASE-21363 Rewrite the buildingHoldCleanupTracker method in WALProcedureStore

2018-10-23 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 6c9e3d067 -> 040ec2227


HBASE-21363 Rewrite the buildingHoldCleanupTracker method in WALProcedureStore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/040ec222
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/040ec222
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/040ec222

Branch: refs/heads/branch-2.1
Commit: 040ec2227eec08aeb02d8f6d4c4e1dbb3d74db31
Parents: 6c9e3d0
Author: Duo Zhang 
Authored: Wed Oct 24 14:13:25 2018 +0800
Committer: Duo Zhang 
Committed: Wed Oct 24 14:37:26 2018 +0800

--
 .../hbase/procedure2/store/BitSetNode.java  |   6 +-
 .../procedure2/store/ProcedureStoreTracker.java |  60 
 .../store/wal/ProcedureWALFormat.java   |  11 ++
 .../store/wal/ProcedureWALFormatReader.java |   5 +-
 .../procedure2/store/wal/WALProcedureStore.java |  41 +++--
 .../hbase/procedure2/TestProcedureCleanup.java  | 148 ---
 6 files changed, 163 insertions(+), 108 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/040ec222/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
index 2030c8b..3102bde 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
@@ -131,9 +131,11 @@ class BitSetNode {
 
   public BitSetNode(BitSetNode other, boolean resetDelete) {
 this.start = other.start;
-this.partial = other.partial;
-this.modified = other.modified.clone();
 // The resetDelete will be set to true when building cleanup tracker.
+// as we will reset deleted flags for all the unmodified bits to 1, the 
partial flag is useless
+// so set it to false for not confusing the developers when debugging.
+this.partial = resetDelete ? false : other.partial;
+this.modified = other.modified.clone();
 // The intention here is that, if a procedure is not modified in this 
tracker, then we do not
 // need to take care of it, so we will set deleted to true for these bits, 
i.e, if modified is
 // 0, then we set deleted to 1, otherwise keep it as is. So here, the 
equation is

http://git-wip-us.apache.org/repos/asf/hbase/blob/040ec222/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index 9f99e26..a0978e1 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -23,6 +23,7 @@ import java.util.Arrays;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.TreeMap;
+import java.util.function.BiFunction;
 import java.util.stream.LongStream;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -87,7 +88,10 @@ public class ProcedureStoreTracker {
*/
   public void resetTo(ProcedureStoreTracker tracker, boolean resetDelete) {
 reset();
-this.partial = tracker.partial;
+// resetDelete will be true if we are building the cleanup tracker, as we 
will reset deleted flags
+// for all the unmodified bits to 1, the partial flag is useless so set it 
to false for not
+// confusing the developers when debugging.
+this.partial = resetDelete ? false : tracker.partial;
 this.minModifiedProcId = tracker.minModifiedProcId;
 this.maxModifiedProcId = tracker.maxModifiedProcId;
 this.keepDeletes = tracker.keepDeletes;
@@ -197,43 +201,19 @@ public class ProcedureStoreTracker {
 }
   }
 
-  /**
-   * Similar with {@link #setDeletedIfModified(long...)}, but here the {@code 
procId} are given by
-   * the {@code tracker}. If a procedure is modified by us, and also by the 
given {@code tracker},
-   * then we mark it as deleted.
-   * @see #setDeletedIfModified(long...)
-   */
-  public void setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker, 
boolean globalTracker) {
+  private void setDeleteIf(ProcedureStoreTracker tracker,
+  BiFunction func) {
 BitSetNode trackerNode = null;
 for (BitSetNode node : map.values()) {
-  final long min

hbase git commit: HBASE-21363 Rewrite the buildingHoldCleanupTracker method in WALProcedureStore

2018-10-23 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 1f437ac22 -> b2fcf765a


HBASE-21363 Rewrite the buildingHoldCleanupTracker method in WALProcedureStore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b2fcf765
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b2fcf765
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b2fcf765

Branch: refs/heads/master
Commit: b2fcf765ae9c2764b52151523863d9f2f1f835bb
Parents: 1f437ac
Author: Duo Zhang 
Authored: Wed Oct 24 14:13:25 2018 +0800
Committer: Duo Zhang 
Committed: Wed Oct 24 14:14:19 2018 +0800

--
 .../hbase/procedure2/store/BitSetNode.java  |   6 +-
 .../procedure2/store/ProcedureStoreTracker.java |  60 
 .../store/wal/ProcedureWALFormat.java   |  11 ++
 .../store/wal/ProcedureWALFormatReader.java |   5 +-
 .../procedure2/store/wal/WALProcedureStore.java |  41 +++--
 .../hbase/procedure2/TestProcedureCleanup.java  | 148 ---
 6 files changed, 163 insertions(+), 108 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b2fcf765/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
index 2030c8b..3102bde 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
@@ -131,9 +131,11 @@ class BitSetNode {
 
   public BitSetNode(BitSetNode other, boolean resetDelete) {
 this.start = other.start;
-this.partial = other.partial;
-this.modified = other.modified.clone();
 // The resetDelete will be set to true when building cleanup tracker.
+// as we will reset deleted flags for all the unmodified bits to 1, the 
partial flag is useless
+// so set it to false for not confusing the developers when debugging.
+this.partial = resetDelete ? false : other.partial;
+this.modified = other.modified.clone();
 // The intention here is that, if a procedure is not modified in this 
tracker, then we do not
 // need to take care of it, so we will set deleted to true for these bits, 
i.e, if modified is
 // 0, then we set deleted to 1, otherwise keep it as is. So here, the 
equation is

http://git-wip-us.apache.org/repos/asf/hbase/blob/b2fcf765/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index 9f99e26..a0978e1 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -23,6 +23,7 @@ import java.util.Arrays;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.TreeMap;
+import java.util.function.BiFunction;
 import java.util.stream.LongStream;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -87,7 +88,10 @@ public class ProcedureStoreTracker {
*/
   public void resetTo(ProcedureStoreTracker tracker, boolean resetDelete) {
 reset();
-this.partial = tracker.partial;
+// resetDelete will be true if we are building the cleanup tracker, as we 
will reset deleted flags
+// for all the unmodified bits to 1, the partial flag is useless so set it 
to false for not
+// confusing the developers when debugging.
+this.partial = resetDelete ? false : tracker.partial;
 this.minModifiedProcId = tracker.minModifiedProcId;
 this.maxModifiedProcId = tracker.maxModifiedProcId;
 this.keepDeletes = tracker.keepDeletes;
@@ -197,43 +201,19 @@ public class ProcedureStoreTracker {
 }
   }
 
-  /**
-   * Similar with {@link #setDeletedIfModified(long...)}, but here the {@code 
procId} are given by
-   * the {@code tracker}. If a procedure is modified by us, and also by the 
given {@code tracker},
-   * then we mark it as deleted.
-   * @see #setDeletedIfModified(long...)
-   */
-  public void setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker, 
boolean globalTracker) {
+  private void setDeleteIf(ProcedureStoreTracker tracker,
+  BiFunction func) {
 BitSetNode trackerNode = null;
 for (BitSetNode node : map.values()) {
-  final long minProcId =

hbase git commit: HBASE-21363 Rewrite the buildingHoldCleanupTracker method in WALProcedureStore

2018-10-23 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 f3da04a55 -> 23b58fcca


HBASE-21363 Rewrite the buildingHoldCleanupTracker method in WALProcedureStore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/23b58fcc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/23b58fcc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/23b58fcc

Branch: refs/heads/branch-2
Commit: 23b58fcca0e768aedc04b9b64cc9191cd606d8b1
Parents: f3da04a
Author: Duo Zhang 
Authored: Wed Oct 24 14:13:25 2018 +0800
Committer: Duo Zhang 
Committed: Wed Oct 24 14:14:30 2018 +0800

--
 .../hbase/procedure2/store/BitSetNode.java  |   6 +-
 .../procedure2/store/ProcedureStoreTracker.java |  60 
 .../store/wal/ProcedureWALFormat.java   |  11 ++
 .../store/wal/ProcedureWALFormatReader.java |   5 +-
 .../procedure2/store/wal/WALProcedureStore.java |  41 +++--
 .../hbase/procedure2/TestProcedureCleanup.java  | 148 ---
 6 files changed, 163 insertions(+), 108 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/23b58fcc/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
index 2030c8b..3102bde 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/BitSetNode.java
@@ -131,9 +131,11 @@ class BitSetNode {
 
   public BitSetNode(BitSetNode other, boolean resetDelete) {
 this.start = other.start;
-this.partial = other.partial;
-this.modified = other.modified.clone();
 // The resetDelete will be set to true when building cleanup tracker.
+// as we will reset deleted flags for all the unmodified bits to 1, the 
partial flag is useless
+// so set it to false for not confusing the developers when debugging.
+this.partial = resetDelete ? false : other.partial;
+this.modified = other.modified.clone();
 // The intention here is that, if a procedure is not modified in this 
tracker, then we do not
 // need to take care of it, so we will set deleted to true for these bits, 
i.e, if modified is
 // 0, then we set deleted to 1, otherwise keep it as is. So here, the 
equation is

http://git-wip-us.apache.org/repos/asf/hbase/blob/23b58fcc/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index 9f99e26..a0978e1 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -23,6 +23,7 @@ import java.util.Arrays;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.TreeMap;
+import java.util.function.BiFunction;
 import java.util.stream.LongStream;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -87,7 +88,10 @@ public class ProcedureStoreTracker {
*/
   public void resetTo(ProcedureStoreTracker tracker, boolean resetDelete) {
 reset();
-this.partial = tracker.partial;
+// resetDelete will be true if we are building the cleanup tracker, as we 
will reset deleted flags
+// for all the unmodified bits to 1, the partial flag is useless so set it 
to false for not
+// confusing the developers when debugging.
+this.partial = resetDelete ? false : tracker.partial;
 this.minModifiedProcId = tracker.minModifiedProcId;
 this.maxModifiedProcId = tracker.maxModifiedProcId;
 this.keepDeletes = tracker.keepDeletes;
@@ -197,43 +201,19 @@ public class ProcedureStoreTracker {
 }
   }
 
-  /**
-   * Similar with {@link #setDeletedIfModified(long...)}, but here the {@code 
procId} are given by
-   * the {@code tracker}. If a procedure is modified by us, and also by the 
given {@code tracker},
-   * then we mark it as deleted.
-   * @see #setDeletedIfModified(long...)
-   */
-  public void setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker, 
boolean globalTracker) {
+  private void setDeleteIf(ProcedureStoreTracker tracker,
+  BiFunction func) {
 BitSetNode trackerNode = null;
 for (BitSetNode node : map.values()) {
-  final long minProc

hbase git commit: HBASE-21357 RS should abort if OOM in Reader thread

2018-10-23 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-1 f73d6193e -> 5f5f4e82b


HBASE-21357 RS should abort if OOM in Reader thread


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5f5f4e82
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5f5f4e82
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5f5f4e82

Branch: refs/heads/branch-1
Commit: 5f5f4e82b126b27f833f643abeded67dae3af05d
Parents: f73d619
Author: Allan Yang 
Authored: Wed Oct 24 11:10:20 2018 +0800
Committer: Allan Yang 
Committed: Wed Oct 24 11:10:20 2018 +0800

--
 .../main/java/org/apache/hadoop/hbase/ipc/RpcServer.java | 11 +++
 1 file changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5f5f4e82/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 3f11233..a32040c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -732,6 +732,17 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
 LOG.error(getName() + ": CancelledKeyException in Reader", e);
   } catch (IOException ex) {
 LOG.info(getName() + ": IOException in Reader", ex);
+  } catch (OutOfMemoryError e) {
+if (getErrorHandler() != null) {
+  if (getErrorHandler().checkOOME(e)) {
+RpcServer.LOG.info(Thread.currentThread().getName()
++ ": exiting on OutOfMemoryError");
+return;
+  }
+} else {
+  // rethrow if no handler
+  throw e;
+}
   }
 }
   }



hbase git commit: HBASE-21364 Procedure holds the lock should put to front of the queue after restart

2018-10-23 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 d35f65f39 -> 6c9e3d067


HBASE-21364 Procedure holds the lock should put to front of the queue after 
restart


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6c9e3d06
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6c9e3d06
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6c9e3d06

Branch: refs/heads/branch-2.1
Commit: 6c9e3d0670bef6c159981850b9c138f60b2c8317
Parents: d35f65f
Author: Allan Yang 
Authored: Wed Oct 24 10:52:52 2018 +0800
Committer: Allan Yang 
Committed: Wed Oct 24 10:52:52 2018 +0800

--
 .../procedure2/AbstractProcedureScheduler.java  |  10 +
 .../hadoop/hbase/procedure2/Procedure.java  |   4 +
 .../hbase/procedure2/ProcedureExecutor.java |  11 +-
 .../hbase/procedure2/ProcedureScheduler.java|  14 ++
 .../TestMasterProcedureSchedulerOnRestart.java  | 207 +++
 5 files changed, 245 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6c9e3d06/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
index 5645f89..7ab1329 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
@@ -86,6 +86,11 @@ public abstract class AbstractProcedureScheduler implements 
ProcedureScheduler {
   }
 
   @Override
+  public void addFront(final Procedure procedure, boolean notify) {
+push(procedure, true, notify);
+  }
+
+  @Override
   public void addFront(Iterator procedureIterator) {
 schedLock();
 try {
@@ -109,6 +114,11 @@ public abstract class AbstractProcedureScheduler 
implements ProcedureScheduler {
 push(procedure, false, true);
   }
 
+  @Override
+  public void addBack(final Procedure procedure, boolean notify) {
+push(procedure, false, notify);
+  }
+
   protected void push(final Procedure procedure, final boolean addFront, final 
boolean notify) {
 schedLock();
 try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c9e3d06/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index 01dc1be..472a0d1 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -719,6 +719,10 @@ public abstract class Procedure implements 
Comparablehttp://git-wip-us.apache.org/repos/asf/hbase/blob/6c9e3d06/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 3548e6e..c6c34df 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -652,8 +652,17 @@ public class ProcedureExecutor {
   if (!p.hasParent()) {
 sendProcedureLoadedNotification(p.getProcId());
   }
-  scheduler.addBack(p);
+  // If the procedure holds the lock, put the procedure in front
+  if (p.isLockedWhenLoading()) {
+scheduler.addFront(p, false);
+  } else {
+// if it was not, it can wait.
+scheduler.addBack(p, false);
+  }
 });
+// After all procedures put into the queue, signal the worker threads.
+// Otherwise, there is a race condition. See HBASE-21364.
+scheduler.signalAll();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c9e3d06/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
index e7e1cdb..9489f52 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Proce

hbase git commit: HBASE-21364 Procedure holds the lock should put to front of the queue after restart

2018-10-23 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 c8bfc70d1 -> 169e3bafc


HBASE-21364 Procedure holds the lock should put to front of the queue after 
restart


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/169e3baf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/169e3baf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/169e3baf

Branch: refs/heads/branch-2.0
Commit: 169e3bafc889df2b299c9ecfdd57f8d6ff2060dd
Parents: c8bfc70
Author: Allan Yang 
Authored: Wed Oct 24 10:46:09 2018 +0800
Committer: Allan Yang 
Committed: Wed Oct 24 10:46:09 2018 +0800

--
 .../procedure2/AbstractProcedureScheduler.java  |  10 +
 .../hadoop/hbase/procedure2/Procedure.java  |   4 +
 .../hbase/procedure2/ProcedureExecutor.java |  11 +-
 .../hbase/procedure2/ProcedureScheduler.java|  14 ++
 .../TestMasterProcedureSchedulerOnRestart.java  | 207 +++
 5 files changed, 245 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/169e3baf/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
index 5645f89..7ab1329 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
@@ -86,6 +86,11 @@ public abstract class AbstractProcedureScheduler implements 
ProcedureScheduler {
   }
 
   @Override
+  public void addFront(final Procedure procedure, boolean notify) {
+push(procedure, true, notify);
+  }
+
+  @Override
   public void addFront(Iterator procedureIterator) {
 schedLock();
 try {
@@ -109,6 +114,11 @@ public abstract class AbstractProcedureScheduler 
implements ProcedureScheduler {
 push(procedure, false, true);
   }
 
+  @Override
+  public void addBack(final Procedure procedure, boolean notify) {
+push(procedure, false, notify);
+  }
+
   protected void push(final Procedure procedure, final boolean addFront, final 
boolean notify) {
 schedLock();
 try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/169e3baf/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index a1391a5..a271d8f 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -720,6 +720,10 @@ public abstract class Procedure implements 
Comparablehttp://git-wip-us.apache.org/repos/asf/hbase/blob/169e3baf/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 43663ef..a410bc9 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -652,8 +652,17 @@ public class ProcedureExecutor {
   if (!p.hasParent()) {
 sendProcedureLoadedNotification(p.getProcId());
   }
-  scheduler.addBack(p);
+  // If the procedure holds the lock, put the procedure in front
+  if (p.isLockedWhenLoading()) {
+scheduler.addFront(p, false);
+  } else {
+// if it was not, it can wait.
+scheduler.addBack(p, false);
+  }
 });
+// After all procedures put into the queue, signal the worker threads.
+// Otherwise, there is a race condition. See HBASE-21364.
+scheduler.signalAll();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/169e3baf/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
index e7e1cdb..9489f52 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Proce

[hbase-operator-tools] branch master updated: Removed docs dirs. Don't seem to work in spite of INFRA-17171.

2018-10-23 Thread stack
This is an automated email from the ASF dual-hosted git repository.

stack pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-operator-tools.git


The following commit(s) were added to refs/heads/master by this push:
 new 7c5c368  Removed docs dirs. Don't seem to work in spite of INFRA-17171.
7c5c368 is described below

commit 7c5c3689b047614e34f7ac51de6a3cc5ccc84c1b
Author: Michael Stack 
AuthorDate: Tue Oct 23 17:48:13 2018 -0700

Removed docs dirs. Don't seem to work in spite of INFRA-17171.
---
 docs/index.html|   5 --
 hbase-hbck2/docs/README.md | 193 -
 2 files changed, 198 deletions(-)

diff --git a/docs/index.html b/docs/index.html
deleted file mode 100644
index d373edf..000
--- a/docs/index.html
+++ /dev/null
@@ -1,5 +0,0 @@
-
-  
-hi
-  
-
diff --git a/hbase-hbck2/docs/README.md b/hbase-hbck2/docs/README.md
deleted file mode 100644
index 9ac26f2..000
--- a/hbase-hbck2/docs/README.md
+++ /dev/null
@@ -1,193 +0,0 @@
-# Apache HBase HBCK2
-
-HBCK2 is the successor to 
[hbck](https://hbase.apache.org/book.html#hbck.in.depth),
-the hbase-1.x fixup tool (A.K.A _hbck1_). Use it in place of _hbck1_ making 
repairs
-against hbase-2.x installs.
-
-## _hbck1_
-The _hbck_ that ships with hbase-1.x (A.K.A _hbck1_) should not be run against 
an
-hbase-2.x cluster. It may do damage. While _hbck1_ is still bundled inside 
hbase-2.x
--- to minimize surprise (it has a fat pointer to _HBCK2_ at the head of its 
help
-output) -- its write-facility (`-fix`) has been removed. It can report on the 
state
-of an hbase-2.x cluster but its assessments are likely inaccurate since it 
does not
-understand the workings of an hbase-2.x.
-
-_HBCK2_ does much less than _hbck1_ because many of the class of problems
-_hbck1_ addressed are either no longer issues in hbase-2.x, or we've made
-(or will make) a dedicated tool to do what _hbck1_ used to do. _HBCK2_ also
-works in a manner that differs from how _hbck1_ worked, asking the HBase
-Master to do its bidding, rather than replicate functionality outside of the
-
-
-## Running _HBCK2_
-`org.apache.hbase.HBCK2` is the name of the main class. Running the below
-will dump out the _HBCK2_ usage:
-
-
- $ HBASE_CLASSPATH_PREFIX=/tmp/hbase-hbck2-1.0.0-SNAPSHOT.jar ./bin/hbase 
org.apache.hbase.HBCK2
-
-
-## _HBCK2_ Overview
-_HBCK2_ is currently a simple tool that does one thing at a time only.
-
-_HBCK2_ does not do diagnosis, leaving that function to other tooling,
-described below.
-
-In hbase-2.x, the Master is the final arbiter of all state, so a general 
principle of
-_HBCK2_ is that it asks the Master to effect all repair. This means a Master 
must be
-up before you can run an _HBCK2_ command.
-
-_HBCK2_ works by making use of an intentionally obscured `HbckService` hosted 
on the
-Master. The Service publishes a few methods for the _HBCK2_ tool to pull on. 
The
-first thing _HBCK2_ does is poke the cluster to ensure the service is 
available.
-It will fail if it is not or if the `HbckService` is lacking a wanted facility.
-_HBCK2_ versions should be able to work across multiple hbase-2 releases; it 
will
-fail with a message if it is unable to run. There is no `HbckService` in 
versions
-of hbase before 2.0.3 and 2.1.1; _HBCK2_ will not work against these versions.
-
-## Finding Problems
-
-While _hbck1_ performed an analysis reporting your cluster good or bad, _HBCK2_
-does no such thing (not currently). The operator figures what needs fixing and
-then uses tools including _HBCK2_ to do fixup.
-
-To figure out if there are issues in assignment, check Master logs, the Master UI home
-page _table_ tab at `https://YOUR_HOST:YOUR_PORT/master-status#tables`,
-the current _Procedures & Locks_ tab at
-`https://YOUR_HOST:YOUR_PORT/procedures.jsp` off the Master UI home page,
-the HBase Canary tool, and reading Region state out of the `hbase:meta`
-table. Let's look at each in turn. We'll follow this review with a set of
-scenarios in which we use the below tooling to do various fixes.
-
-### Master Logs
-
-The Master runs all assignments, server crash handling, cluster start and
-stop, etc. In hbase-2.x, all that the Master does has been cast as
-Procedures run on a state machine engine. See [Procedure 
Framework](https://hbase.apache.org/book.html#pv2)
-and [Assignment Manager](https://hbase.apache.org/book.html#amv2)
-for detail on how this infrastructure works. Each Procedure has a
-Procedure `id`, its `pid`. You can trace the lifecycle of a
-Procedure as it logs each of its macro steps denoted by its
-`pid`. Procedures start, step through states and finish. Some
-Procedures spawn sub-procedures, wait on their Children, and then
-themselves finish.
-
-Generally all runs problem free but if some unforeseen circumstance
-arises, the assignment framework may sustain damage requiring
-operator intervention.  Below we will discuss some such scenarios
-but they manifest in the Mas

[hbase-operator-tools] branch master updated: Playing w/ docs dir

2018-10-23 Thread stack
This is an automated email from the ASF dual-hosted git repository.

stack pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-operator-tools.git


The following commit(s) were added to refs/heads/master by this push:
 new d559b02  Playing w/ docs dir
d559b02 is described below

commit d559b02605c3001d3367f70c339a24b4f85c5582
Author: Michael Stack 
AuthorDate: Tue Oct 23 17:45:03 2018 -0700

Playing w/ docs dir
---
 docs/index.html | 5 +
 1 file changed, 5 insertions(+)

diff --git a/docs/index.html b/docs/index.html
new file mode 100644
index 000..d373edf
--- /dev/null
+++ b/docs/index.html
@@ -0,0 +1,5 @@
+
+  
+hi
+  
+



[hbase-operator-tools] branch master updated: HBASE-21353 TestHBCKCommandLineParsing#testCommandWithOptions hangs on call to HBCK2#checkHBCKSupport

2018-10-23 Thread stack
This is an automated email from the ASF dual-hosted git repository.

stack pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-operator-tools.git


The following commit(s) were added to refs/heads/master by this push:
 new 5aec209  HBASE-21353 TestHBCKCommandLineParsing#testCommandWithOptions 
hangs on call to HBCK2#checkHBCKSupport
5aec209 is described below

commit 5aec2090f297a467a3e5f2754871f0c99ec7831e
Author: Michael Stack 
AuthorDate: Tue Oct 23 15:18:12 2018 -0700

HBASE-21353 TestHBCKCommandLineParsing#testCommandWithOptions
hangs on call to HBCK2#checkHBCKSupport
---
 hbase-hbck2/src/main/java/org/apache/hbase/HBCK2.java   | 17 -
 .../org/apache/hbase/TestHBCKCommandLineParsing.java| 16 +---
 2 files changed, 9 insertions(+), 24 deletions(-)

diff --git a/hbase-hbck2/src/main/java/org/apache/hbase/HBCK2.java 
b/hbase-hbck2/src/main/java/org/apache/hbase/HBCK2.java
index 24717b8..35df153 100644
--- a/hbase-hbck2/src/main/java/org/apache/hbase/HBCK2.java
+++ b/hbase-hbck2/src/main/java/org/apache/hbase/HBCK2.java
@@ -88,13 +88,10 @@ public class HBCK2 extends Configured implements Tool {
   /**
* Check for HBCK support.
*/
-  void checkHBCKSupport() throws IOException {
-try (ClusterConnection connection =
- (ClusterConnection)ConnectionFactory.createConnection(getConf())) 
{
-  try (Admin admin = connection.getAdmin()) {
-
checkVersion(admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.HBASE_VERSION)).
-getHBaseVersion());
-  }
+  void checkHBCKSupport(Connection connection) throws IOException {
+try (Admin admin = connection.getAdmin()) {
+  
checkVersion(admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.HBASE_VERSION)).
+  getHBaseVersion());
 }
   }
 
@@ -110,6 +107,7 @@ public class HBCK2 extends Configured implements Tool {
   TableState setTableState(TableName tableName, TableState.State state) throws 
IOException {
 try (ClusterConnection conn =
  (ClusterConnection) 
ConnectionFactory.createConnection(getConf())) {
+  checkHBCKSupport(conn);
   try (Hbck hbck = conn.getHbck()) {
 return hbck.setTableStateInMeta(new TableState(tableName, state));
   }
@@ -132,6 +130,7 @@ public class HBCK2 extends Configured implements Tool {
 boolean overrideFlag = commandLine.hasOption(override.getOpt());
 try (ClusterConnection conn =
  (ClusterConnection) 
ConnectionFactory.createConnection(getConf())) {
+  checkHBCKSupport(conn);
   try (Hbck hbck = conn.getHbck()) {
 return hbck.assigns(commandLine.getArgList(), overrideFlag);
   }
@@ -154,6 +153,7 @@ public class HBCK2 extends Configured implements Tool {
 boolean overrideFlag = commandLine.hasOption(override.getOpt());
 try (ClusterConnection conn =
  (ClusterConnection) 
ConnectionFactory.createConnection(getConf())) {
+  checkHBCKSupport(conn);
   try (Hbck hbck = conn.getHbck()) {
 return hbck.unassigns(commandLine.getArgList(), overrideFlag);
   }
@@ -196,6 +196,7 @@ public class HBCK2 extends Configured implements Tool {
 boolean recursiveFlag = commandLine.hasOption(override.getOpt());
 List pids = Arrays.stream(pidStrs).map(i -> 
Long.valueOf(i)).collect(Collectors.toList());
 try (ClusterConnection c = (ClusterConnection) 
ConnectionFactory.createConnection(getConf())) {
+  checkHBCKSupport(c);
   try (Hbck hbck = c.getHbck()) {
 return hbck.bypassProcedure(pids, lockWait, overrideFlag, 
recursiveFlag);
   }
@@ -348,8 +349,6 @@ public class HBCK2 extends Configured implements Tool {
 if (commandLine.hasOption(parent.getOpt())) {
   getConf().set(HConstants.ZOOKEEPER_ZNODE_PARENT, 
commandLine.getOptionValue(parent.getOpt()));
 }
-// Check we can run hbck at all.
-checkHBCKSupport();
 
 // Now process commands.
 String[] commands = commandLine.getArgs();
diff --git 
a/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCKCommandLineParsing.java 
b/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCKCommandLineParsing.java
index d061767..e8dccb8 100644
--- a/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCKCommandLineParsing.java
+++ b/hbase-hbck2/src/test/java/org/apache/hbase/TestHBCKCommandLineParsing.java
@@ -17,29 +17,15 @@
  */
 package org.apache.hbase;
 
-import junit.framework.TestCase;
 import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Threads;
 import org.apache.logging.log4j.LogManager;
-import org.junit.AfterClass;
-import org.juni

hbase git commit: HBASE-21342 FileSystem in use may get closed by other bulk load call in secure bulkLoad

2018-10-23 Thread mdrob
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 98861c0f9 -> c8bfc70d1


HBASE-21342 FileSystem in use may get closed by other bulk load call in secure 
bulkLoad

Signed-off-by: Mike Drob 
Signed-off-by: Ted Yu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c8bfc70d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c8bfc70d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c8bfc70d

Branch: refs/heads/branch-2.0
Commit: c8bfc70d1648b8debdd044d6669ed0df5c740095
Parents: 98861c0
Author: mazhenlin 
Authored: Fri Oct 19 14:51:00 2018 +0800
Committer: Mike Drob 
Committed: Tue Oct 23 16:51:28 2018 -0500

--
 .../regionserver/SecureBulkLoadManager.java |  45 +++-
 .../regionserver/TestSecureBulkLoadManager.java | 248 +++
 2 files changed, 292 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c8bfc70d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
index a4ee517..566a6b6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
@@ -25,6 +25,9 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.BiFunction;
+import java.util.function.Consumer;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -53,6 +56,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
@@ -106,6 +110,7 @@ public class SecureBulkLoadManager {
   private Path baseStagingDir;
 
   private UserProvider userProvider;
+  private ConcurrentHashMap ugiReferenceCounter;
   private Connection conn;
 
   SecureBulkLoadManager(Configuration conf, Connection conn) {
@@ -116,6 +121,7 @@ public class SecureBulkLoadManager {
   public void start() throws IOException {
 random = new SecureRandom();
 userProvider = UserProvider.instantiate(conf);
+ugiReferenceCounter = new ConcurrentHashMap<>();
 fs = FileSystem.get(conf);
 baseStagingDir = new Path(FSUtils.getRootDir(conf), 
HConstants.BULKLOAD_STAGING_DIR_NAME);
 
@@ -158,7 +164,7 @@ public class SecureBulkLoadManager {
 } finally {
   UserGroupInformation ugi = getActiveUser().getUGI();
   try {
-if (!UserGroupInformation.getLoginUser().equals(ugi)) {
+if (!UserGroupInformation.getLoginUser().equals(ugi) && 
!isUserReferenced(ugi)) {
   FileSystem.closeAllForUGI(ugi);
 }
   } catch (IOException e) {
@@ -167,6 +173,38 @@ public class SecureBulkLoadManager {
 }
   }
 
+  private Consumer fsCreatedListener;
+
+  @VisibleForTesting
+  void setFsCreatedListener(Consumer fsCreatedListener) {
+this.fsCreatedListener = fsCreatedListener;
+  }
+
+
+  private void incrementUgiReference(UserGroupInformation ugi) {
+ugiReferenceCounter.merge(ugi, 1, new BiFunction() {
+  @Override
+  public Integer apply(Integer oldvalue, Integer value) {
+return ++oldvalue;
+  }
+});
+  }
+
+  private void decrementUgiReference(UserGroupInformation ugi) {
+ugiReferenceCounter.computeIfPresent(ugi,
+new BiFunction() {
+  @Override
+  public Integer apply(UserGroupInformation key, Integer value) {
+return value > 1 ? --value : null;
+  }
+  });
+  }
+
+  private boolean isUserReferenced(UserGroupInformation ugi) {
+Integer count = ugiReferenceCounter.get(ugi);
+return count != null && count > 0;
+  }
+
   public Map> secureBulkLoadHFiles(final HRegion region,
   final BulkLoadHFileRequest request) throws IOException {
 final List> familyPaths = new 
ArrayList<>(request.getFamilyPathCount());
@@ -208,6 +246,7 @@ public class SecureBulkLoadManager {
 Map> map = null;
 
 try {
+  incrementUgiReference(ugi);
   // Get the target fs (HBase region server fs) delegation token
   // Since we have checked the p

hbase git commit: HBASE-21342 FileSystem in use may get closed by other bulk load call in secure bulkLoad

2018-10-23 Thread mdrob
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 ae13a5c6e -> d35f65f39


HBASE-21342 FileSystem in use may get closed by other bulk load call in secure 
bulkLoad

Signed-off-by: Mike Drob 
Signed-off-by: Ted Yu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d35f65f3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d35f65f3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d35f65f3

Branch: refs/heads/branch-2.1
Commit: d35f65f39657eea02f39d3da292d8ef4b4555e48
Parents: ae13a5c
Author: mazhenlin 
Authored: Fri Oct 19 14:51:00 2018 +0800
Committer: Mike Drob 
Committed: Tue Oct 23 16:46:28 2018 -0500

--
 .../regionserver/SecureBulkLoadManager.java |  45 +++-
 .../regionserver/TestSecureBulkLoadManager.java | 248 +++
 2 files changed, 292 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d35f65f3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
index a4ee517..566a6b6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
@@ -25,6 +25,9 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.BiFunction;
+import java.util.function.Consumer;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -53,6 +56,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
@@ -106,6 +110,7 @@ public class SecureBulkLoadManager {
   private Path baseStagingDir;
 
   private UserProvider userProvider;
+  private ConcurrentHashMap ugiReferenceCounter;
   private Connection conn;
 
   SecureBulkLoadManager(Configuration conf, Connection conn) {
@@ -116,6 +121,7 @@ public class SecureBulkLoadManager {
   public void start() throws IOException {
 random = new SecureRandom();
 userProvider = UserProvider.instantiate(conf);
+ugiReferenceCounter = new ConcurrentHashMap<>();
 fs = FileSystem.get(conf);
 baseStagingDir = new Path(FSUtils.getRootDir(conf), 
HConstants.BULKLOAD_STAGING_DIR_NAME);
 
@@ -158,7 +164,7 @@ public class SecureBulkLoadManager {
 } finally {
   UserGroupInformation ugi = getActiveUser().getUGI();
   try {
-if (!UserGroupInformation.getLoginUser().equals(ugi)) {
+if (!UserGroupInformation.getLoginUser().equals(ugi) && 
!isUserReferenced(ugi)) {
   FileSystem.closeAllForUGI(ugi);
 }
   } catch (IOException e) {
@@ -167,6 +173,38 @@ public class SecureBulkLoadManager {
 }
   }
 
+  private Consumer fsCreatedListener;
+
+  @VisibleForTesting
+  void setFsCreatedListener(Consumer fsCreatedListener) {
+this.fsCreatedListener = fsCreatedListener;
+  }
+
+
+  private void incrementUgiReference(UserGroupInformation ugi) {
+ugiReferenceCounter.merge(ugi, 1, new BiFunction() {
+  @Override
+  public Integer apply(Integer oldvalue, Integer value) {
+return ++oldvalue;
+  }
+});
+  }
+
+  private void decrementUgiReference(UserGroupInformation ugi) {
+ugiReferenceCounter.computeIfPresent(ugi,
+new BiFunction() {
+  @Override
+  public Integer apply(UserGroupInformation key, Integer value) {
+return value > 1 ? --value : null;
+  }
+  });
+  }
+
+  private boolean isUserReferenced(UserGroupInformation ugi) {
+Integer count = ugiReferenceCounter.get(ugi);
+return count != null && count > 0;
+  }
+
   public Map> secureBulkLoadHFiles(final HRegion region,
   final BulkLoadHFileRequest request) throws IOException {
 final List> familyPaths = new 
ArrayList<>(request.getFamilyPathCount());
@@ -208,6 +246,7 @@ public class SecureBulkLoadManager {
 Map> map = null;
 
 try {
+  incrementUgiReference(ugi);
   // Get the target fs (HBase region server fs) delegation token
   // Since we have checked the p

hbase git commit: HBASE-21349 Do not run CatalogJanitor or Normalizer when cluster is shutting down

2018-10-23 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 1e9d99872 -> 1f437ac22


HBASE-21349 Do not run CatalogJanitor or Normalizer when cluster is shutting down

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1f437ac2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1f437ac2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1f437ac2

Branch: refs/heads/master
Commit: 1f437ac221dac36d269f9bf500b3f491c465ae7a
Parents: 1e9d998
Author: xcang 
Authored: Mon Oct 22 23:16:52 2018 -0700
Committer: Michael Stack 
Committed: Tue Oct 23 14:45:05 2018 -0700

--
 .../java/org/apache/hadoop/hbase/master/CatalogJanitor.java| 6 --
 .../src/main/java/org/apache/hadoop/hbase/master/HMaster.java  | 6 --
 2 files changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1f437ac2/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index 8515093..73fabf8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -111,14 +111,16 @@ public class CatalogJanitor extends ScheduledChore {
   protected void chore() {
 try {
   AssignmentManager am = this.services.getAssignmentManager();
-  if (this.enabled.get() && !this.services.isInMaintenanceMode() && am != 
null &&
+  if (this.enabled.get() && !this.services.isInMaintenanceMode() &&
+!this.services.getServerManager().isClusterShutdown() && am != null &&
 am.isMetaLoaded() && !am.hasRegionsInTransition()) {
 scan();
   } else {
 LOG.warn("CatalogJanitor is disabled! Enabled=" + this.enabled.get() +
   ", maintenanceMode=" + this.services.isInMaintenanceMode() + ", am=" 
+ am +
   ", metaLoaded=" + (am != null && am.isMetaLoaded()) + ", hasRIT=" +
-  (am != null && am.hasRegionsInTransition()));
+  (am != null && am.hasRegionsInTransition()) + " clusterShutDown=" + 
this.services
+  .getServerManager().isClusterShutdown());
   }
 } catch (IOException e) {
   LOG.warn("Failed scan of catalog table", e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/1f437ac2/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 90e0e6c..8eaa87d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1781,12 +1781,14 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   LOG.debug("Master has not been initialized, don't run region 
normalizer.");
   return false;
 }
-
+if (this.getServerManager().isClusterShutdown()) {
+  LOG.info("Cluster is shutting down, don't run region normalizer.");
+  return false;
+}
 if (isInMaintenanceMode()) {
   LOG.info("Master is in maintenance mode, don't run region normalizer.");
   return false;
 }
-
 if (!this.regionNormalizerTracker.isNormalizerOn()) {
   LOG.debug("Region normalization is disabled, don't run region 
normalizer.");
   return false;



hbase git commit: HBASE-21349 Do not run CatalogJanitor or Normalizer when cluster is shutting down

2018-10-23 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 390c3227e -> f3da04a55


HBASE-21349 Do not run CatalogJanitor or Normalizer when cluster is shutting down

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f3da04a5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f3da04a5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f3da04a5

Branch: refs/heads/branch-2
Commit: f3da04a552eef6dfae014dd13d3b85fc3abfa55b
Parents: 390c322
Author: xcang 
Authored: Mon Oct 22 23:16:52 2018 -0700
Committer: Michael Stack 
Committed: Tue Oct 23 14:44:45 2018 -0700

--
 .../java/org/apache/hadoop/hbase/master/CatalogJanitor.java| 6 --
 .../src/main/java/org/apache/hadoop/hbase/master/HMaster.java  | 6 --
 2 files changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f3da04a5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index 8515093..73fabf8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -111,14 +111,16 @@ public class CatalogJanitor extends ScheduledChore {
   protected void chore() {
 try {
   AssignmentManager am = this.services.getAssignmentManager();
-  if (this.enabled.get() && !this.services.isInMaintenanceMode() && am != 
null &&
+  if (this.enabled.get() && !this.services.isInMaintenanceMode() &&
+!this.services.getServerManager().isClusterShutdown() && am != null &&
 am.isMetaLoaded() && !am.hasRegionsInTransition()) {
 scan();
   } else {
 LOG.warn("CatalogJanitor is disabled! Enabled=" + this.enabled.get() +
   ", maintenanceMode=" + this.services.isInMaintenanceMode() + ", am=" 
+ am +
   ", metaLoaded=" + (am != null && am.isMetaLoaded()) + ", hasRIT=" +
-  (am != null && am.hasRegionsInTransition()));
+  (am != null && am.hasRegionsInTransition()) + " clusterShutDown=" + 
this.services
+  .getServerManager().isClusterShutdown());
   }
 } catch (IOException e) {
   LOG.warn("Failed scan of catalog table", e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/f3da04a5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 434e59e..4f56d1b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1767,12 +1767,14 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   LOG.debug("Master has not been initialized, don't run region 
normalizer.");
   return false;
 }
-
+if (this.getServerManager().isClusterShutdown()) {
+  LOG.info("Cluster is shutting down, don't run region normalizer.");
+  return false;
+}
 if (isInMaintenanceMode()) {
   LOG.info("Master is in maintenance mode, don't run region normalizer.");
   return false;
 }
-
 if (!this.regionNormalizerTracker.isNormalizerOn()) {
   LOG.debug("Region normalization is disabled, don't run region 
normalizer.");
   return false;



hbase git commit: HBASE-21349 Do not run CatalogJanitor or Normalizer when cluster is shutting down

2018-10-23 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 3979aebeb -> ae13a5c6e


HBASE-21349 Do not run CatalogJanitor or Normalizer when cluster is shutting down

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ae13a5c6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ae13a5c6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ae13a5c6

Branch: refs/heads/branch-2.1
Commit: ae13a5c6eadc0ccc703f0ae45cff358e1ad793d7
Parents: 3979aeb
Author: xcang 
Authored: Mon Oct 22 23:16:52 2018 -0700
Committer: Michael Stack 
Committed: Tue Oct 23 14:44:22 2018 -0700

--
 .../java/org/apache/hadoop/hbase/master/CatalogJanitor.java| 6 --
 .../src/main/java/org/apache/hadoop/hbase/master/HMaster.java  | 6 --
 2 files changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ae13a5c6/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index 8515093..73fabf8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -111,14 +111,16 @@ public class CatalogJanitor extends ScheduledChore {
   protected void chore() {
 try {
   AssignmentManager am = this.services.getAssignmentManager();
-  if (this.enabled.get() && !this.services.isInMaintenanceMode() && am != 
null &&
+  if (this.enabled.get() && !this.services.isInMaintenanceMode() &&
+!this.services.getServerManager().isClusterShutdown() && am != null &&
 am.isMetaLoaded() && !am.hasRegionsInTransition()) {
 scan();
   } else {
 LOG.warn("CatalogJanitor is disabled! Enabled=" + this.enabled.get() +
   ", maintenanceMode=" + this.services.isInMaintenanceMode() + ", am=" 
+ am +
   ", metaLoaded=" + (am != null && am.isMetaLoaded()) + ", hasRIT=" +
-  (am != null && am.hasRegionsInTransition()));
+  (am != null && am.hasRegionsInTransition()) + " clusterShutDown=" + 
this.services
+  .getServerManager().isClusterShutdown());
   }
 } catch (IOException e) {
   LOG.warn("Failed scan of catalog table", e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/ae13a5c6/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 3c18cb9..e8a9ef8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1719,12 +1719,14 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   LOG.debug("Master has not been initialized, don't run region 
normalizer.");
   return false;
 }
-
+if (this.getServerManager().isClusterShutdown()) {
+  LOG.info("Cluster is shutting down, don't run region normalizer.");
+  return false;
+}
 if (isInMaintenanceMode()) {
   LOG.info("Master is in maintenance mode, don't run region normalizer.");
   return false;
 }
-
 if (!this.regionNormalizerTracker.isNormalizerOn()) {
   LOG.debug("Region normalization is disabled, don't run region 
normalizer.");
   return false;



hbase git commit: HBASE-21349 Do not run CatalogJanitor or Normalizer when cluster is shutting down

2018-10-23 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 8a04d444d -> 98861c0f9


HBASE-21349 Do not run CatalogJanitor or Normalizer when cluster is shutting down

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/98861c0f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/98861c0f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/98861c0f

Branch: refs/heads/branch-2.0
Commit: 98861c0f9bb456cc0392e5b9de6a7e783259b698
Parents: 8a04d44
Author: xcang 
Authored: Mon Oct 22 23:16:52 2018 -0700
Committer: Michael Stack 
Committed: Tue Oct 23 14:43:45 2018 -0700

--
 .../java/org/apache/hadoop/hbase/master/CatalogJanitor.java| 6 --
 .../src/main/java/org/apache/hadoop/hbase/master/HMaster.java  | 6 --
 2 files changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/98861c0f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index 8515093..73fabf8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -111,14 +111,16 @@ public class CatalogJanitor extends ScheduledChore {
   protected void chore() {
 try {
   AssignmentManager am = this.services.getAssignmentManager();
-  if (this.enabled.get() && !this.services.isInMaintenanceMode() && am != 
null &&
+  if (this.enabled.get() && !this.services.isInMaintenanceMode() &&
+!this.services.getServerManager().isClusterShutdown() && am != null &&
 am.isMetaLoaded() && !am.hasRegionsInTransition()) {
 scan();
   } else {
 LOG.warn("CatalogJanitor is disabled! Enabled=" + this.enabled.get() +
   ", maintenanceMode=" + this.services.isInMaintenanceMode() + ", am=" 
+ am +
   ", metaLoaded=" + (am != null && am.isMetaLoaded()) + ", hasRIT=" +
-  (am != null && am.hasRegionsInTransition()));
+  (am != null && am.hasRegionsInTransition()) + " clusterShutDown=" + 
this.services
+  .getServerManager().isClusterShutdown());
   }
 } catch (IOException e) {
   LOG.warn("Failed scan of catalog table", e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/98861c0f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 3b3759f..5e10964 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1687,12 +1687,14 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   LOG.debug("Master has not been initialized, don't run region 
normalizer.");
   return false;
 }
-
+if (this.getServerManager().isClusterShutdown()) {
+  LOG.info("Cluster is shutting down, don't run region normalizer.");
+  return false;
+}
 if (isInMaintenanceMode()) {
   LOG.info("Master is in maintenance mode, don't run region normalizer.");
   return false;
 }
-
 if (!this.regionNormalizerTracker.isNormalizerOn()) {
   LOG.debug("Region normalization is disabled, don't run region 
normalizer.");
   return false;



hbase git commit: HBASE-21342 FileSystem in use may get closed by other bulk load call in secure bulkLoad

2018-10-23 Thread mdrob
Repository: hbase
Updated Branches:
  refs/heads/branch-2 d4ce9e534 -> 390c3227e


HBASE-21342 FileSystem in use may get closed by other bulk load call in secure 
bulkLoad

Signed-off-by: Mike Drob 
Signed-off-by: Ted Yu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/390c3227
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/390c3227
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/390c3227

Branch: refs/heads/branch-2
Commit: 390c3227e23ee04ca9797fdf1e3bdd93ce9a1882
Parents: d4ce9e5
Author: mazhenlin 
Authored: Fri Oct 19 14:51:00 2018 +0800
Committer: Mike Drob 
Committed: Tue Oct 23 15:52:05 2018 -0500

--
 .../regionserver/SecureBulkLoadManager.java |  45 +++-
 .../regionserver/TestSecureBulkLoadManager.java | 248 +++
 2 files changed, 292 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/390c3227/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
index a4ee517..566a6b6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
@@ -25,6 +25,9 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.BiFunction;
+import java.util.function.Consumer;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -53,6 +56,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
@@ -106,6 +110,7 @@ public class SecureBulkLoadManager {
   private Path baseStagingDir;
 
   private UserProvider userProvider;
+  private ConcurrentHashMap ugiReferenceCounter;
   private Connection conn;
 
   SecureBulkLoadManager(Configuration conf, Connection conn) {
@@ -116,6 +121,7 @@ public class SecureBulkLoadManager {
   public void start() throws IOException {
 random = new SecureRandom();
 userProvider = UserProvider.instantiate(conf);
+ugiReferenceCounter = new ConcurrentHashMap<>();
 fs = FileSystem.get(conf);
 baseStagingDir = new Path(FSUtils.getRootDir(conf), 
HConstants.BULKLOAD_STAGING_DIR_NAME);
 
@@ -158,7 +164,7 @@ public class SecureBulkLoadManager {
 } finally {
   UserGroupInformation ugi = getActiveUser().getUGI();
   try {
-if (!UserGroupInformation.getLoginUser().equals(ugi)) {
+if (!UserGroupInformation.getLoginUser().equals(ugi) && 
!isUserReferenced(ugi)) {
   FileSystem.closeAllForUGI(ugi);
 }
   } catch (IOException e) {
@@ -167,6 +173,38 @@ public class SecureBulkLoadManager {
 }
   }
 
+  private Consumer fsCreatedListener;
+
+  @VisibleForTesting
+  void setFsCreatedListener(Consumer fsCreatedListener) {
+this.fsCreatedListener = fsCreatedListener;
+  }
+
+
+  private void incrementUgiReference(UserGroupInformation ugi) {
+ugiReferenceCounter.merge(ugi, 1, new BiFunction() {
+  @Override
+  public Integer apply(Integer oldvalue, Integer value) {
+return ++oldvalue;
+  }
+});
+  }
+
+  private void decrementUgiReference(UserGroupInformation ugi) {
+ugiReferenceCounter.computeIfPresent(ugi,
+new BiFunction() {
+  @Override
+  public Integer apply(UserGroupInformation key, Integer value) {
+return value > 1 ? --value : null;
+  }
+  });
+  }
+
+  private boolean isUserReferenced(UserGroupInformation ugi) {
+Integer count = ugiReferenceCounter.get(ugi);
+return count != null && count > 0;
+  }
+
   public Map> secureBulkLoadHFiles(final HRegion region,
   final BulkLoadHFileRequest request) throws IOException {
 final List> familyPaths = new 
ArrayList<>(request.getFamilyPathCount());
@@ -208,6 +246,7 @@ public class SecureBulkLoadManager {
 Map> map = null;
 
 try {
+  incrementUgiReference(ugi);
   // Get the target fs (HBase region server fs) delegation token
   // Since we have checked the permi

hbase git commit: HBASE-21342 FileSystem in use may get closed by other bulk load call in secure bulkLoad

2018-10-23 Thread mdrob
Repository: hbase
Updated Branches:
  refs/heads/master 807736fcf -> 1e9d99872


HBASE-21342 FileSystem in use may get closed by other bulk load call in secure 
bulkLoad

Signed-off-by: Mike Drob 
Signed-off-by: Ted Yu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1e9d9987
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1e9d9987
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1e9d9987

Branch: refs/heads/master
Commit: 1e9d998727773a724d3a37401e4e1cea3474bd9d
Parents: 807736f
Author: mazhenlin 
Authored: Fri Oct 19 14:51:00 2018 +0800
Committer: Mike Drob 
Committed: Tue Oct 23 15:51:46 2018 -0500

--
 .../regionserver/SecureBulkLoadManager.java |  45 +++-
 .../regionserver/TestSecureBulkLoadManager.java | 248 +++
 2 files changed, 292 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1e9d9987/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
index a4ee517..566a6b6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
@@ -25,6 +25,9 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.BiFunction;
+import java.util.function.Consumer;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -53,6 +56,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
@@ -106,6 +110,7 @@ public class SecureBulkLoadManager {
   private Path baseStagingDir;
 
   private UserProvider userProvider;
+  private ConcurrentHashMap ugiReferenceCounter;
   private Connection conn;
 
   SecureBulkLoadManager(Configuration conf, Connection conn) {
@@ -116,6 +121,7 @@ public class SecureBulkLoadManager {
   public void start() throws IOException {
 random = new SecureRandom();
 userProvider = UserProvider.instantiate(conf);
+ugiReferenceCounter = new ConcurrentHashMap<>();
 fs = FileSystem.get(conf);
 baseStagingDir = new Path(FSUtils.getRootDir(conf), 
HConstants.BULKLOAD_STAGING_DIR_NAME);
 
@@ -158,7 +164,7 @@ public class SecureBulkLoadManager {
 } finally {
   UserGroupInformation ugi = getActiveUser().getUGI();
   try {
-if (!UserGroupInformation.getLoginUser().equals(ugi)) {
+if (!UserGroupInformation.getLoginUser().equals(ugi) && 
!isUserReferenced(ugi)) {
   FileSystem.closeAllForUGI(ugi);
 }
   } catch (IOException e) {
@@ -167,6 +173,38 @@ public class SecureBulkLoadManager {
 }
   }
 
+  private Consumer fsCreatedListener;
+
+  @VisibleForTesting
+  void setFsCreatedListener(Consumer fsCreatedListener) {
+this.fsCreatedListener = fsCreatedListener;
+  }
+
+
+  private void incrementUgiReference(UserGroupInformation ugi) {
+ugiReferenceCounter.merge(ugi, 1, new BiFunction() {
+  @Override
+  public Integer apply(Integer oldvalue, Integer value) {
+return ++oldvalue;
+  }
+});
+  }
+
+  private void decrementUgiReference(UserGroupInformation ugi) {
+ugiReferenceCounter.computeIfPresent(ugi,
+new BiFunction() {
+  @Override
+  public Integer apply(UserGroupInformation key, Integer value) {
+return value > 1 ? --value : null;
+  }
+  });
+  }
+
+  private boolean isUserReferenced(UserGroupInformation ugi) {
+Integer count = ugiReferenceCounter.get(ugi);
+return count != null && count > 0;
+  }
+
   public Map> secureBulkLoadHFiles(final HRegion region,
   final BulkLoadHFileRequest request) throws IOException {
 final List> familyPaths = new 
ArrayList<>(request.getFamilyPathCount());
@@ -208,6 +246,7 @@ public class SecureBulkLoadManager {
 Map> map = null;
 
 try {
+  incrementUgiReference(ugi);
   // Get the target fs (HBase region server fs) delegation token
   // Since we have checked the permissio

hbase git commit: HBASE-21338 Warn if balancer is an ill-fit for cluster size

2018-10-23 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 3b68e5393 -> 807736fcf


HBASE-21338 Warn if balancer is an ill-fit for cluster size

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/807736fc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/807736fc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/807736fc

Branch: refs/heads/master
Commit: 807736fcf153d0d83eab50fd20d096d37c651ca9
Parents: 3b68e53
Author: xcang 
Authored: Mon Oct 22 16:29:08 2018 -0700
Committer: Michael Stack 
Committed: Tue Oct 23 13:23:24 2018 -0700

--
 .../master/balancer/StochasticLoadBalancer.java   | 18 +-
 1 file changed, 13 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/807736fc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index a93e2db..30e4d49 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -374,9 +374,6 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
 for (int i = 0; i < this.curFunctionCosts.length; i++) {
   curFunctionCosts[i] = tempFunctionCosts[i];
 }
-LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost 
+ ", functionCost="
-+ functionCost());
-
 double initCost = currentCost;
 double newCost = currentCost;
 
@@ -385,9 +382,20 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
   computedMaxSteps = Math.max(this.maxSteps,
   ((long)cluster.numRegions * (long)this.stepsPerRegion * 
(long)cluster.numServers));
 } else {
-  computedMaxSteps = Math.min(this.maxSteps,
-  ((long)cluster.numRegions * (long)this.stepsPerRegion * 
(long)cluster.numServers));
+  long calculatedMaxSteps = (long)cluster.numRegions * 
(long)this.stepsPerRegion *
+  (long)cluster.numServers;
+  computedMaxSteps = Math.min(this.maxSteps, calculatedMaxSteps);
+  if (calculatedMaxSteps > maxSteps) {
+LOG.warn("calculatedMaxSteps:{} for loadbalancer's stochastic walk is 
larger than "
++ "maxSteps:{}. Hence load balancing may not work well. Setting 
parameter "
++ "\"hbase.master.balancer.stochastic.runMaxSteps\" to true can 
overcome this issue."
++ "(This config change does not require service restart)", 
calculatedMaxSteps,
+maxRunningTime);
+  }
 }
+LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost 
+ ", functionCost="
++ functionCost() + " computedMaxSteps: " + computedMaxSteps);
+
 // Perform a stochastic walk to see if we can get a good fit.
 long step;
 



hbase git commit: HBASE-21338 Warn if balancer is an ill-fit for cluster size

2018-10-23 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 e29ce9f93 -> 3979aebeb


HBASE-21338 Warn if balancer is an ill-fit for cluster size

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3979aebe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3979aebe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3979aebe

Branch: refs/heads/branch-2.1
Commit: 3979aebebf5e098ab7202647b0e7fe06c2829211
Parents: e29ce9f
Author: xcang 
Authored: Mon Oct 22 16:29:08 2018 -0700
Committer: Michael Stack 
Committed: Tue Oct 23 13:22:41 2018 -0700

--
 .../master/balancer/StochasticLoadBalancer.java   | 18 +-
 1 file changed, 13 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3979aebe/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index d134690..b2c6629 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -372,9 +372,6 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
 for (int i = 0; i < this.curFunctionCosts.length; i++) {
   curFunctionCosts[i] = tempFunctionCosts[i];
 }
-LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost 
+ ", functionCost="
-+ functionCost());
-
 double initCost = currentCost;
 double newCost = currentCost;
 
@@ -383,9 +380,20 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
   computedMaxSteps = Math.max(this.maxSteps,
   ((long)cluster.numRegions * (long)this.stepsPerRegion * 
(long)cluster.numServers));
 } else {
-  computedMaxSteps = Math.min(this.maxSteps,
-  ((long)cluster.numRegions * (long)this.stepsPerRegion * 
(long)cluster.numServers));
+  long calculatedMaxSteps = (long)cluster.numRegions * 
(long)this.stepsPerRegion *
+  (long)cluster.numServers;
+  computedMaxSteps = Math.min(this.maxSteps, calculatedMaxSteps);
+  if (calculatedMaxSteps > maxSteps) {
+LOG.warn("calculatedMaxSteps:{} for loadbalancer's stochastic walk is 
larger than "
++ "maxSteps:{}. Hence load balancing may not work well. Setting 
parameter "
++ "\"hbase.master.balancer.stochastic.runMaxSteps\" to true can 
overcome this issue."
++ "(This config change does not require service restart)", 
calculatedMaxSteps,
+maxRunningTime);
+  }
 }
+LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost 
+ ", functionCost="
++ functionCost() + " computedMaxSteps: " + computedMaxSteps);
+
 // Perform a stochastic walk to see if we can get a good fit.
 long step;
 



hbase git commit: HBASE-21338 Warn if balancer is an ill-fit for cluster size

2018-10-23 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 1b1dabd1f -> d4ce9e534


HBASE-21338 Warn if balancer is an ill-fit for cluster size

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d4ce9e53
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d4ce9e53
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d4ce9e53

Branch: refs/heads/branch-2
Commit: d4ce9e534ee7e61c898d5c44a96a4592c5bd7dcf
Parents: 1b1dabd
Author: xcang 
Authored: Mon Oct 22 16:29:08 2018 -0700
Committer: Michael Stack 
Committed: Tue Oct 23 13:23:03 2018 -0700

--
 .../master/balancer/StochasticLoadBalancer.java   | 18 +-
 1 file changed, 13 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d4ce9e53/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index d134690..b2c6629 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -372,9 +372,6 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
 for (int i = 0; i < this.curFunctionCosts.length; i++) {
   curFunctionCosts[i] = tempFunctionCosts[i];
 }
-LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost 
+ ", functionCost="
-+ functionCost());
-
 double initCost = currentCost;
 double newCost = currentCost;
 
@@ -383,9 +380,20 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
   computedMaxSteps = Math.max(this.maxSteps,
   ((long)cluster.numRegions * (long)this.stepsPerRegion * 
(long)cluster.numServers));
 } else {
-  computedMaxSteps = Math.min(this.maxSteps,
-  ((long)cluster.numRegions * (long)this.stepsPerRegion * 
(long)cluster.numServers));
+  long calculatedMaxSteps = (long)cluster.numRegions * 
(long)this.stepsPerRegion *
+  (long)cluster.numServers;
+  computedMaxSteps = Math.min(this.maxSteps, calculatedMaxSteps);
+  if (calculatedMaxSteps > maxSteps) {
+LOG.warn("calculatedMaxSteps:{} for loadbalancer's stochastic walk is 
larger than "
++ "maxSteps:{}. Hence load balancing may not work well. Setting 
parameter "
++ "\"hbase.master.balancer.stochastic.runMaxSteps\" to true can 
overcome this issue."
++ "(This config change does not require service restart)", 
calculatedMaxSteps,
+maxRunningTime);
+  }
 }
+LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost 
+ ", functionCost="
++ functionCost() + " computedMaxSteps: " + computedMaxSteps);
+
 // Perform a stochastic walk to see if we can get a good fit.
 long step;
 



hbase git commit: HBASE-21338 Warn if balancer is an ill-fit for cluster size

2018-10-23 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 2b675e36d -> 8a04d444d


HBASE-21338 Warn if balancer is an ill-fit for cluster size

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8a04d444
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8a04d444
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8a04d444

Branch: refs/heads/branch-2.0
Commit: 8a04d444d7ea999f06c3a15137c75b507ae75925
Parents: 2b675e3
Author: xcang 
Authored: Mon Oct 22 16:29:08 2018 -0700
Committer: Michael Stack 
Committed: Tue Oct 23 13:21:36 2018 -0700

--
 .../master/balancer/StochasticLoadBalancer.java   | 18 +-
 1 file changed, 13 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8a04d444/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index d134690..b2c6629 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -372,9 +372,6 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
 for (int i = 0; i < this.curFunctionCosts.length; i++) {
   curFunctionCosts[i] = tempFunctionCosts[i];
 }
-LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost 
+ ", functionCost="
-+ functionCost());
-
 double initCost = currentCost;
 double newCost = currentCost;
 
@@ -383,9 +380,20 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
   computedMaxSteps = Math.max(this.maxSteps,
   ((long)cluster.numRegions * (long)this.stepsPerRegion * 
(long)cluster.numServers));
 } else {
-  computedMaxSteps = Math.min(this.maxSteps,
-  ((long)cluster.numRegions * (long)this.stepsPerRegion * 
(long)cluster.numServers));
+  long calculatedMaxSteps = (long)cluster.numRegions * 
(long)this.stepsPerRegion *
+  (long)cluster.numServers;
+  computedMaxSteps = Math.min(this.maxSteps, calculatedMaxSteps);
+  if (calculatedMaxSteps > maxSteps) {
+LOG.warn("calculatedMaxSteps:{} for loadbalancer's stochastic walk is 
larger than "
++ "maxSteps:{}. Hence load balancing may not work well. Setting 
parameter "
++ "\"hbase.master.balancer.stochastic.runMaxSteps\" to true can 
overcome this issue."
++ "(This config change does not require service restart)", 
calculatedMaxSteps,
+maxRunningTime);
+  }
 }
+LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost 
+ ", functionCost="
++ functionCost() + " computedMaxSteps: " + computedMaxSteps);
+
 // Perform a stochastic walk to see if we can get a good fit.
 long step;
 



[hbase-operator-tools] branch master updated: Formatting

2018-10-23 Thread stack
This is an automated email from the ASF dual-hosted git repository.

stack pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-operator-tools.git


The following commit(s) were added to refs/heads/master by this push:
 new eb7b205  Formatting
eb7b205 is described below

commit eb7b20585d8b5c466d39ea8600761add617b5b9a
Author: Michael Stack 
AuthorDate: Tue Oct 23 10:23:41 2018 -0700

Formatting
---
 hbase-hbck2/README.md | 24 
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/hbase-hbck2/README.md b/hbase-hbck2/README.md
index b579d69..cf3bdbf 100644
--- a/hbase-hbck2/README.md
+++ b/hbase-hbck2/README.md
@@ -28,7 +28,8 @@ _HBCK2_ to generate the _HBCK2_ jar file, running the below 
will dump out the _H
  $ HBASE_CLASSPATH_PREFIX=./hbase-hbck2-1.0.0-SNAPSHOT.jar ./bin/hbase 
org.apache.hbase.HBCK2
 
 
-```usage: HBCK2 [OPTIONS] COMMAND 
+```
+usage: HBCK2 [OPTIONS] COMMAND 
 
 Options:
  -d,--debug run with debug output
@@ -138,7 +139,8 @@ and is not letting go.
 
 _STUCK_ Procedures look like this:
 
-```2018-09-12 15:29:06,558 WARN 
org.apache.hadoop.hbase.master.assignment.AssignmentManager: STUCK 
Region-In-Transition rit=OPENING, 
location=va1001.example.org,22101,1536173230599, 
table=IntegrationTestBigLinkedList_20180626110336, 
region=dbdb56242f17610c46ea044f7a42895b
+```
+2018-09-12 15:29:06,558 WARN 
org.apache.hadoop.hbase.master.assignment.AssignmentManager: STUCK 
Region-In-Transition rit=OPENING, 
location=va1001.example.org,22101,1536173230599, 
table=IntegrationTestBigLinkedList_20180626110336, 
region=dbdb56242f17610c46ea044f7a42895b
 ```
 
 
is needed to alleviate the blockage.
 
 Lists of locks and procedures can also be obtained via the hbase shell:
 
-```$ echo "list_locks"| hbase shell &> /tmp/locks.txt
+```
+$ echo "list_locks"| hbase shell &> /tmp/locks.txt
 $ echo "list_procedures"| hbase shell &> /tmp/procedures.txt
 ```
 
@@ -181,7 +184,8 @@ It can be run with a table focus or against the whole 
cluster.
 
 For example, to check cluster assigns:
 
-```$ hbase canary -f false -t 600 &>/tmp/canary.log
+```
+$ hbase canary -f false -t 600 &>/tmp/canary.log
 ```
 
 The _-f false_ tells the Canary to keep going across failed Region
@@ -194,7 +198,8 @@ For example, given a Region that has a start row of 
_d10c_
 belonging to the table _testtable_, do as follows:
 
 
-```hbase> scan 'testtable', {STARTROW => 'd10c', LIMIT => 10}
+```
+hbase> scan 'testtable', {STARTROW => 'd10c', LIMIT => 10}
 ```
 
 For an overview on parsing a Region name into its constituent parts, see
@@ -207,7 +212,8 @@ _ENABLED_ or _ENABLING_ table, read the _hbase:meta_ table 
_info:state_ column.
 For example, to find the state of all Regions in the table
 _IntegrationTestBigLinkedList_20180626064758_, do the following:
 
-```$ echo " scan 'hbase:meta', {ROWPREFIXFILTER => 
'IntegrationTestBigLinkedList_20180626064758,', COLUMN => 'info:state'}"| hbase 
shell > /tmp/t.txt
+```
+$ echo " scan 'hbase:meta', {ROWPREFIXFILTER => 
'IntegrationTestBigLinkedList_20180626064758,', COLUMN => 'info:state'}"| hbase 
shell > /tmp/t.txt
 ```
 
 ...then grep for _OPENING_ or _CLOSING_ Regions.
@@ -273,13 +279,15 @@ current list of outstanding Locks.
 
 This should never happen. If it does, here is what it looks like:
 
-```2018-10-01 22:07:42,792 WARN org.apache.hadoop.hbase.master.HMaster: 
hbase:meta,,1.1588230740 is NOT online; state={1588230740 state=CLOSING, 
ts=1538456302300, server=ve1017.example.org,22101,1538449648131}; 
ServerCrashProcedures=true. Master startup cannot progress, in holding-pattern 
until region onlined.
+```
+2018-10-01 22:07:42,792 WARN org.apache.hadoop.hbase.master.HMaster: 
hbase:meta,,1.1588230740 is NOT online; state={1588230740 state=CLOSING, 
ts=1538456302300, server=ve1017.example.org,22101,1538449648131}; 
ServerCrashProcedures=true. Master startup cannot progress, in holding-pattern 
until region onlined.
 ```
 
 The Master is unable to continue startup because there is no Procedure to 
assign
 _hbase:meta_ (or _hbase:namespace_). To inject one, use the _HBCK2_ tool:
 
-``` HBASE_CLASSPATH_PREFIX=./hbase-hbck2-1.0.0-SNAPSHOT.jar hbase 
org.apache.hbase.HBCK2 assigns 1588230740
+```
+HBASE_CLASSPATH_PREFIX=./hbase-hbck2-1.0.0-SNAPSHOT.jar hbase 
org.apache.hbase.HBCK2 assigns 1588230740
 ```
 
 ...where 1588230740 is the encoded name of the _hbase:meta_ Region.



[hbase-operator-tools] branch master updated: Formatting

2018-10-23 Thread stack
This is an automated email from the ASF dual-hosted git repository.

stack pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-operator-tools.git


The following commit(s) were added to refs/heads/master by this push:
 new 5811ff2  Formatting
5811ff2 is described below

commit 5811ff2384e12aa69596da1ecdedc5c63f0fa582
Author: Michael Stack 
AuthorDate: Tue Oct 23 10:20:55 2018 -0700

Formatting
---
 hbase-hbck2/README.md | 21 ++---
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/hbase-hbck2/README.md b/hbase-hbck2/README.md
index 249afcf..b579d69 100644
--- a/hbase-hbck2/README.md
+++ b/hbase-hbck2/README.md
@@ -138,7 +138,8 @@ and is not letting go.
 
 _STUCK_ Procedures look like this:
 
-```2018-09-12 15:29:06,558 WARN 
org.apache.hadoop.hbase.master.assignment.AssignmentManager: STUCK 
Region-In-Transition rit=OPENING, 
location=va1001.example.org,22101,1536173230599, 
table=IntegrationTestBigLinkedList_20180626110336, 
region=dbdb56242f17610c46ea044f7a42895b```
+```2018-09-12 15:29:06,558 WARN 
org.apache.hadoop.hbase.master.assignment.AssignmentManager: STUCK 
Region-In-Transition rit=OPENING, 
location=va1001.example.org,22101,1536173230599, 
table=IntegrationTestBigLinkedList_20180626110336, 
region=dbdb56242f17610c46ea044f7a42895b
+```
 
 
  /master-status#tables
is needed to alleviate the blockage.
 Lists of locks and procedures can also be obtained via the hbase shell:
 
 ```$ echo "list_locks"| hbase shell &> /tmp/locks.txt
-$ echo "list_procedures"| hbase shell &> /tmp/procedures.txt```
+$ echo "list_procedures"| hbase shell &> /tmp/procedures.txt
+```
 
  The [HBase Canary Tool](http://hbase.apache.org/book.html#_canary)
 
@@ -179,7 +181,8 @@ It can be run with a table focus or against the whole 
cluster.
 
 For example, to check cluster assigns:
 
-```$ hbase canary -f false -t 600 &>/tmp/canary.log```
+```$ hbase canary -f false -t 600 &>/tmp/canary.log
+```
 
 The _-f false_ tells the Canary to keep going across failed Region
fetches and the _-t 600_ tells the Canary to run for ~two hours
@@ -191,7 +194,8 @@ For example, given a Region that has a start row of 
_d10c_
 belonging to the table _testtable_, do as follows:
 
 
-```hbase> scan 'testtable', {STARTROW => 'd10c', LIMIT => 10}```
+```hbase> scan 'testtable', {STARTROW => 'd10c', LIMIT => 10}
+```
 
 For an overview on parsing a Region name into its constituent parts, see
 [RegionInfo 
API](https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/RegionInfo.html).
@@ -203,7 +207,8 @@ _ENABLED_ or _ENABLING_ table, read the _hbase:meta_ table 
_info:state_ column.
 For example, to find the state of all Regions in the table
 _IntegrationTestBigLinkedList_20180626064758_, do the following:
 
-```$ echo " scan 'hbase:meta', {ROWPREFIXFILTER => 
'IntegrationTestBigLinkedList_20180626064758,', COLUMN => 'info:state'}"| hbase 
shell > /tmp/t.txt```
+```$ echo " scan 'hbase:meta', {ROWPREFIXFILTER => 
'IntegrationTestBigLinkedList_20180626064758,', COLUMN => 'info:state'}"| hbase 
shell > /tmp/t.txt
+```
 
 ...then grep for _OPENING_ or _CLOSING_ Regions.
 
@@ -268,12 +273,14 @@ current list of outstanding Locks.
 
 This should never happen. If it does, here is what it looks like:
 
-```2018-10-01 22:07:42,792 WARN org.apache.hadoop.hbase.master.HMaster: 
hbase:meta,,1.1588230740 is NOT online; state={1588230740 state=CLOSING, 
ts=1538456302300, server=ve1017.example.org,22101,1538449648131}; 
ServerCrashProcedures=true. Master startup cannot progress, in holding-pattern 
until region onlined.```
+```2018-10-01 22:07:42,792 WARN org.apache.hadoop.hbase.master.HMaster: 
hbase:meta,,1.1588230740 is NOT online; state={1588230740 state=CLOSING, 
ts=1538456302300, server=ve1017.example.org,22101,1538449648131}; 
ServerCrashProcedures=true. Master startup cannot progress, in holding-pattern 
until region onlined.
+```
 
 The Master is unable to continue startup because there is no Procedure to 
assign
 _hbase:meta_ (or _hbase:namespace_). To inject one, use the _HBCK2_ tool:
 
-``` HBASE_CLASSPATH_PREFIX=./hbase-hbck2-1.0.0-SNAPSHOT.jar hbase 
org.apache.hbase.HBCK2 assigns 1588230740```
+``` HBASE_CLASSPATH_PREFIX=./hbase-hbck2-1.0.0-SNAPSHOT.jar hbase 
org.apache.hbase.HBCK2 assigns 1588230740
+```
 
 ...where 1588230740 is the encoded name of the _hbase:meta_ Region.
 



[hbase-operator-tools] branch master updated: Formatting

2018-10-23 Thread stack
This is an automated email from the ASF dual-hosted git repository.

stack pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-operator-tools.git


The following commit(s) were added to refs/heads/master by this push:
 new 613ef58  Formatting
613ef58 is described below

commit 613ef58c5bc9dc57aec372e3357058cdd9b294d4
Author: Michael Stack 
AuthorDate: Tue Oct 23 10:19:34 2018 -0700

Formatting
---
 hbase-hbck2/README.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/hbase-hbck2/README.md b/hbase-hbck2/README.md
index 4399f6f..249afcf 100644
--- a/hbase-hbck2/README.md
+++ b/hbase-hbck2/README.md
@@ -82,7 +82,8 @@ default=1
A value of \x08\x00 == ENABLED, \x08\x01 == DISABLED, etc.
An example making table name 'user' ENABLED:
  $ HBCK2 setTableState users ENABLED
-   Returns whatever the previous table state was.```
+   Returns whatever the previous table state was.
+```
 
 ## _HBCK2_ Overview
 _HBCK2_ is currently a simple tool that does one thing at a time only.



[hbase-operator-tools] branch master updated: More doc on fixing

2018-10-23 Thread stack
This is an automated email from the ASF dual-hosted git repository.

stack pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-operator-tools.git


The following commit(s) were added to refs/heads/master by this push:
 new 7ed9e83  More doc on fixing
7ed9e83 is described below

commit 7ed9e83cfe1e78759c92c387afc35eb89383d3b4
Author: Michael Stack 
AuthorDate: Tue Oct 23 10:17:46 2018 -0700

More doc on fixing
---
 hbase-hbck2/README.md | 212 +++---
 1 file changed, 150 insertions(+), 62 deletions(-)

diff --git a/hbase-hbck2/README.md b/hbase-hbck2/README.md
index 0e1c875..4399f6f 100644
--- a/hbase-hbck2/README.md
+++ b/hbase-hbck2/README.md
@@ -1,32 +1,89 @@
-# HBCK2
+# Apache HBase HBCK2 Tool
 
 HBCK2 is the successor to 
[hbck](https://hbase.apache.org/book.html#hbck.in.depth),
 the hbase-1.x fixup tool (A.K.A _hbck1_). Use it in place of _hbck1_ making 
repairs
 against hbase-2.x installs.
 
 ## _hbck1_
-The _hbck_ that ships with hbase-1.x (A.K.A _hbck1_) should not be run against 
an
+The _hbck_ tool that ships with hbase-1.x (A.K.A _hbck1_) should not be run 
against an
 hbase-2.x cluster. It may do damage. While _hbck1_ is still bundled inside 
hbase-2.x
 -- to minimize surprise (it has a fat pointer to _HBCK2_ at the head of its 
help
output) -- its write-facility (`-fix`) has been removed. It can report on the 
state
 of an hbase-2.x cluster but its assessments are likely inaccurate since it 
does not
-understand the workings of an hbase-2.x.
+understand the internal workings of an hbase-2.x.
 
 _HBCK2_ does much less than _hbck1_ because many of the class of problems
 _hbck1_ addressed are either no longer issues in hbase-2.x, or we've made
-(or will make) a dedicated tool to do what _hbck1_ used do. _HBCK2_ also
-works in a manner that differs from how _hbck1_ worked, asking the HBase
+(or will make) a dedicated tool to do what _hbck1_ used to incorporate. _HBCK2_ 
also
+works in a manner that differs from how _hbck1_ operated, asking the HBase
 Master to do its bidding, rather than replicate functionality outside of the
+Master inside the _hbck1_ tool.
 
 
 ## Running _HBCK2_
-`org.apache.hbase.HBCK2` is the name of the main class. Running the below
-will dump out the _HBCK2_ usage:
+`org.apache.hbase.HBCK2` is the name of the _HBCK2_ main class. After building
+_HBCK2_ to generate the _HBCK2_ jar file, running the below will dump out the 
_HBCK2_ usage:
 
 
- $ HBASE_CLASSPATH_PREFIX=/tmp/hbase-hbck2-1.0.0-SNAPSHOT.jar ./bin/hbase 
org.apache.hbase.HBCK2
+ $ HBASE_CLASSPATH_PREFIX=./hbase-hbck2-1.0.0-SNAPSHOT.jar ./bin/hbase 
org.apache.hbase.HBCK2
 
 
+```usage: HBCK2 [OPTIONS] COMMAND 
+
+Options:
+ -d,--debug run with debug output
+ -h,--help  output this help message
+ -p,--hbase.zookeeper.property.clientPort   port of target hbase ensemble
+ -q,--hbase.zookeeper.quorum   ensemble of target hbase
+ -v,--version   this hbck2 version
+ -z,--zookeeper.znode.parentparent znode of target hbase
+
+Commands:
+ assigns [OPTIONS] ...
+   Options:
+-o,--override  override ownership by another procedure
+   A 'raw' assign that can be used even during Master initialization.
+   Skirts Coprocessors. Pass one or more encoded RegionNames.
+   1588230740 is the hard-coded name for the hbase:meta region and
+   de00010733901a05f5a2a3a382e27dd4 is an example of what a user-space
+   encoded Region name looks like. For example:
+ $ HBCK2 assign 1588230740 de00010733901a05f5a2a3a382e27dd4
+   Returns the pid(s) of the created AssignProcedure(s) or -1 if none.
+
+ bypass [OPTIONS] ...
+   Options:
+-o,--override   override if procedure is running/stuck
+-r,--recursive  bypass parent and its children. SLOW! EXPENSIVE!
+-w,--lockWait   milliseconds to wait on lock before giving up;
+default=1
+   Pass one (or more) procedure 'pid's to skip to procedure finish.
+   Parent of bypassed procedure will also be skipped to the finish.
+   Entities will be left in an inconsistent state and will require
+   manual fixup. May need Master restart to clear locks still held.
+   Bypass fails if procedure has children. Add 'recursive' if all
+   you have is a parent pid to finish parent and children. This
+   is SLOW, and dangerous so use selectively. Does not always work.
+
+ unassigns ...
+   Options:
+-o,--override  override ownership by another procedure
+   A 'raw' unassign that can be used even during Master initialization.
+   Skirts Coprocessors. Pass one or more encoded RegionNames:
+   1588230740 is the hard-coded name for the hbase:meta region and
+   de00010733901a05f5a2a3a382e27dd4 is an example of what a user-space
+   encoded Region name looks like. For example:
+ $ HBCK2 unassign 1588230740 de00010733901a05f5a2a3a382e27dd4
+   Returns the pid(s) of the created UnassignProc

[hbase-operator-tools] branch master updated: More playing w/ gh-pages

2018-10-23 Thread stack
This is an automated email from the ASF dual-hosted git repository.

stack pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-operator-tools.git


The following commit(s) were added to refs/heads/master by this push:
 new 7c13e2c  More playing w/ gh-pages
7c13e2c is described below

commit 7c13e2ca67aa0988665130836525017494083e1f
Author: Michael Stack 
AuthorDate: Tue Oct 23 08:56:06 2018 -0700

More playing w/ gh-pages
---
 hbase-hbck2/docs/README.md  | 193 
 hbase-hbck2/docs/index.html |   5 --
 2 files changed, 193 insertions(+), 5 deletions(-)

diff --git a/hbase-hbck2/docs/README.md b/hbase-hbck2/docs/README.md
new file mode 100644
index 000..9ac26f2
--- /dev/null
+++ b/hbase-hbck2/docs/README.md
@@ -0,0 +1,193 @@
+# Apache HBase HBCK2
+
+HBCK2 is the successor to 
[hbck](https://hbase.apache.org/book.html#hbck.in.depth),
+the hbase-1.x fixup tool (A.K.A _hbck1_). Use it in place of _hbck1_ making 
repairs
+against hbase-2.x installs.
+
+## _hbck1_
+The _hbck_ that ships with hbase-1.x (A.K.A _hbck1_) should not be run against 
an
+hbase-2.x cluster. It may do damage. While _hbck1_ is still bundled inside 
hbase-2.x
+-- to minimize surprise (it has a fat pointer to _HBCK2_ at the head of its 
help
+output) -- its write-facility (`-fix`) has been removed. It can report on the 
state
+of an hbase-2.x cluster but its assessments are likely inaccurate since it 
does not
+understand the workings of an hbase-2.x.
+
+_HBCK2_ does much less than _hbck1_ because many of the class of problems
+_hbck1_ addressed are either no longer issues in hbase-2.x, or we've made
+(or will make) a dedicated tool to do what _hbck1_ used to do. _HBCK2_ also
+works in a manner that differs from how _hbck1_ worked, asking the HBase
+Master to do its bidding, rather than replicate functionality outside of the
+
+
+## Running _HBCK2_
+`org.apache.hbase.HBCK2` is the name of the main class. Running the below
+will dump out the _HBCK2_ usage:
+
+
+ $ HBASE_CLASSPATH_PREFIX=/tmp/hbase-hbck2-1.0.0-SNAPSHOT.jar ./bin/hbase 
org.apache.hbase.HBCK2
+
+
+## _HBCK2_ Overview
+_HBCK2_ is currently a simple tool that does one thing at a time only.
+
+_HBCK2_ does not do diagnosis, leaving that function to other tooling,
+described below.
+
+In hbase-2.x, the Master is the final arbiter of all state, so a general 
principle of
+_HBCK2_ is that it asks the Master to effect all repair. This means a Master 
must be
+up before you can run an _HBCK2_ command.
+
+_HBCK2_ works by making use of an intentionally obscured `HbckService` hosted 
on the
+Master. The Service publishes a few methods for the _HBCK2_ tool to pull on. 
The
+first thing _HBCK2_ does is poke the cluster to ensure the service is 
available.
+It will fail if it is not or if the `HbckService` is lacking a wanted facility.
+_HBCK2_ versions should be able to work across multiple hbase-2 releases; it 
will
+fail with a message if it is unable to run. There is no `HbckService` in 
versions
+of hbase before 2.0.3 and 2.1.1; _HBCK2_ will not work against these versions.
+
+## Finding Problems
+
+While _hbck1_ performed an analysis reporting your cluster good or bad, _HBCK2_
+does no such thing (not currently). The operator figures what needs fixing and
+then uses tools including _HBCK2_ to do fixup.
+
+To figure if issues in assignment, check Master logs, the Master UI home
+page _table_ tab at `https://YOUR_HOST:YOUR_PORT/master-status#tables`,
+the current _Procedures & Locks_ tab at
+`https://YOUR_HOST:YOUR_PORT/procedures.jsp` off the Master UI home page,
+the HBase Canary tool, and reading Region state out of the `hbase:meta`
+table. Let's look at each in turn. We'll follow this review with a set of
+scenarios in which we use the below tooling to do various fixes.
+
+### Master Logs
+
+The Master runs all assignments, server crash handling, cluster start and
+stop, etc. In hbase-2.x, all that the Master does has been cast as
+Procedures run on a state machine engine. See [Procedure 
Framework](https://hbase.apache.org/book.html#pv2)
+and [Assignment Manager](https://hbase.apache.org/book.html#amv2)
+for detail on how this infrastructure works. Each Procedure has a
+Procedure `id`, its `pid`. You can trace the lifecycle of a
+Procedure as it logs each of its macro steps denoted by its
+`pid`. Procedures start, step through states and finish. Some
+Procedures spawn sub-procedures, wait on their Children, and then
+themselves finish.
+
+Generally all runs problem free but if some unforeseen circumstance
+arises, the assignment framework may sustain damage requiring
+operator intervention.  Below we will discuss some such scenarios
+but they manifest in the Master log as a Region being _STUCK_ or
+a Procedure transitioning an entity -- a Region of a Table --
+may be blocked because another Procedure holds the exclusive lock
+and is not letting go. More on these scenarios below.
+
+### 

[hbase-operator-tools] branch master updated: Playing w/ gh-pages

2018-10-23 Thread stack
This is an automated email from the ASF dual-hosted git repository.

stack pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase-operator-tools.git


The following commit(s) were added to refs/heads/master by this push:
 new 08dea87  Playing w/ gh-pages
08dea87 is described below

commit 08dea87c68381e3073679ee8e9416c6c7995cbe9
Author: Michael Stack 
AuthorDate: Tue Oct 23 08:52:08 2018 -0700

Playing w/ gh-pages
---
 hbase-hbck2/README.md   | 11 +--
 hbase-hbck2/docs/index.html |  5 +
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/hbase-hbck2/README.md b/hbase-hbck2/README.md
index a3abd77..0e1c875 100644
--- a/hbase-hbck2/README.md
+++ b/hbase-hbck2/README.md
@@ -63,10 +63,8 @@ scenarios in which we use the below tooling to do various 
fixes.
 
 The Master runs all assignments, server crash handling, cluster start and
 stop, etc. In hbase-2.x, all that the Master does has been cast as
-Procedures run on a state machine engine. See
-[Procedure Framework](http://hbase.apache.org/book.html#pv2)
-and
-[Assignment Manager](http://hbase.apache.org/book.html#amv2)
+Procedures run on a state machine engine. See [Procedure 
Framework](https://hbase.apache.org/book.html#pv2)
+and [Assignment Manager](https://hbase.apache.org/book.html#amv2)
 for detail on how this infrastructure works. Each Procedure has a
Procedure `id`, its `pid`. You can trace the lifecycle of a
 Procedure as it logs each of its macro steps denoted by its
@@ -84,7 +82,8 @@ and is not letting go. More on these scenarios below.
 
 ### /master-status#tables
 
-This tab shows a list of tables with columns showing whether a
+This tab on the Master UI home-page shows a list of tables with
+columns showing whether a
 table _ENABLED_, _ENABLING_, _DISABLING_, or _DISABLED_ as well
 as other attributes of table. Also listed are columns with counts
 of Regions in their various transition states: _OPEN_, _CLOSED_,
@@ -127,7 +126,7 @@ in the hbase shell, do something similar. In our example, 
the
 Region belongs to the table _testtable_ and the Region
 start row is _d10c_ (For overview on parsing a Region
 name into its constituent parts, see
-[https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/RegionInfo.html](RegionInfo
 API)):
+[RegionInfo 
API](https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/RegionInfo.html)):
 
 ```hbase> scan 'testtable', {STARTROW => 'd10c', LIMIT => 10}```
 
diff --git a/hbase-hbck2/docs/index.html b/hbase-hbck2/docs/index.html
new file mode 100644
index 000..29c67f7
--- /dev/null
+++ b/hbase-hbck2/docs/index.html
@@ -0,0 +1,5 @@
+
+  
+hello
+  
+



[27/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
index b456cd2..9b964f6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
@@ -105,1302 +105,1320 @@
 097 * will first be initialized to the 
oldest file's tracker(which is stored in the trailer), using the
 098 * method {@link 
ProcedureStoreTracker#resetTo(ProcedureStoreTracker, boolean)}, and then merge 
it
 099 * with the tracker of every newer wal 
files, using the
-100 * {@link 
ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. If we 
find out
-101 * that all the modified procedures for 
the oldest wal file are modified or deleted in newer wal
-102 * files, then we can delete it. This is 
because that, every time we call
-103 * {@link 
ProcedureStore#insert(Procedure[])} or {@link 
ProcedureStore#update(Procedure)}, we will
-104 * persist the full state of a Procedure, 
so the earlier wal records for this procedure can all be
-105 * deleted.
-106 * @see ProcedureWALPrettyPrinter for 
printing content of a single WAL.
-107 * @see #main(String[]) to parse a 
directory of MasterWALProcs.
-108 */
-109@InterfaceAudience.Private
-110public class WALProcedureStore extends 
ProcedureStoreBase {
-111  private static final Logger LOG = 
LoggerFactory.getLogger(WALProcedureStore.class);
-112  public static final String LOG_PREFIX = 
"pv2-";
-113  /** Used to construct the name of the 
log directory for master procedures */
-114  public static final String 
MASTER_PROCEDURE_LOGDIR = "MasterProcWALs";
-115
+100 * {@link 
ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker, 
boolean)}.
+101 * If we find out
+102 * that all the modified procedures for 
the oldest wal file are modified or deleted in newer wal
+103 * files, then we can delete it. This is 
because that, every time we call
+104 * {@link 
ProcedureStore#insert(Procedure[])} or {@link 
ProcedureStore#update(Procedure)}, we will
+105 * persist the full state of a Procedure, 
so the earlier wal records for this procedure can all be
+106 * deleted.
+107 * @see ProcedureWALPrettyPrinter for 
printing content of a single WAL.
+108 * @see #main(String[]) to parse a 
directory of MasterWALProcs.
+109 */
+110@InterfaceAudience.Private
+111public class WALProcedureStore extends 
ProcedureStoreBase {
+112  private static final Logger LOG = 
LoggerFactory.getLogger(WALProcedureStore.class);
+113  public static final String LOG_PREFIX = 
"pv2-";
+114  /** Used to construct the name of the 
log directory for master procedures */
+115  public static final String 
MASTER_PROCEDURE_LOGDIR = "MasterProcWALs";
 116
-117  public interface LeaseRecovery {
-118void recoverFileLease(FileSystem fs, 
Path path) throws IOException;
-119  }
-120
-121  public static final String 
WAL_COUNT_WARN_THRESHOLD_CONF_KEY =
-122
"hbase.procedure.store.wal.warn.threshold";
-123  private static final int 
DEFAULT_WAL_COUNT_WARN_THRESHOLD = 10;
-124
-125  public static final String 
EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY =
-126
"hbase.procedure.store.wal.exec.cleanup.on.load";
-127  private static final boolean 
DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY = true;
-128
-129  public static final String 
MAX_RETRIES_BEFORE_ROLL_CONF_KEY =
-130
"hbase.procedure.store.wal.max.retries.before.roll";
-131  private static final int 
DEFAULT_MAX_RETRIES_BEFORE_ROLL = 3;
-132
-133  public static final String 
WAIT_BEFORE_ROLL_CONF_KEY =
-134
"hbase.procedure.store.wal.wait.before.roll";
-135  private static final int 
DEFAULT_WAIT_BEFORE_ROLL = 500;
-136
-137  public static final String 
ROLL_RETRIES_CONF_KEY =
-138
"hbase.procedure.store.wal.max.roll.retries";
-139  private static final int 
DEFAULT_ROLL_RETRIES = 3;
-140
-141  public static final String 
MAX_SYNC_FAILURE_ROLL_CONF_KEY =
-142
"hbase.procedure.store.wal.sync.failure.roll.max";
-143  private static final int 
DEFAULT_MAX_SYNC_FAILURE_ROLL = 3;
-144
-145  public static final String 
PERIODIC_ROLL_CONF_KEY =
-146
"hbase.procedure.store.wal.periodic.roll.msec";
-147  private static final int 
DEFAULT_PERIODIC_ROLL = 60 * 60 * 1000; // 1h
-148
-149  public static final String 
SYNC_WAIT_MSEC_CONF_KEY = "hbase.procedure.store.wal.sync.wait.msec";
-150  private static final int 
DEFAULT_SYNC_WAIT_MSEC = 100;
-151
-152  public static final String 
USE_HSYNC_CONF_KEY = "hbase.procedure.store.wal.use.hsync";
-153  private static final boolean 
DEFAULT_USE_HSYNC = true;
-154
-155  publ

hbase git commit: HBASE-21073 Redo concept of maintenance mode

2018-10-23 Thread mdrob
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 a31e71564 -> 2b675e36d


HBASE-21073 Redo concept of maintenance mode

Instead of being an ephemeral state set by hbck, maintenance mode is now
an explicit toggle set by either configuration property or environment
variable. In maintenance mode, master will host system tables and not
assign any user-space tables to RSs. This gives operators the ability to
affect repairs to meta table with fewer moving parts.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2b675e36
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2b675e36
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2b675e36

Branch: refs/heads/branch-2.0
Commit: 2b675e36df5cdb0308c07ee9ce6fb50f7b3fab0e
Parents: a31e715
Author: Mike Drob 
Authored: Mon Oct 8 14:28:23 2018 -0500
Committer: Mike Drob 
Committed: Tue Oct 23 09:54:24 2018 -0500

--
 .../org/apache/hadoop/hbase/master/HMaster.java | 111 ---
 .../hadoop/hbase/master/LoadBalancer.java   |   9 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   8 +-
 .../hadoop/hbase/master/MasterServices.java |   2 +-
 .../hadoop/hbase/master/ServerManager.java  |  25 ++--
 .../hbase/master/balancer/BaseLoadBalancer.java |  25 ++--
 .../hbase/regionserver/HRegionServer.java   |   1 +
 .../apache/hadoop/hbase/master/TestMaster.java  |   3 +-
 .../hbase/master/TestMasterRepairMode.java  | 138 +++
 .../zookeeper/MasterMaintenanceModeTracker.java |   4 +
 .../asciidoc/_chapters/troubleshooting.adoc |  18 +++
 11 files changed, 255 insertions(+), 89 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2b675e36/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 67152e2..3b3759f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -187,7 +187,6 @@ import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;
 import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -304,9 +303,6 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   // Tracker for region normalizer state
   private RegionNormalizerTracker regionNormalizerTracker;
 
-  //Tracker for master maintenance mode setting
-  private MasterMaintenanceModeTracker maintenanceModeTracker;
-
   private ClusterSchemaService clusterSchemaService;
 
   public static final String HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS =
@@ -416,6 +412,11 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   /** jetty server for master to redirect requests to regionserver infoServer 
*/
   private Server masterJettyServer;
 
+  // Determine if we should do normal startup or minimal "single-user" mode 
with no region
+  // servers and no user tables. Useful for repair and recovery of hbase:meta
+  private final boolean maintenanceMode;
+  static final String MAINTENANCE_MODE = "hbase.master.maintenance_mode";
+
   public static class RedirectServlet extends HttpServlet {
 private static final long serialVersionUID = 2894774810058302473L;
 private final int regionServerInfoPort;
@@ -475,6 +476,16 @@ public class HMaster extends HRegionServer implements 
MasterServices {
 super(conf);
 TraceUtil.initTracer(conf);
 try {
+  if (conf.getBoolean(MAINTENANCE_MODE, false)) {
+LOG.info("Detected {}=true via configuration.", MAINTENANCE_MODE);
+maintenanceMode = true;
+  } else if (Boolean.getBoolean(MAINTENANCE_MODE)) {
+LOG.info("Detected {}=true via environment variables.", 
MAINTENANCE_MODE);
+maintenanceMode = true;
+  } else {
+maintenanceMode = false;
+  }
+
   this.rsFatals = new MemoryBoundedLogMessageBuffer(
   conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));
   LOG.info("hbase.rootdir=" + getRootDir() +
@@ -659,6 +670,9 @@ public class HMaster extends HRegionServer implements 
MasterServices {
*/
   @Override
   protected void waitForMasterActive(){
+if (maintenanceMode) {
+  return;
+}
 boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(conf);
 while

[32/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
index 9a00666..d4d6c86 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
@@ -229,13 +229,13 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
+org.apache.hadoop.hbase.quotas.ThrottlingException.Type
+org.apache.hadoop.hbase.quotas.SpaceViolationPolicy
 org.apache.hadoop.hbase.quotas.OperationQuota.OperationType
 org.apache.hadoop.hbase.quotas.QuotaScope
-org.apache.hadoop.hbase.quotas.QuotaType
 org.apache.hadoop.hbase.quotas.ThrottleType
-org.apache.hadoop.hbase.quotas.ThrottlingException.Type
+org.apache.hadoop.hbase.quotas.QuotaType
 org.apache.hadoop.hbase.quotas.RpcThrottlingException.Type
-org.apache.hadoop.hbase.quotas.SpaceViolationPolicy
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
index 5da5c49..85f0b9b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private final class HStore.StoreFlusherImpl
+private final class HStore.StoreFlusherImpl
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements StoreFlushContext
 
@@ -279,7 +279,7 @@ implements 
 
 tracker
-private final FlushLifeCycleTracker tracker
+private final FlushLifeCycleTracker tracker
 
 
 
@@ -288,7 +288,7 @@ implements 
 
 cacheFlushSeqNum
-private final long cacheFlushSeqNum
+private final long cacheFlushSeqNum
 
 
 
@@ -297,7 +297,7 @@ implements 
 
 snapshot
-private MemStoreSnapshot snapshot
+private MemStoreSnapshot snapshot
 
 
 
@@ -306,7 +306,7 @@ implements 
 
 tempFiles
-private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List tempFiles
+private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List tempFiles
 
 
 
@@ -315,7 +315,7 @@ implements 
 
 committedFiles
-private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List committedFiles
+private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List committedFiles
 
 
 
@@ -324,7 +324,7 @@ implements 
 
 cacheFlushCount
-private long cacheFlushCount
+private long cacheFlushCount
 
 
 
@@ -333,7 +333,7 @@ implements 
 
 cacheFlushSize
-private long cacheFlushSize
+private long cacheFlushSize
 
 
 
@@ -342,7 +342,7 @@ implements 
 
 outputFileSize
-private long outputFileSize
+private long outputFileSize
 
 
 
@@ -359,7 +359,7 @@ implements 
 
 StoreFlusherImpl
-private StoreFlusherImpl(long cacheFlushSeqNum,
+private StoreFlusherImpl(long cacheFlushSeqNum,
  FlushLifeCycleTracker tracker)
 
 
@@ -377,7 +377,7 @@ implements 
 
 prepare
-public MemStoreSize prepare()
+public MemStoreSize prepare()
 This is not thread safe. The caller should have a lock on 
the region or the store.
  If necessary, the lock can be added with the patch provided in 
HBASE-10087
 
@@ -394,7 +394,7 @@ implements 
 
 flushCache
-public void flushCache(MonitoredTask status)
+public void flushCache(MonitoredTask status)
 throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Description copied from 
interface: StoreFlushContext
 Flush the cache (create the new store file)
@@ -415,7 +415,7 @@ implements 
 
 commit
-public boolean commit(MonitoredTask status)
+public boolean commit(MonitoredTask status)
throws https://docs.o

[20/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
index 9365340..4755997 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class ProcedureTestingUtility.LoadCounter
+public static class ProcedureTestingUtility.LoadCounter
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoader
 
@@ -277,7 +277,7 @@ implements 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoad
 
 
 corrupted
-private final https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList corrupted
+private final https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList corrupted
 
 
 
@@ -286,7 +286,7 @@ implements 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoad
 
 
 completed
-private final https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList completed
+private final https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList completed
 
 
 
@@ -295,7 +295,7 @@ implements 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoad
 
 
 runnable
-private final https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList runnable
+private final https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList runnable
 
 
 
@@ -304,7 +304,7 @@ implements 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoad
 
 
 procIds
-private https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">SetLong> procIds
+private https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">SetLong> procIds
 
 
 
@@ -313,7 +313,7 @@ implements 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoad
 
 
 maxProcId
-private long maxProcId
+private long maxProcId
 
 
 
@@ -330,7 +330,7 @@ implements 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoad
 
 
 LoadCounter
-public LoadCounter()
+public LoadCounter()
 
 
 
@@ -339,7 +339,7 @@ implements 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoad
 
 
 LoadCounter
-public LoadCounter(https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">SetLong> procIds)
+public LoadCounter(https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">SetLong> procIds)
 
 
 
@@ -356,7 +356,7 @@ implements 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoad
 
 
 reset
-public void reset()
+public void reset()
 
 
 
@@ -365,7 +365,7 @@ implements 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoad
 
 
 reset
-public void reset(https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">SetLong> procIds)
+public void reset(https://docs.oracle.com/javase/8/docs/api/java/util/Set.h

[19/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.html
 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.html
index 5ba8edf..d56d805 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9,"i26":9,"i27":9,"i28":9,"i29":9,"i30":9,"i31":9,"i32":9,"i33":9,"i34":9,"i35":9,"i36":9};
+var methods = 
{"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9,"i26":9,"i27":9,"i28":9,"i29":9,"i30":9,"i31":9,"i32":9,"i33":9,"i34":9,"i35":9,"i36":9,"i37":9,"i38":9,"i39":9,"i40":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -262,10 +262,28 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
boolean abortOnCorruption) 
 
 
+static void
+initAndStartWorkers(org.apache.hadoop.hbase.procedure2.ProcedureExecutor procExecutor,
+   int numThreads,
+   boolean abortOnCorruption,
+   boolean startWorkers) 
+
+
 static  void
 restart(org.apache.hadoop.hbase.procedure2.ProcedureExecutor procExecutor) 
 
-
+
+static  void
+restart(org.apache.hadoop.hbase.procedure2.ProcedureExecutor procExecutor,
+   boolean abort) 
+
+
+static  void
+restart(org.apache.hadoop.hbase.procedure2.ProcedureExecutor procExecutor,
+   boolean abort,
+   boolean startWorkers) 
+
+
 static  void
 restart(org.apache.hadoop.hbase.procedure2.ProcedureExecutor procExecutor,
boolean avoidTestKillDuringRestart,
@@ -274,37 +292,48 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true";
 title="class or interface in java.util.concurrent">CallableVoid> actionBeforeStartWorker,
https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true";
 title="class or interface in java.util.concurrent">CallableVoid> startAction) 
 
-
+
+static  void
+restart(org.apache.hadoop.hbase.procedure2.ProcedureExecutor procExecutor,
+   boolean avoidTestKillDuringRestart,
+   boolean failOnCorrupted,
+   https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true";
 title="class or interface in java.util.concurrent">CallableVoid> stopAction,
+   https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true";
 title="class or interface in java.util.concurrent">CallableVoid> actionBeforeStartWorker,
+   https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true";
 title="class or interface in java.util.concurrent">CallableVoid> startAction,
+   boolean abort,
+   boolean startWorkers) 
+
+
 static  void
 setKillAndToggleBeforeStoreUpdate(org.apache.hadoop.hbase.procedure2.ProcedureExecutor procExecutor,
  boolean value) 
 
-
+
 static  void
 setKillBeforeStoreUpdate(org.apache.hadoop.hbase.procedure2.ProcedureExecutor procExecutor,
 boolean value) 
 
-
+
 static  void
 setKillIfHasParent(org.apache.hadoop.hbase.procedure2.ProcedureExecutor procExecutor,
   boolean value) 
 
-
+
 static  void
 setKillIfSuspended(org.apache.hadoop.hbase.procedure2.ProcedureExecutor procExecutor,
   boolean value) 
 
-
+
 static  void
 setToggleKillBeforeStoreUpdate(org.apache.hadoop.hbase.procedure

[25/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
index b456cd2..9b964f6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
@@ -105,1302 +105,1320 @@
 097 * will first be initialized to the 
oldest file's tracker(which is stored in the trailer), using the
 098 * method {@link 
ProcedureStoreTracker#resetTo(ProcedureStoreTracker, boolean)}, and then merge 
it
 099 * with the tracker of every newer wal 
files, using the
-100 * {@link 
ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. If we 
find out
-101 * that all the modified procedures for 
the oldest wal file are modified or deleted in newer wal
-102 * files, then we can delete it. This is 
because that, every time we call
-103 * {@link 
ProcedureStore#insert(Procedure[])} or {@link 
ProcedureStore#update(Procedure)}, we will
-104 * persist the full state of a Procedure, 
so the earlier wal records for this procedure can all be
-105 * deleted.
-106 * @see ProcedureWALPrettyPrinter for 
printing content of a single WAL.
-107 * @see #main(String[]) to parse a 
directory of MasterWALProcs.
-108 */
-109@InterfaceAudience.Private
-110public class WALProcedureStore extends 
ProcedureStoreBase {
-111  private static final Logger LOG = 
LoggerFactory.getLogger(WALProcedureStore.class);
-112  public static final String LOG_PREFIX = 
"pv2-";
-113  /** Used to construct the name of the 
log directory for master procedures */
-114  public static final String 
MASTER_PROCEDURE_LOGDIR = "MasterProcWALs";
-115
+100 * {@link 
ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker, 
boolean)}.
+101 * If we find out
+102 * that all the modified procedures for 
the oldest wal file are modified or deleted in newer wal
+103 * files, then we can delete it. This is 
because that, every time we call
+104 * {@link 
ProcedureStore#insert(Procedure[])} or {@link 
ProcedureStore#update(Procedure)}, we will
+105 * persist the full state of a Procedure, 
so the earlier wal records for this procedure can all be
+106 * deleted.
+107 * @see ProcedureWALPrettyPrinter for 
printing content of a single WAL.
+108 * @see #main(String[]) to parse a 
directory of MasterWALProcs.
+109 */
+110@InterfaceAudience.Private
+111public class WALProcedureStore extends 
ProcedureStoreBase {
+112  private static final Logger LOG = 
LoggerFactory.getLogger(WALProcedureStore.class);
+113  public static final String LOG_PREFIX = 
"pv2-";
+114  /** Used to construct the name of the 
log directory for master procedures */
+115  public static final String 
MASTER_PROCEDURE_LOGDIR = "MasterProcWALs";
 116
-117  public interface LeaseRecovery {
-118void recoverFileLease(FileSystem fs, 
Path path) throws IOException;
-119  }
-120
-121  public static final String 
WAL_COUNT_WARN_THRESHOLD_CONF_KEY =
-122
"hbase.procedure.store.wal.warn.threshold";
-123  private static final int 
DEFAULT_WAL_COUNT_WARN_THRESHOLD = 10;
-124
-125  public static final String 
EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY =
-126
"hbase.procedure.store.wal.exec.cleanup.on.load";
-127  private static final boolean 
DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY = true;
-128
-129  public static final String 
MAX_RETRIES_BEFORE_ROLL_CONF_KEY =
-130
"hbase.procedure.store.wal.max.retries.before.roll";
-131  private static final int 
DEFAULT_MAX_RETRIES_BEFORE_ROLL = 3;
-132
-133  public static final String 
WAIT_BEFORE_ROLL_CONF_KEY =
-134
"hbase.procedure.store.wal.wait.before.roll";
-135  private static final int 
DEFAULT_WAIT_BEFORE_ROLL = 500;
-136
-137  public static final String 
ROLL_RETRIES_CONF_KEY =
-138
"hbase.procedure.store.wal.max.roll.retries";
-139  private static final int 
DEFAULT_ROLL_RETRIES = 3;
-140
-141  public static final String 
MAX_SYNC_FAILURE_ROLL_CONF_KEY =
-142
"hbase.procedure.store.wal.sync.failure.roll.max";
-143  private static final int 
DEFAULT_MAX_SYNC_FAILURE_ROLL = 3;
-144
-145  public static final String 
PERIODIC_ROLL_CONF_KEY =
-146
"hbase.procedure.store.wal.periodic.roll.msec";
-147  private static final int 
DEFAULT_PERIODIC_ROLL = 60 * 60 * 1000; // 1h
-148
-149  public static final String 
SYNC_WAIT_MSEC_CONF_KEY = "hbase.procedure.store.wal.sync.wait.msec";
-150  private static final int 
DEFAULT_SYNC_WAIT_MSEC = 100;
-151
-152  public static final String 
USE_HSYNC_CONF_KEY = "hbase.procedure.store.wal.use.hsync";
-153  private static final boolean 
DEFAULT_USE_HSYNC = true;
-154
-155  public static final String 
ROLL_THRESHOLD_CONF_K

[11/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.NoopStateMachineProcedure.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.NoopStateMachineProcedure.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.NoopStateMachineProcedure.html
index eb90a1f..e3d6f54 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.NoopStateMachineProcedure.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.NoopStateMachineProcedure.html
@@ -75,557 +75,583 @@
 067});
 068  }
 069
-070  public static  void 
restart(final ProcedureExecutor procExecutor) throws Exception {
-071restart(procExecutor, false, true, 
null, null, null);
-072  }
-073
-074  public static void 
initAndStartWorkers(ProcedureExecutor procExecutor, int numThreads,
-075  boolean abortOnCorruption) throws 
IOException {
-076procExecutor.init(numThreads, 
abortOnCorruption);
-077procExecutor.startWorkers();
+070  public static  void 
restart(final ProcedureExecutor procExecutor,
+071  boolean abort, boolean 
startWorkers) throws Exception {
+072restart(procExecutor, false, true, 
null, null, null,  abort, startWorkers);
+073  }
+074
+075  public static  void 
restart(final ProcedureExecutor procExecutor,
+076  boolean abort) throws Exception {
+077restart(procExecutor, false, true, 
null, null, null, abort, true);
 078  }
 079
-080  public static  void 
restart(ProcedureExecutor procExecutor,
-081  boolean avoidTestKillDuringRestart, 
boolean failOnCorrupted, Callable stopAction,
-082  Callable 
actionBeforeStartWorker, Callable startAction)
-083  throws Exception {
-084final ProcedureStore procStore = 
procExecutor.getStore();
-085final int storeThreads = 
procExecutor.getCorePoolSize();
-086final int execThreads = 
procExecutor.getCorePoolSize();
-087
-088final ProcedureExecutor.Testing 
testing = procExecutor.testing;
-089if (avoidTestKillDuringRestart) {
-090  procExecutor.testing = null;
-091}
-092
-093// stop
-094LOG.info("RESTART - Stop");
-095procExecutor.stop();
-096procStore.stop(false);
-097if (stopAction != null) {
-098  stopAction.call();
-099}
-100procExecutor.join();
-101
procExecutor.getScheduler().clear();
-102
-103// nothing running...
-104
-105// re-start
-106LOG.info("RESTART - Start");
-107procStore.start(storeThreads);
-108procExecutor.init(execThreads, 
failOnCorrupted);
-109if (actionBeforeStartWorker != null) 
{
-110  actionBeforeStartWorker.call();
-111}
-112procExecutor.startWorkers();
-113if (startAction != null) {
-114  startAction.call();
+080  public static  void 
restart(final ProcedureExecutor procExecutor) throws Exception {
+081restart(procExecutor, false, true, 
null, null, null, false, true);
+082  }
+083
+084  public static void 
initAndStartWorkers(ProcedureExecutor procExecutor, int numThreads,
+085  boolean abortOnCorruption) throws 
IOException {
+086initAndStartWorkers(procExecutor, 
numThreads, abortOnCorruption, true);
+087  }
+088
+089  public static void 
initAndStartWorkers(ProcedureExecutor procExecutor, int numThreads,
+090  boolean abortOnCorruption, boolean 
startWorkers) throws IOException {
+091procExecutor.init(numThreads, 
abortOnCorruption);
+092if (startWorkers) {
+093  procExecutor.startWorkers();
+094}
+095  }
+096
+097  public static  void 
restart(ProcedureExecutor procExecutor,
+098  boolean avoidTestKillDuringRestart, 
boolean failOnCorrupted, Callable stopAction,
+099  Callable 
actionBeforeStartWorker, Callable startAction) throws Exception {
+100restart(procExecutor, 
avoidTestKillDuringRestart, failOnCorrupted, stopAction,
+101  actionBeforeStartWorker, 
startAction, false, true);
+102  }
+103
+104  public static  void 
restart(ProcedureExecutor procExecutor,
+105  boolean avoidTestKillDuringRestart, 
boolean failOnCorrupted, Callable stopAction,
+106  Callable 
actionBeforeStartWorker, Callable startAction, boolean abort,
+107  boolean startWorkers) throws 
Exception {
+108final ProcedureStore procStore = 
procExecutor.getStore();
+109final int storeThreads = 
procExecutor.getCorePoolSize();
+110final int execThreads = 
procExecutor.getCorePoolSize();
+111
+112final ProcedureExecutor.Testing 
testing = procExecutor.testing;
+113if (avoidTestKillDuringRestart) {
+114  procExecutor.testing = null;
 115}
 116
-117if (avoidTestKillDuringRestart) {
-118  procExecutor.

[14/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/org/apache/hadoop/hbase/security/access/class-use/SecureTestUtil.AccessTestAction.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/security/access/class-use/SecureTestUtil.AccessTestAction.html
 
b/testdevapidocs/org/apache/hadoop/hbase/security/access/class-use/SecureTestUtil.AccessTestAction.html
index 7230844f..55e5bdb 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/security/access/class-use/SecureTestUtil.AccessTestAction.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/security/access/class-use/SecureTestUtil.AccessTestAction.html
@@ -143,6 +143,19 @@
 
 
 Uses of SecureTestUtil.AccessTestAction in 
org.apache.hadoop.hbase.security.access
+
+Classes in org.apache.hadoop.hbase.security.access
 that implement SecureTestUtil.AccessTestAction 
+
+Modifier and Type
+Class and Description
+
+
+
+private class 
+TestAccessController.BulkLoadAccessTestAction 
+
+
+
 
 Methods in org.apache.hadoop.hbase.security.access
 that return SecureTestUtil.AccessTestAction 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/org/apache/hadoop/hbase/security/access/class-use/TestAccessController.BulkLoadAccessTestAction.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/security/access/class-use/TestAccessController.BulkLoadAccessTestAction.html
 
b/testdevapidocs/org/apache/hadoop/hbase/security/access/class-use/TestAccessController.BulkLoadAccessTestAction.html
new file mode 100644
index 000..be6d8f6
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/security/access/class-use/TestAccessController.BulkLoadAccessTestAction.html
@@ -0,0 +1,125 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+
+Uses of Class 
org.apache.hadoop.hbase.security.access.TestAccessController.BulkLoadAccessTestAction
 (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+
+
+
+Uses of 
Classorg.apache.hadoop.hbase.security.access.TestAccessController.BulkLoadAccessTestAction
+
+No usage of 
org.apache.hadoop.hbase.security.access.TestAccessController.BulkLoadAccessTestAction
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+
+
+Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/org/apache/hadoop/hbase/security/access/class-use/TestAccessController.BulkLoadHelper.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/security/access/class-use/TestAccessController.BulkLoadHelper.html
 
b/testdevapidocs/org/apache/hadoop/hbase/security/access/class-use/TestAccessController.BulkLoadHelper.html
index 3b0b6f1..4713611 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/security/access/class-use/TestAccessController.BulkLoadHelper.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/security/access/class-use/TestAccessController.BulkLoadHelper.html
@@ -72,7 +72,51 @@
 
 Uses of 
Classorg.apache.hadoop.hbase.security.access.TestAccessController.BulkLoadHelper
 
-No usage of 
org.apache.hadoop.hbase.security.access.TestAccessController.BulkLoadHelper
+
+
+
+
+Packages that use TestAccessController.BulkLoadHelper 
+
+Package
+Description
+
+
+
+org.apache.hadoop.hbase.security.access
+ 
+
+
+
+
+
+
+
+
+
+Uses of TestAccessController.BulkLoadHelper
 in org.apache.hadoop.hbase.security.access
+
+Methods in org.apache.hadoop.hbase.security.access
 that return TestAccessController.BulkLoadHelper 
+
+Modifier and Type
+Method and Description
+
+
+
+private TestAccessController.BulkLoadHelper
+TestAccessController.BulkLoadHelper.initHFileData(byte[] family,
+ byte[] qualifier,
+ byte[][][

[09/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.html
index eb90a1f..e3d6f54 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.html
@@ -75,557 +75,583 @@
 067});
 068  }
 069
-070  public static  void 
restart(final ProcedureExecutor procExecutor) throws Exception {
-071restart(procExecutor, false, true, 
null, null, null);
-072  }
-073
-074  public static void 
initAndStartWorkers(ProcedureExecutor procExecutor, int numThreads,
-075  boolean abortOnCorruption) throws 
IOException {
-076procExecutor.init(numThreads, 
abortOnCorruption);
-077procExecutor.startWorkers();
+070  public static  void 
restart(final ProcedureExecutor procExecutor,
+071  boolean abort, boolean 
startWorkers) throws Exception {
+072restart(procExecutor, false, true, 
null, null, null,  abort, startWorkers);
+073  }
+074
+075  public static  void 
restart(final ProcedureExecutor procExecutor,
+076  boolean abort) throws Exception {
+077restart(procExecutor, false, true, 
null, null, null, abort, true);
 078  }
 079
-080  public static  void 
restart(ProcedureExecutor procExecutor,
-081  boolean avoidTestKillDuringRestart, 
boolean failOnCorrupted, Callable stopAction,
-082  Callable 
actionBeforeStartWorker, Callable startAction)
-083  throws Exception {
-084final ProcedureStore procStore = 
procExecutor.getStore();
-085final int storeThreads = 
procExecutor.getCorePoolSize();
-086final int execThreads = 
procExecutor.getCorePoolSize();
-087
-088final ProcedureExecutor.Testing 
testing = procExecutor.testing;
-089if (avoidTestKillDuringRestart) {
-090  procExecutor.testing = null;
-091}
-092
-093// stop
-094LOG.info("RESTART - Stop");
-095procExecutor.stop();
-096procStore.stop(false);
-097if (stopAction != null) {
-098  stopAction.call();
-099}
-100procExecutor.join();
-101
procExecutor.getScheduler().clear();
-102
-103// nothing running...
-104
-105// re-start
-106LOG.info("RESTART - Start");
-107procStore.start(storeThreads);
-108procExecutor.init(execThreads, 
failOnCorrupted);
-109if (actionBeforeStartWorker != null) 
{
-110  actionBeforeStartWorker.call();
-111}
-112procExecutor.startWorkers();
-113if (startAction != null) {
-114  startAction.call();
+080  public static  void 
restart(final ProcedureExecutor procExecutor) throws Exception {
+081restart(procExecutor, false, true, 
null, null, null, false, true);
+082  }
+083
+084  public static void 
initAndStartWorkers(ProcedureExecutor procExecutor, int numThreads,
+085  boolean abortOnCorruption) throws 
IOException {
+086initAndStartWorkers(procExecutor, 
numThreads, abortOnCorruption, true);
+087  }
+088
+089  public static void 
initAndStartWorkers(ProcedureExecutor procExecutor, int numThreads,
+090  boolean abortOnCorruption, boolean 
startWorkers) throws IOException {
+091procExecutor.init(numThreads, 
abortOnCorruption);
+092if (startWorkers) {
+093  procExecutor.startWorkers();
+094}
+095  }
+096
+097  public static  void 
restart(ProcedureExecutor procExecutor,
+098  boolean avoidTestKillDuringRestart, 
boolean failOnCorrupted, Callable stopAction,
+099  Callable 
actionBeforeStartWorker, Callable startAction) throws Exception {
+100restart(procExecutor, 
avoidTestKillDuringRestart, failOnCorrupted, stopAction,
+101  actionBeforeStartWorker, 
startAction, false, true);
+102  }
+103
+104  public static  void 
restart(ProcedureExecutor procExecutor,
+105  boolean avoidTestKillDuringRestart, 
boolean failOnCorrupted, Callable stopAction,
+106  Callable 
actionBeforeStartWorker, Callable startAction, boolean abort,
+107  boolean startWorkers) throws 
Exception {
+108final ProcedureStore procStore = 
procExecutor.getStore();
+109final int storeThreads = 
procExecutor.getCorePoolSize();
+110final int execThreads = 
procExecutor.getCorePoolSize();
+111
+112final ProcedureExecutor.Testing 
testing = procExecutor.testing;
+113if (avoidTestKillDuringRestart) {
+114  procExecutor.testing = null;
 115}
 116
-117if (avoidTestKillDuringRestart) {
-118  procExecutor.testing = testing;
-119}
-120  }
-121
-122  public static void 
storeRestart(ProcedureStore procStore, ProcedureStore.Procedur

[33/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
 
b/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
index 07c9e88..7028017 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class WALProcedureStore
+public class WALProcedureStore
 extends ProcedureStoreBase
 WAL implementation of the ProcedureStore.
  
@@ -155,7 +155,8 @@ extends ProcedureStoreTracker.resetTo(ProcedureStoreTracker,
 boolean), and then merge it
  with the tracker of every newer wal files, using the
- ProcedureStoreTracker.setDeletedIfModifiedInBoth(ProcedureStoreTracker).
 If we find out
+ ProcedureStoreTracker.setDeletedIfModifiedInBoth(ProcedureStoreTracker,
 boolean).
+ If we find out
  that all the modified procedures for the oldest wal file are modified or 
deleted in newer wal
  files, then we can delete it. This is because that, every time we call
  ProcedureStore.insert(Procedure[])
 or ProcedureStore.update(Procedure),
 we will
@@ -529,7 +530,7 @@ extends 
 private void
-closeCurrentLogStream() 
+closeCurrentLogStream(boolean abort) 
 
 
 void
@@ -693,7 +694,8 @@ extends 
 private void
-removeAllLogs(long lastLogId)
+removeAllLogs(long lastLogId,
+ https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String why)
 Remove all logs with logId <= lastLogId.
 
 
@@ -719,7 +721,7 @@ extends rollWriter(long logId) 
 
 
-(package private) boolean
+boolean
 rollWriterForTesting() 
 
 
@@ -823,7 +825,7 @@ extends 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -832,7 +834,7 @@ extends 
 
 LOG_PREFIX
-public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String LOG_PREFIX
+public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String LOG_PREFIX
 
 See Also:
 Constant
 Field Values
@@ -845,7 +847,7 @@ extends 
 
 MASTER_PROCEDURE_LOGDIR
-public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String MASTER_PROCEDURE_LOGDIR
+public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String MASTER_PROCEDURE_LOGDIR
 Used to construct the name of the log directory for master 
procedures
 
 See Also:
@@ -859,7 +861,7 @@ extends 
 
 WAL_COUNT_WARN_THRESHOLD_CONF_KEY
-public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String WAL_COUNT_WARN_THRESHOLD_CONF_KEY
+public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String WAL_COUNT_WARN_THRESHOLD_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -872,7 +874,7 @@ extends 
 
 DEFAULT_WAL_COUNT_WARN_THRESHOLD
-private static final int DEFAULT_WAL_COUNT_WARN_THRESHOLD
+private static final int DEFAULT_WAL_COUNT_WARN_THRESHOLD
 
 See Also:
 Constant
 Field Values
@@ -885,7 +887,7 @@ extends 
 
 EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY
-public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY
+public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -898,7 +900,7 @@ extends 
 
 DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY
-private static final boolean DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY
+private static final boolean DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -911,7 +913,7 @@ extends 
 
 MAX_RETRIES_BEFORE_ROLL_CONF_KEY
-public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String MAX_RETRIES_BEFORE_ROLL_CONF_KEY
+public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String MAX_RETRIES_BEFORE_ROLL_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -924,7 +926,7 @@ extends 
 
 DEFAULT_MAX_RETRIES_BEFORE_ROLL
-private static final int DEFAULT_MAX_

[16/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadAccessTestAction.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadAccessTestAction.html
 
b/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadAccessTestAction.html
new file mode 100644
index 000..c479bc8
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadAccessTestAction.html
@@ -0,0 +1,339 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+
+TestAccessController.BulkLoadAccessTestAction (Apache HBase 
3.0.0-SNAPSHOT Test API)
+
+
+
+
+
+var methods = {"i0":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev Class
+Next Class
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+Summary: 
+Nested | 
+Field | 
+Constr | 
+Method
+
+
+Detail: 
+Field | 
+Constr | 
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.security.access
+Class TestAccessController.BulkLoadAccessTestAction
+
+
+
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.security.access.TestAccessController.BulkLoadAccessTestAction
+
+
+
+
+
+
+
+All Implemented Interfaces:
+https://docs.oracle.com/javase/8/docs/api/java/security/PrivilegedExceptionAction.html?is-external=true";
 title="class or interface in 
java.security">PrivilegedExceptionActionObject>, SecureTestUtil.AccessTestAction
+
+
+Enclosing class:
+TestAccessController
+
+
+
+private class TestAccessController.BulkLoadAccessTestAction
+extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
+implements SecureTestUtil.AccessTestAction
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields 
+
+Modifier and Type
+Field and Description
+
+
+private 
org.apache.hadoop.fs.permission.FsPermission
+filePermission 
+
+
+private org.apache.hadoop.fs.Path
+testDataDir 
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors 
+
+Constructor and Description
+
+
+BulkLoadAccessTestAction(org.apache.hadoop.fs.permission.FsPermission perm,
+
org.apache.hadoop.fs.Path testDataDir) 
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All Methods Instance Methods Concrete Methods 
+
+Modifier and Type
+Method and Description
+
+
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
+run() 
+
+
+
+
+
+
+Methods inherited from class java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--";
 title="class or interface in java.lang">clone, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-";
 title="class or interface in java.lang">equals, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--";
 title="class or interface in java.lang">finalize, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--";
 title="class or interface in java.lang">getClass, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--";
 title="class or interface in java.lang">hashCode, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--";
 title="class or interface in java.lang">notify, https://docs.oracle.com/javase/8/docs/api/ja
 va/lang/Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--";
 title="class or interface in java.lang">toString, https://docs.oracle.com/javase/8/docs/a

[28/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html
index b456cd2..9b964f6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html
@@ -105,1302 +105,1320 @@
 097 * will first be initialized to the 
oldest file's tracker(which is stored in the trailer), using the
 098 * method {@link 
ProcedureStoreTracker#resetTo(ProcedureStoreTracker, boolean)}, and then merge 
it
 099 * with the tracker of every newer wal 
files, using the
-100 * {@link 
ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. If we 
find out
-101 * that all the modified procedures for 
the oldest wal file are modified or deleted in newer wal
-102 * files, then we can delete it. This is 
because that, every time we call
-103 * {@link 
ProcedureStore#insert(Procedure[])} or {@link 
ProcedureStore#update(Procedure)}, we will
-104 * persist the full state of a Procedure, 
so the earlier wal records for this procedure can all be
-105 * deleted.
-106 * @see ProcedureWALPrettyPrinter for 
printing content of a single WAL.
-107 * @see #main(String[]) to parse a 
directory of MasterWALProcs.
-108 */
-109@InterfaceAudience.Private
-110public class WALProcedureStore extends 
ProcedureStoreBase {
-111  private static final Logger LOG = 
LoggerFactory.getLogger(WALProcedureStore.class);
-112  public static final String LOG_PREFIX = 
"pv2-";
-113  /** Used to construct the name of the 
log directory for master procedures */
-114  public static final String 
MASTER_PROCEDURE_LOGDIR = "MasterProcWALs";
-115
+100 * {@link 
ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker, 
boolean)}.
+101 * If we find out
+102 * that all the modified procedures for 
the oldest wal file are modified or deleted in newer wal
+103 * files, then we can delete it. This is 
because that, every time we call
+104 * {@link 
ProcedureStore#insert(Procedure[])} or {@link 
ProcedureStore#update(Procedure)}, we will
+105 * persist the full state of a Procedure, 
so the earlier wal records for this procedure can all be
+106 * deleted.
+107 * @see ProcedureWALPrettyPrinter for 
printing content of a single WAL.
+108 * @see #main(String[]) to parse a 
directory of MasterWALProcs.
+109 */
+110@InterfaceAudience.Private
+111public class WALProcedureStore extends 
ProcedureStoreBase {
+112  private static final Logger LOG = 
LoggerFactory.getLogger(WALProcedureStore.class);
+113  public static final String LOG_PREFIX = 
"pv2-";
+114  /** Used to construct the name of the 
log directory for master procedures */
+115  public static final String 
MASTER_PROCEDURE_LOGDIR = "MasterProcWALs";
 116
-117  public interface LeaseRecovery {
-118void recoverFileLease(FileSystem fs, 
Path path) throws IOException;
-119  }
-120
-121  public static final String 
WAL_COUNT_WARN_THRESHOLD_CONF_KEY =
-122
"hbase.procedure.store.wal.warn.threshold";
-123  private static final int 
DEFAULT_WAL_COUNT_WARN_THRESHOLD = 10;
-124
-125  public static final String 
EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY =
-126
"hbase.procedure.store.wal.exec.cleanup.on.load";
-127  private static final boolean 
DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY = true;
-128
-129  public static final String 
MAX_RETRIES_BEFORE_ROLL_CONF_KEY =
-130
"hbase.procedure.store.wal.max.retries.before.roll";
-131  private static final int 
DEFAULT_MAX_RETRIES_BEFORE_ROLL = 3;
-132
-133  public static final String 
WAIT_BEFORE_ROLL_CONF_KEY =
-134
"hbase.procedure.store.wal.wait.before.roll";
-135  private static final int 
DEFAULT_WAIT_BEFORE_ROLL = 500;
-136
-137  public static final String 
ROLL_RETRIES_CONF_KEY =
-138
"hbase.procedure.store.wal.max.roll.retries";
-139  private static final int 
DEFAULT_ROLL_RETRIES = 3;
-140
-141  public static final String 
MAX_SYNC_FAILURE_ROLL_CONF_KEY =
-142
"hbase.procedure.store.wal.sync.failure.roll.max";
-143  private static final int 
DEFAULT_MAX_SYNC_FAILURE_ROLL = 3;
-144
-145  public static final String 
PERIODIC_ROLL_CONF_KEY =
-146
"hbase.procedure.store.wal.periodic.roll.msec";
-147  private static final int 
DEFAULT_PERIODIC_ROLL = 60 * 60 * 1000; // 1h
-148
-149  public static final String 
SYNC_WAIT_MSEC_CONF_KEY = "hbase.procedure.store.wal.sync.wait.msec";
-150  private static final int 
DEFAULT_SYNC_WAIT_MSEC = 100;
-151
-152  public static final String 
USE_HSYNC_CONF_KEY = "hbase.procedure.store.wal.use.hsync";
-153  private static final boolean 
DEFAULT_USE_HSYN

[34/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/devapidocs/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html
 
b/devapidocs/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html
index ae204cf..9f55028 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -251,82 +251,90 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 private BitSetNode
+getOrCreateNodeNoGrowOrMerge(long procId) 
+
+
+private BitSetNode
+getOrCreateNodeWithGrowOrMerge(long procId) 
+
+
+private BitSetNode
 growNode(BitSetNode node,
 long procId)
 Grows node to contain procId and 
updates the map.
 
 
-
+
 private BitSetNode
 insert(BitSetNode node,
   long procId) 
 
-
+
 void
 insert(long procId) 
 
-
+
 void
 insert(long[] procIds) 
 
-
+
 void
 insert(long procId,
   long[] subProcIds) 
 
-
+
 boolean
 isAllModified() 
 
-
+
 ProcedureStoreTracker.DeleteState
 isDeleted(long procId)
 If partial
 is false, returns state from the bitmap.
 
 
-
+
 boolean
 isEmpty() 
 
-
+
 boolean
 isModified(long procId) 
 
-
+
 boolean
 isPartial() 
 
-
+
 private BitSetNode
 lookupClosestNode(BitSetNode node,
  long procId)
 lookup the node containing the specified procId.
 
 
-
+
 private BitSetNode
 mergeNodes(BitSetNode leftNode,
   BitSetNode rightNode)
 Merges leftNode & rightNode and 
updates the map.
 
 
-
+
 void
 reset() 
 
-
+
 void
 resetModified()
 Clears the list of updated procedure ids.
 
 
-
+
 void
 resetTo(ProcedureStoreTracker tracker)
 Resets internal state to same as given 
tracker.
 
 
-
+
 void
 resetTo(ProcedureStoreTracker tracker,
boolean resetDelete)
@@ -334,46 +342,47 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
  to the modified flag if resetDelete is true.
 
 
-
+
 void
 resetToProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureStoreTracker trackerProtoBuf) 
 
-
+
 void
 setDeleted(long procId,
   boolean isDeleted)
 This method is used when restarting where we need to 
rebuild the ProcedureStoreTracker.
 
 
-
+
 void
 setDeletedIfModified(long... procId)
 Set the given bit for the procId to delete if it was 
modified before.
 
 
-
+
 void
-setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker)
+setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker,
+  boolean globalTracker)
 Similar with setDeletedIfModified(long...),
 but here the procId are given by
  the tracker.
 
 
-
+
 void
 setKeepDeletes(boolean keepDeletes) 
 
-
+
 void
 setMinMaxModifiedProcIds(long min,
 long max)
 Will be called when restarting where we need to rebuild the 
ProcedureStoreTracker.
 
 
-
+
 void
 setPartialFlag(boolean isPartial) 
 
-
+
 org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureStoreTracker
 toProto()
 Builds
@@ -381,16 +390,16 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
  protocol buffer from current state.
 
 
-
+
 private void
 trackProcIds(long procId) 
 
-
+
 private BitSetNode
 update(BitSetNode node,
   long procId) 
 
-
+
 void
 update(long procId) 
 
@@ -652,13 +661,14 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
  then we can delete it.
 
 
-
+
 
 
 
 
 setDeletedIfModifiedInBoth
-public void setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker)
+public void setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker,
+   boolean globalTracker)
 Similar with setDeletedIfModified(long...),
 but here the procId are given by
  the tracker. If a procedure is modified by us, and also by the 
given tracker,
  then we mark it as deleted.
@@ -674,7 +684,7 @@ extends https://docs.oracle.com/javase/8/docs/api/

[07/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.WaitProcedure.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.WaitProcedure.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.WaitProcedure.html
new file mode 100644
index 000..6db16bd
--- /dev/null
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.WaitProcedure.html
@@ -0,0 +1,314 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing, software
+013 * distributed under the License is 
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+015 * See the License for the specific 
language governing permissions and
+016 * limitations under the License.
+017 */
+018package 
org.apache.hadoop.hbase.procedure2;
+019
+020import static 
org.junit.Assert.assertTrue;
+021
+022import 
java.util.concurrent.CountDownLatch;
+023
+024import org.apache.hadoop.fs.FileSystem;
+025import org.apache.hadoop.fs.Path;
+026import 
org.apache.hadoop.hbase.HBaseClassTestRule;
+027import 
org.apache.hadoop.hbase.HBaseCommonTestingUtility;
+028import 
org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
+029import 
org.apache.hadoop.hbase.testclassification.MasterTests;
+030import 
org.apache.hadoop.hbase.testclassification.SmallTests;
+031import org.junit.Assert;
+032import org.junit.BeforeClass;
+033import org.junit.ClassRule;
+034import org.junit.Test;
+035import 
org.junit.experimental.categories.Category;
+036import org.slf4j.Logger;
+037import org.slf4j.LoggerFactory;
+038
+039
+040@Category({MasterTests.class, 
SmallTests.class})
+041public class TestProcedureCleanup {
+042  @ClassRule public static final 
HBaseClassTestRule CLASS_RULE = HBaseClassTestRule
+043  
.forClass(TestProcedureCleanup.class);
+044
+045
+046  private static final Logger LOG = 
LoggerFactory.getLogger(TestProcedureCleanup.class);
+047  private static final int 
PROCEDURE_EXECUTOR_SLOTS = 1;
+048
+049  private static TestProcEnv procEnv;
+050  private static WALProcedureStore 
procStore;
+051
+052  private static 
ProcedureExecutor procExecutor;
+053
+054  private static 
HBaseCommonTestingUtility htu;
+055
+056  private static FileSystem fs;
+057  private static Path testDir;
+058  private static Path logDir;
+059
+060  private static class TestProcEnv {
+061
+062  }
+063
+064  private void createProcExecutor(String 
dir) throws Exception {
+065logDir = new Path(testDir, dir);
+066procStore = 
ProcedureTestingUtility.createWalStore(htu.getConfiguration(), logDir);
+067procExecutor = new 
ProcedureExecutor<>(htu.getConfiguration(), procEnv,
+068procStore);
+069
procStore.start(PROCEDURE_EXECUTOR_SLOTS);
+070ProcedureTestingUtility
+071
.initAndStartWorkers(procExecutor, PROCEDURE_EXECUTOR_SLOTS, true, true);
+072  }
+073
+074  @BeforeClass
+075  public static void setUp() throws 
Exception {
+076htu = new 
HBaseCommonTestingUtility();
+077
+078// NOTE: The executor will be created 
by each test
+079procEnv = new TestProcEnv();
+080testDir = htu.getDataTestDir();
+081fs = 
testDir.getFileSystem(htu.getConfiguration());
+082assertTrue(testDir.depth() > 1);
+083
+084
+085  }
+086
+087  @Test
+088  public void 
testProcedureShouldNotCleanOnLoad() throws Exception {
+089
createProcExecutor("testProcedureShouldNotCleanOnLoad");
+090final RootProcedure proc = new 
RootProcedure();
+091long rootProc = 
procExecutor.submitProcedure(proc);
+092LOG.info("Begin to execute " + 
rootProc);
+093// wait until the child procedure 
arrival
+094
while(procExecutor.getProcedures().size() < 2) {
+095  Thread.sleep(100);
+096}
+097SuspendProcedure suspendProcedure = 
(SuspendProcedure) procExecutor
+098.getProcedures().get(1);
+099// wait until the suspendProcedure 
executed
+100suspendProcedure.latch.countDown();
+101Thread.sleep(100);
+102// roll the procedure log
+103LOG.info("Begin to roll log ");
+104procStore.rollWriterForTesting();
+105LOG.info("finish to roll log ");
+106Thread.sleep(500);
+107LOG.info

[10/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.TestProcedure.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.TestProcedure.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.TestProcedure.html
index eb90a1f..e3d6f54 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.TestProcedure.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.TestProcedure.html
@@ -75,557 +75,583 @@
 067});
 068  }
 069
-070  public static  void 
restart(final ProcedureExecutor procExecutor) throws Exception {
-071restart(procExecutor, false, true, 
null, null, null);
-072  }
-073
-074  public static void 
initAndStartWorkers(ProcedureExecutor procExecutor, int numThreads,
-075  boolean abortOnCorruption) throws 
IOException {
-076procExecutor.init(numThreads, 
abortOnCorruption);
-077procExecutor.startWorkers();
+070  public static  void 
restart(final ProcedureExecutor procExecutor,
+071  boolean abort, boolean 
startWorkers) throws Exception {
+072restart(procExecutor, false, true, 
null, null, null,  abort, startWorkers);
+073  }
+074
+075  public static  void 
restart(final ProcedureExecutor procExecutor,
+076  boolean abort) throws Exception {
+077restart(procExecutor, false, true, 
null, null, null, abort, true);
 078  }
 079
-080  public static  void 
restart(ProcedureExecutor procExecutor,
-081  boolean avoidTestKillDuringRestart, 
boolean failOnCorrupted, Callable stopAction,
-082  Callable 
actionBeforeStartWorker, Callable startAction)
-083  throws Exception {
-084final ProcedureStore procStore = 
procExecutor.getStore();
-085final int storeThreads = 
procExecutor.getCorePoolSize();
-086final int execThreads = 
procExecutor.getCorePoolSize();
-087
-088final ProcedureExecutor.Testing 
testing = procExecutor.testing;
-089if (avoidTestKillDuringRestart) {
-090  procExecutor.testing = null;
-091}
-092
-093// stop
-094LOG.info("RESTART - Stop");
-095procExecutor.stop();
-096procStore.stop(false);
-097if (stopAction != null) {
-098  stopAction.call();
-099}
-100procExecutor.join();
-101
procExecutor.getScheduler().clear();
-102
-103// nothing running...
-104
-105// re-start
-106LOG.info("RESTART - Start");
-107procStore.start(storeThreads);
-108procExecutor.init(execThreads, 
failOnCorrupted);
-109if (actionBeforeStartWorker != null) 
{
-110  actionBeforeStartWorker.call();
-111}
-112procExecutor.startWorkers();
-113if (startAction != null) {
-114  startAction.call();
+080  public static  void 
restart(final ProcedureExecutor procExecutor) throws Exception {
+081restart(procExecutor, false, true, 
null, null, null, false, true);
+082  }
+083
+084  public static void 
initAndStartWorkers(ProcedureExecutor procExecutor, int numThreads,
+085  boolean abortOnCorruption) throws 
IOException {
+086initAndStartWorkers(procExecutor, 
numThreads, abortOnCorruption, true);
+087  }
+088
+089  public static void 
initAndStartWorkers(ProcedureExecutor procExecutor, int numThreads,
+090  boolean abortOnCorruption, boolean 
startWorkers) throws IOException {
+091procExecutor.init(numThreads, 
abortOnCorruption);
+092if (startWorkers) {
+093  procExecutor.startWorkers();
+094}
+095  }
+096
+097  public static  void 
restart(ProcedureExecutor procExecutor,
+098  boolean avoidTestKillDuringRestart, 
boolean failOnCorrupted, Callable stopAction,
+099  Callable 
actionBeforeStartWorker, Callable startAction) throws Exception {
+100restart(procExecutor, 
avoidTestKillDuringRestart, failOnCorrupted, stopAction,
+101  actionBeforeStartWorker, 
startAction, false, true);
+102  }
+103
+104  public static  void 
restart(ProcedureExecutor procExecutor,
+105  boolean avoidTestKillDuringRestart, 
boolean failOnCorrupted, Callable stopAction,
+106  Callable 
actionBeforeStartWorker, Callable startAction, boolean abort,
+107  boolean startWorkers) throws 
Exception {
+108final ProcedureStore procStore = 
procExecutor.getStore();
+109final int storeThreads = 
procExecutor.getCorePoolSize();
+110final int execThreads = 
procExecutor.getCorePoolSize();
+111
+112final ProcedureExecutor.Testing 
testing = procExecutor.testing;
+113if (avoidTestKillDuringRestart) {
+114  procExecutor.testing = null;
 115}
 116
-117if (avoidTestKillDuringRestart) {
-118  procExecutor.testing = testing;
-119}
-120  }
-121
-122  public stati

[06/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadAccessTestAction.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadAccessTestAction.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadAccessTestAction.html
new file mode 100644
index 000..5062e9b
--- /dev/null
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadAccessTestAction.html
@@ -0,0 +1,3705 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing, software
+013 * distributed under the License is 
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+015 * See the License for the specific 
language governing permissions and
+016 * limitations under the License.
+017 */
+018package 
org.apache.hadoop.hbase.security.access;
+019
+020import static 
org.apache.hadoop.hbase.AuthUtil.toGroupEntry;
+021import static 
org.junit.Assert.assertArrayEquals;
+022import static 
org.junit.Assert.assertEquals;
+023import static 
org.junit.Assert.assertFalse;
+024import static 
org.junit.Assert.assertNotNull;
+025import static 
org.junit.Assert.assertTrue;
+026import static org.junit.Assert.fail;
+027
+028import 
com.google.protobuf.BlockingRpcChannel;
+029import com.google.protobuf.RpcCallback;
+030import 
com.google.protobuf.RpcController;
+031import com.google.protobuf.Service;
+032import 
com.google.protobuf.ServiceException;
+033import java.io.IOException;
+034import java.security.PrivilegedAction;
+035import java.util.ArrayList;
+036import java.util.Arrays;
+037import java.util.Collection;
+038import java.util.Collections;
+039import java.util.List;
+040import 
org.apache.hadoop.conf.Configuration;
+041import 
org.apache.hadoop.fs.CommonConfigurationKeys;
+042import org.apache.hadoop.fs.FileStatus;
+043import org.apache.hadoop.fs.FileSystem;
+044import org.apache.hadoop.fs.Path;
+045import 
org.apache.hadoop.fs.permission.FsPermission;
+046import 
org.apache.hadoop.hbase.Coprocessor;
+047import 
org.apache.hadoop.hbase.CoprocessorEnvironment;
+048import 
org.apache.hadoop.hbase.HBaseClassTestRule;
+049import 
org.apache.hadoop.hbase.HBaseIOException;
+050import 
org.apache.hadoop.hbase.HBaseTestingUtility;
+051import 
org.apache.hadoop.hbase.HColumnDescriptor;
+052import 
org.apache.hadoop.hbase.HConstants;
+053import 
org.apache.hadoop.hbase.HRegionInfo;
+054import 
org.apache.hadoop.hbase.HRegionLocation;
+055import 
org.apache.hadoop.hbase.HTableDescriptor;
+056import 
org.apache.hadoop.hbase.KeyValue;
+057import 
org.apache.hadoop.hbase.MiniHBaseCluster;
+058import 
org.apache.hadoop.hbase.NamespaceDescriptor;
+059import 
org.apache.hadoop.hbase.ServerName;
+060import 
org.apache.hadoop.hbase.TableName;
+061import 
org.apache.hadoop.hbase.TableNotFoundException;
+062import 
org.apache.hadoop.hbase.client.Admin;
+063import 
org.apache.hadoop.hbase.client.Append;
+064import 
org.apache.hadoop.hbase.client.Connection;
+065import 
org.apache.hadoop.hbase.client.ConnectionFactory;
+066import 
org.apache.hadoop.hbase.client.Delete;
+067import 
org.apache.hadoop.hbase.client.Get;
+068import 
org.apache.hadoop.hbase.client.Increment;
+069import 
org.apache.hadoop.hbase.client.MasterSwitchType;
+070import 
org.apache.hadoop.hbase.client.Put;
+071import 
org.apache.hadoop.hbase.client.RegionLocator;
+072import 
org.apache.hadoop.hbase.client.Result;
+073import 
org.apache.hadoop.hbase.client.ResultScanner;
+074import 
org.apache.hadoop.hbase.client.Scan;
+075import 
org.apache.hadoop.hbase.client.SnapshotDescription;
+076import 
org.apache.hadoop.hbase.client.Table;
+077import 
org.apache.hadoop.hbase.client.security.SecurityCapability;
+078import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+079import 
org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+080import 
org.apache.hadoop.hbase.coprocessor.ObserverContextImpl;
+081import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
+082import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+083import 
org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;

[36/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index c084b41..34fbb96 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Checkstyle Results
 
@@ -291,10 +291,10 @@
  Warnings
  Errors
 
-3796
+3797
 0
 0
-15099
+15098
 
 Files
 
@@ -8287,7 +8287,7 @@
 org/apache/hadoop/hbase/security/access/TestAccessController.java
 0
 0
-18
+17
 
 org/apache/hadoop/hbase/security/access/TestAccessController2.java
 0
@@ -9734,7 +9734,7 @@
 
 
 http://checkstyle.sourceforge.net/config_blocks.html#NeedBraces";>NeedBraces
-1798
+1797
  Error
 
 coding
@@ -71446,13 +71446,13 @@
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-197
+223
 
  Error
 blocks
 LeftCurly
 '{' at column 39 should have line break after.
-390
+416
 
 org/apache/hadoop/hbase/procedure2/ProcedureUtil.java
 
@@ -71902,7 +71902,7 @@
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-233
+250
 
 org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java
 
@@ -72127,25 +72127,25 @@
 sizes
 LineLength
 Line is longer than 100 characters (found 105).
-251
+252
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-654
+655
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-941
+942
 
  Error
 indentation
 Indentation
 'if' child has incorrect indentation level 8, expected level should be 
6.
-1079
+1080
 
 org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java
 
@@ -77791,259 +77791,259 @@
 sizes
 LineLength
 Line is longer than 100 characters (found 106).
-133
+134
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-236
+237
 
  Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-239
+240
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-240
+241
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-396
+397
 
  Error
 annotation
 MissingDeprecated
 Must include both @java.lang.Deprecated annotation and @deprecated Javadoc 
tag with description.
-519
+520
 
  Error
 annotation
 MissingDeprecated
 Must include both @java.lang.Deprecated annotation and @deprecated Javadoc 
tag with description.
-531
+532
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-555
+556
 
  Error
 sizes
 LineLength
 Line is longer than 100 characters (found 113).
-570
+571
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-590
+591
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-592
+593
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-626
+627
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-641
+642
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-647
+648
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-648
+649
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-649
+650
 
  Error
 indentation
 Indentation
 'if' child has incorrect indentation level 6, expected level should be 
8.
-844
+846
 
  Error
 indentation
 Indentation
 'method call' child has incorrect indentation level 9, expected level 
should be 10.
-845
+847
 
  Error
 indentation
 Indentation
 'method call' child has incorrect indentation level 9, expected level 
should be 10.
-846
+848
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-849
+851
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-857
+859
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-969
+971
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-975
+977
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1003
+1005
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1004
+1006
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1005
+1007
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1057
+1059
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1058
+1060
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1060
+1062
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1083
+1085
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1152
+1154
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1153
+1155
 
  Error
 javadoc
 NonEmptyAtclauseDe

[04/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html
index dc287aa..5062e9b 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html
@@ -163,848 +163,848 @@
 155  public static final HBaseClassTestRule 
CLASS_RULE =
 156  
HBaseClassTestRule.forClass(TestAccessController.class);
 157
-158  private static final Logger LOG = 
LoggerFactory.getLogger(TestAccessController.class);
-159  private static TableName TEST_TABLE = 
TableName.valueOf("testtable1");
-160  private static final 
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-161  private static Configuration conf;
-162
-163  /** The systemUserConnection created 
here is tied to the system user. In case, you are planning
-164   * to create AccessTestAction, DON'T 
use this systemUserConnection as the 'doAs' user
-165   * gets  eclipsed by the system user. 
*/
-166  private static Connection 
systemUserConnection;
-167
+158  private static final FsPermission 
FS_PERMISSION_ALL = FsPermission.valueOf("-rwxrwxrwx");
+159  private static final Logger LOG = 
LoggerFactory.getLogger(TestAccessController.class);
+160  private static TableName TEST_TABLE = 
TableName.valueOf("testtable1");
+161  private static final 
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+162  private static Configuration conf;
+163
+164  /** The systemUserConnection created 
here is tied to the system user. In case, you are planning
+165   * to create AccessTestAction, DON'T 
use this systemUserConnection as the 'doAs' user
+166   * gets  eclipsed by the system user. 
*/
+167  private static Connection 
systemUserConnection;
 168
-169  // user with all permissions
-170  private static User SUPERUSER;
-171  // user granted with all global 
permission
-172  private static User USER_ADMIN;
-173  // user with rw permissions on column 
family.
-174  private static User USER_RW;
-175  // user with read-only permissions
-176  private static User USER_RO;
-177  // user is table owner. will have all 
permissions on table
-178  private static User USER_OWNER;
-179  // user with create table permissions 
alone
-180  private static User USER_CREATE;
-181  // user with no permissions
-182  private static User USER_NONE;
-183  // user with admin rights on the column 
family
-184  private static User USER_ADMIN_CF;
-185
-186  private static final String GROUP_ADMIN 
= "group_admin";
-187  private static final String 
GROUP_CREATE = "group_create";
-188  private static final String GROUP_READ 
= "group_read";
-189  private static final String GROUP_WRITE 
= "group_write";
-190
-191  private static User USER_GROUP_ADMIN;
-192  private static User 
USER_GROUP_CREATE;
-193  private static User USER_GROUP_READ;
-194  private static User USER_GROUP_WRITE;
-195
-196  // TODO: convert this test to cover the 
full matrix in
-197  // 
https://hbase.apache.org/book/appendix_acl_matrix.html
-198  // creating all Scope x Permission 
combinations
-199
-200  private static TableName TEST_TABLE2 = 
TableName.valueOf("testtable2");
-201  private static byte[] TEST_FAMILY = 
Bytes.toBytes("f1");
-202  private static byte[] TEST_QUALIFIER = 
Bytes.toBytes("q1");
-203  private static byte[] TEST_ROW = 
Bytes.toBytes("r1");
-204
-205  private static 
MasterCoprocessorEnvironment CP_ENV;
-206  private static AccessController 
ACCESS_CONTROLLER;
-207  private static 
RegionServerCoprocessorEnvironment RSCP_ENV;
-208  private static 
RegionCoprocessorEnvironment RCP_ENV;
-209
-210  @Rule
-211  public TestName name = new 
TestName();
-212
-213  @BeforeClass
-214  public static void setupBeforeClass() 
throws Exception {
-215// setup configuration
-216conf = 
TEST_UTIL.getConfiguration();
-217// Up the handlers; this test needs 
more than usual.
-218
conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
-219
-220
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
-221  
MyShellBasedUnixGroupsMapping.class.getName());
-222
UserGroupInformation.setConfiguration(conf);
-223
-224// Enable security
-225enableSecurity(conf);
-226// In this particular test case, we 
can't use SecureBulkLoadEndpoint because its doAs will fail
-227// to move a file for a random user
-228
conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, 
AccessController.class.getName());
-229// Verify ena

[35/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 6326ca3..e027e1c 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -25,8 +25,8 @@ under the License.
 en-us
 ©2007 - 2018 The Apache Software Foundation
 
-  File: 3796,
- Errors: 15099,
+  File: 3797,
+ Errors: 15098,
  Warnings: 0,
  Infos: 0
   
@@ -4017,7 +4017,7 @@ under the License.
   0
 
 
-  18
+  17
 
   
   
@@ -27850,6 +27850,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.procedure2.TestProcedureCleanup.java";>org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.java
+
+
+  0
+
+
+  0
+
+
+  0
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.master.TestMasterMetrics.java";>org/apache/hadoop/hbase/master/TestMasterMetrics.java
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/coc.html
--
diff --git a/coc.html b/coc.html
index cbe6bb9..7c3c028 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – 
   Code of Conduct Policy
@@ -385,7 +385,7 @@ email to mailto:priv...@hbase.apache.org";>the priv
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-10-22
+  Last Published: 
2018-10-23
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index 60010d7..716b835 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Dependencies
 
@@ -450,7 +450,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-10-22
+  Last Published: 
2018-10-23
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index 3a4e1db..309363f 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Reactor Dependency Convergence
 
@@ -889,7 +889,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-10-22
+  Last Published: 
2018-10-23
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index 2376bdc..aa0a365 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Dependency Information
 
@@ -323,7 +323,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-10-22
+  Last Published: 
2018-10-23
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/dependency-management.html
--
diff --git a/dependency-management.html b/dependency-management.html
index 5d62f6c..5852a35 100644
--- a/dependency-management.html
+++ b/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Dependency Management
 
@@ -1015,7 +1015,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-10-22
+  Last Published: 
2018-10-23
 
 
 

http://git-

[31/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
index 940e63e..de64998 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class HStore
+public class HStore
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements Store, HeapSize, StoreConfigInformation, PropagatingConfigurationObserver
 A Store holds a column family in a Region.  Its a memstore 
and a set of zero
@@ -1275,7 +1275,7 @@ implements 
 
 MEMSTORE_CLASS_NAME
-public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String MEMSTORE_CLASS_NAME
+public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String MEMSTORE_CLASS_NAME
 
 See Also:
 Constant
 Field Values
@@ -1288,7 +1288,7 @@ implements 
 
 COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY
-public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY
+public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY
 
 See Also:
 Constant
 Field Values
@@ -1301,7 +1301,7 @@ implements 
 
 BLOCKING_STOREFILES_KEY
-public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String BLOCKING_STOREFILES_KEY
+public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String BLOCKING_STOREFILES_KEY
 
 See Also:
 Constant
 Field Values
@@ -1314,7 +1314,7 @@ implements 
 
 BLOCK_STORAGE_POLICY_KEY
-public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String BLOCK_STORAGE_POLICY_KEY
+public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String BLOCK_STORAGE_POLICY_KEY
 
 See Also:
 Constant
 Field Values
@@ -1327,7 +1327,7 @@ implements 
 
 DEFAULT_BLOCK_STORAGE_POLICY
-public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String DEFAULT_BLOCK_STORAGE_POLICY
+public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String DEFAULT_BLOCK_STORAGE_POLICY
 
 See Also:
 Constant
 Field Values
@@ -1340,7 +1340,7 @@ implements 
 
 DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER
-public static final int DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER
+public static final int DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER
 
 See Also:
 Constant
 Field Values
@@ -1353,7 +1353,7 @@ implements 
 
 DEFAULT_BLOCKING_STOREFILE_COUNT
-public static final int DEFAULT_BLOCKING_STOREFILE_COUNT
+public static final int DEFAULT_BLOCKING_STOREFILE_COUNT
 
 See Also:
 Constant
 Field Values
@@ -1366,7 +1366,7 @@ implements 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -1375,7 +1375,7 @@ implements 
 
 memstore
-protected final MemStore memstore
+protected final MemStore memstore
 
 
 
@@ -1384,7 +1384,7 @@ implements 
 
 region
-protected final HRegion region
+protected final HRegion region
 
 
 
@@ -1393,7 +1393,7 @@ implements 
 
 family
-private final ColumnFamilyDescriptor 
family
+private final ColumnFamilyDescriptor 
family
 
 
 
@@ -1402,7 +1402,7 @@ implements 
 
 fs
-private final HRegionFileSystem fs
+private final HRegionFileSystem fs
 
 
 
@@ -1411,7 +1411,7 @@ implements 
 
 conf
-protected org.apache.hadoop.conf.Configuration conf
+protected org.apache.hadoop.conf.Configuration conf
 
 
 
@@ -1420,7 +1420,7 @@ implements 
 
 cacheConf
-protected CacheConfig cacheConf
+protected CacheConfig cacheConf
 
 
 
@@ -1429,7 +1429,7 @@ implements 
 
 lastCompactSize
-private long lastCompactSize
+private long lastCompactSize
 
 
 
@@ -1438,7 +1438,7 @@ implements 
 
 forceMajor
-volatile boolean forceMajor
+volatile boolean forceMajor
 
 
 
@@ -1447,7 +1447,7 @@ implements 
 
 closeCheckInterval
-static int closeCheckInterval
+static int closeCheckInterval
 
 
 
@@ -1456,7 +1456,7 @@ implements 

[08/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.RootProcedure.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.RootProcedure.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.RootProcedure.html
new file mode 100644
index 000..6db16bd
--- /dev/null
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.RootProcedure.html
@@ -0,0 +1,314 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing, software
+013 * distributed under the License is 
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+015 * See the License for the specific 
language governing permissions and
+016 * limitations under the License.
+017 */
+018package 
org.apache.hadoop.hbase.procedure2;
+019
+020import static 
org.junit.Assert.assertTrue;
+021
+022import 
java.util.concurrent.CountDownLatch;
+023
+024import org.apache.hadoop.fs.FileSystem;
+025import org.apache.hadoop.fs.Path;
+026import 
org.apache.hadoop.hbase.HBaseClassTestRule;
+027import 
org.apache.hadoop.hbase.HBaseCommonTestingUtility;
+028import 
org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
+029import 
org.apache.hadoop.hbase.testclassification.MasterTests;
+030import 
org.apache.hadoop.hbase.testclassification.SmallTests;
+031import org.junit.Assert;
+032import org.junit.BeforeClass;
+033import org.junit.ClassRule;
+034import org.junit.Test;
+035import 
org.junit.experimental.categories.Category;
+036import org.slf4j.Logger;
+037import org.slf4j.LoggerFactory;
+038
+039
+040@Category({MasterTests.class, 
SmallTests.class})
+041public class TestProcedureCleanup {
+042  @ClassRule public static final 
HBaseClassTestRule CLASS_RULE = HBaseClassTestRule
+043  
.forClass(TestProcedureCleanup.class);
+044
+045
+046  private static final Logger LOG = 
LoggerFactory.getLogger(TestProcedureCleanup.class);
+047  private static final int 
PROCEDURE_EXECUTOR_SLOTS = 1;
+048
+049  private static TestProcEnv procEnv;
+050  private static WALProcedureStore 
procStore;
+051
+052  private static 
ProcedureExecutor procExecutor;
+053
+054  private static 
HBaseCommonTestingUtility htu;
+055
+056  private static FileSystem fs;
+057  private static Path testDir;
+058  private static Path logDir;
+059
+060  private static class TestProcEnv {
+061
+062  }
+063
+064  private void createProcExecutor(String 
dir) throws Exception {
+065logDir = new Path(testDir, dir);
+066procStore = 
ProcedureTestingUtility.createWalStore(htu.getConfiguration(), logDir);
+067procExecutor = new 
ProcedureExecutor<>(htu.getConfiguration(), procEnv,
+068procStore);
+069
procStore.start(PROCEDURE_EXECUTOR_SLOTS);
+070ProcedureTestingUtility
+071
.initAndStartWorkers(procExecutor, PROCEDURE_EXECUTOR_SLOTS, true, true);
+072  }
+073
+074  @BeforeClass
+075  public static void setUp() throws 
Exception {
+076htu = new 
HBaseCommonTestingUtility();
+077
+078// NOTE: The executor will be created 
by each test
+079procEnv = new TestProcEnv();
+080testDir = htu.getDataTestDir();
+081fs = 
testDir.getFileSystem(htu.getConfiguration());
+082assertTrue(testDir.depth() > 1);
+083
+084
+085  }
+086
+087  @Test
+088  public void 
testProcedureShouldNotCleanOnLoad() throws Exception {
+089
createProcExecutor("testProcedureShouldNotCleanOnLoad");
+090final RootProcedure proc = new 
RootProcedure();
+091long rootProc = 
procExecutor.submitProcedure(proc);
+092LOG.info("Begin to execute " + 
rootProc);
+093// wait until the child procedure 
arrival
+094
while(procExecutor.getProcedures().size() < 2) {
+095  Thread.sleep(100);
+096}
+097SuspendProcedure suspendProcedure = 
(SuspendProcedure) procExecutor
+098.getProcedures().get(1);
+099// wait until the suspendProcedure 
executed
+100suspendProcedure.latch.countDown();
+101Thread.sleep(100);
+102// roll the procedure log
+103LOG.info("Begin to roll log ");
+104procStore.rollWriterForTesting();
+105LOG.info("finish to roll log ");
+106Thread.sleep(500);
+107LOG.info

[05/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadHelper.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadHelper.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadHelper.html
index dc287aa..5062e9b 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadHelper.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadHelper.html
@@ -163,848 +163,848 @@
 155  public static final HBaseClassTestRule 
CLASS_RULE =
 156  
HBaseClassTestRule.forClass(TestAccessController.class);
 157
-158  private static final Logger LOG = 
LoggerFactory.getLogger(TestAccessController.class);
-159  private static TableName TEST_TABLE = 
TableName.valueOf("testtable1");
-160  private static final 
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-161  private static Configuration conf;
-162
-163  /** The systemUserConnection created 
here is tied to the system user. In case, you are planning
-164   * to create AccessTestAction, DON'T 
use this systemUserConnection as the 'doAs' user
-165   * gets  eclipsed by the system user. 
*/
-166  private static Connection 
systemUserConnection;
-167
+158  private static final FsPermission 
FS_PERMISSION_ALL = FsPermission.valueOf("-rwxrwxrwx");
+159  private static final Logger LOG = 
LoggerFactory.getLogger(TestAccessController.class);
+160  private static TableName TEST_TABLE = 
TableName.valueOf("testtable1");
+161  private static final 
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+162  private static Configuration conf;
+163
+164  /** The systemUserConnection created 
here is tied to the system user. In case, you are planning
+165   * to create AccessTestAction, DON'T 
use this systemUserConnection as the 'doAs' user
+166   * gets  eclipsed by the system user. 
*/
+167  private static Connection 
systemUserConnection;
 168
-169  // user with all permissions
-170  private static User SUPERUSER;
-171  // user granted with all global 
permission
-172  private static User USER_ADMIN;
-173  // user with rw permissions on column 
family.
-174  private static User USER_RW;
-175  // user with read-only permissions
-176  private static User USER_RO;
-177  // user is table owner. will have all 
permissions on table
-178  private static User USER_OWNER;
-179  // user with create table permissions 
alone
-180  private static User USER_CREATE;
-181  // user with no permissions
-182  private static User USER_NONE;
-183  // user with admin rights on the column 
family
-184  private static User USER_ADMIN_CF;
-185
-186  private static final String GROUP_ADMIN 
= "group_admin";
-187  private static final String 
GROUP_CREATE = "group_create";
-188  private static final String GROUP_READ 
= "group_read";
-189  private static final String GROUP_WRITE 
= "group_write";
-190
-191  private static User USER_GROUP_ADMIN;
-192  private static User 
USER_GROUP_CREATE;
-193  private static User USER_GROUP_READ;
-194  private static User USER_GROUP_WRITE;
-195
-196  // TODO: convert this test to cover the 
full matrix in
-197  // 
https://hbase.apache.org/book/appendix_acl_matrix.html
-198  // creating all Scope x Permission 
combinations
-199
-200  private static TableName TEST_TABLE2 = 
TableName.valueOf("testtable2");
-201  private static byte[] TEST_FAMILY = 
Bytes.toBytes("f1");
-202  private static byte[] TEST_QUALIFIER = 
Bytes.toBytes("q1");
-203  private static byte[] TEST_ROW = 
Bytes.toBytes("r1");
-204
-205  private static 
MasterCoprocessorEnvironment CP_ENV;
-206  private static AccessController 
ACCESS_CONTROLLER;
-207  private static 
RegionServerCoprocessorEnvironment RSCP_ENV;
-208  private static 
RegionCoprocessorEnvironment RCP_ENV;
-209
-210  @Rule
-211  public TestName name = new 
TestName();
-212
-213  @BeforeClass
-214  public static void setupBeforeClass() 
throws Exception {
-215// setup configuration
-216conf = 
TEST_UTIL.getConfiguration();
-217// Up the handlers; this test needs 
more than usual.
-218
conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
-219
-220
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
-221  
MyShellBasedUnixGroupsMapping.class.getName());
-222
UserGroupInformation.setConfiguration(conf);
-223
-224// Enable security
-225enableSecurity(conf);
-226// In this particular test case, we 
can't use SecureBulkLoadEndpoint because its doAs will fail
-227// to move a file for a random user
-228
conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, 
AccessController.class.getName());
-229// Verify enableSecurity sets up what 
we require
-230verifyConfiguration(conf);
-23

hbase-site git commit: INFRA-10751 Empty commit

2018-10-23 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 3b2f2cea9 -> 02f92ca7d


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/02f92ca7
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/02f92ca7
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/02f92ca7

Branch: refs/heads/asf-site
Commit: 02f92ca7d1ab3d1fd19cb7dd8cd9518fb3453b94
Parents: 3b2f2ce
Author: jenkins 
Authored: Tue Oct 23 14:55:30 2018 +
Committer: jenkins 
Committed: Tue Oct 23 14:55:30 2018 +

--

--




[13/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
index eb90a1f..e3d6f54 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
@@ -75,557 +75,583 @@
 067});
 068  }
 069
-070  public static  void 
restart(final ProcedureExecutor procExecutor) throws Exception {
-071restart(procExecutor, false, true, 
null, null, null);
-072  }
-073
-074  public static void 
initAndStartWorkers(ProcedureExecutor procExecutor, int numThreads,
-075  boolean abortOnCorruption) throws 
IOException {
-076procExecutor.init(numThreads, 
abortOnCorruption);
-077procExecutor.startWorkers();
+070  public static  void 
restart(final ProcedureExecutor procExecutor,
+071  boolean abort, boolean 
startWorkers) throws Exception {
+072restart(procExecutor, false, true, 
null, null, null,  abort, startWorkers);
+073  }
+074
+075  public static  void 
restart(final ProcedureExecutor procExecutor,
+076  boolean abort) throws Exception {
+077restart(procExecutor, false, true, 
null, null, null, abort, true);
 078  }
 079
-080  public static  void 
restart(ProcedureExecutor procExecutor,
-081  boolean avoidTestKillDuringRestart, 
boolean failOnCorrupted, Callable stopAction,
-082  Callable 
actionBeforeStartWorker, Callable startAction)
-083  throws Exception {
-084final ProcedureStore procStore = 
procExecutor.getStore();
-085final int storeThreads = 
procExecutor.getCorePoolSize();
-086final int execThreads = 
procExecutor.getCorePoolSize();
-087
-088final ProcedureExecutor.Testing 
testing = procExecutor.testing;
-089if (avoidTestKillDuringRestart) {
-090  procExecutor.testing = null;
-091}
-092
-093// stop
-094LOG.info("RESTART - Stop");
-095procExecutor.stop();
-096procStore.stop(false);
-097if (stopAction != null) {
-098  stopAction.call();
-099}
-100procExecutor.join();
-101
procExecutor.getScheduler().clear();
-102
-103// nothing running...
-104
-105// re-start
-106LOG.info("RESTART - Start");
-107procStore.start(storeThreads);
-108procExecutor.init(execThreads, 
failOnCorrupted);
-109if (actionBeforeStartWorker != null) 
{
-110  actionBeforeStartWorker.call();
-111}
-112procExecutor.startWorkers();
-113if (startAction != null) {
-114  startAction.call();
+080  public static  void 
restart(final ProcedureExecutor procExecutor) throws Exception {
+081restart(procExecutor, false, true, 
null, null, null, false, true);
+082  }
+083
+084  public static void 
initAndStartWorkers(ProcedureExecutor procExecutor, int numThreads,
+085  boolean abortOnCorruption) throws 
IOException {
+086initAndStartWorkers(procExecutor, 
numThreads, abortOnCorruption, true);
+087  }
+088
+089  public static void 
initAndStartWorkers(ProcedureExecutor procExecutor, int numThreads,
+090  boolean abortOnCorruption, boolean 
startWorkers) throws IOException {
+091procExecutor.init(numThreads, 
abortOnCorruption);
+092if (startWorkers) {
+093  procExecutor.startWorkers();
+094}
+095  }
+096
+097  public static  void 
restart(ProcedureExecutor procExecutor,
+098  boolean avoidTestKillDuringRestart, 
boolean failOnCorrupted, Callable stopAction,
+099  Callable 
actionBeforeStartWorker, Callable startAction) throws Exception {
+100restart(procExecutor, 
avoidTestKillDuringRestart, failOnCorrupted, stopAction,
+101  actionBeforeStartWorker, 
startAction, false, true);
+102  }
+103
+104  public static  void 
restart(ProcedureExecutor procExecutor,
+105  boolean avoidTestKillDuringRestart, 
boolean failOnCorrupted, Callable stopAction,
+106  Callable 
actionBeforeStartWorker, Callable startAction, boolean abort,
+107  boolean startWorkers) throws 
Exception {
+108final ProcedureStore procStore = 
procExecutor.getStore();
+109final int storeThreads = 
procExecutor.getCorePoolSize();
+110final int execThreads = 
procExecutor.getCorePoolSize();
+111
+112final ProcedureExecutor.Testing 
testing = procExecutor.testing;
+113if (avoidTestKillDuringRestart) {
+114  procExecutor.testing = null;
 115}
 116
-117if (avoidTestKillDuringRestart) {
-118  procExecutor.testing = testing;
-119}
-120  }
-121
-122  public static void 
st

[21/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/index-all.html
--
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index 97cc026..c6775e0 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -3018,13 +3018,15 @@
  
 bulkHLogShouldThrowNoErrorAndWriteMarkerWithBlankInput()
 - Method in class org.apache.hadoop.hbase.regionserver.TestBulkLoad
  
+BulkLoadAccessTestAction(FsPermission,
 Path) - Constructor for class 
org.apache.hadoop.hbase.security.access.TestAccessController.BulkLoadAccessTestAction
+ 
 BulkLoadHelper(Path)
 - Constructor for class org.apache.hadoop.hbase.security.access.TestAccessController.BulkLoadHelper
  
 bulkLoadHFile(RpcController,
 ClientProtos.BulkLoadHFileRequest) - Method in class 
org.apache.hadoop.hbase.client.TestClientNoCluster.FakeServer
  
 bulkLoadHFile(RpcController,
 ClientProtos.BulkLoadHFileRequest) - Method in class 
org.apache.hadoop.hbase.master.MockRegionServer
  
-bulkLoadHFile(TableName,
 byte[], byte[], byte[][][], int) - Method in class 
org.apache.hadoop.hbase.security.access.TestAccessController.BulkLoadHelper
+bulkLoadHFile(TableName)
 - Method in class org.apache.hadoop.hbase.security.access.TestAccessController.BulkLoadHelper
  
 bulkLoadHFiles(List>, Token, String, byte[]) - Method in class 
org.apache.hadoop.hbase.regionserver.SecureBulkLoadEndpointClient
 
@@ -4097,6 +4099,8 @@
  
 childSpwaned
 - Variable in class org.apache.hadoop.hbase.procedure2.TestProcedureBypass.RootProcedure
  
+childSpwaned
 - Variable in class org.apache.hadoop.hbase.procedure2.TestProcedureCleanup.RootProcedure
+ 
 chore
 - Variable in class org.apache.hadoop.hbase.quotas.TestNamespaceQuotaViolationStore
  
 chore
 - Variable in class org.apache.hadoop.hbase.quotas.TestQuotaObserverChore
@@ -5501,6 +5505,8 @@
  
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.procedure2.TestProcedureBypass
  
+CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.procedure2.TestProcedureCleanup
+ 
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.procedure2.TestProcedureEvents
  
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.procedure2.TestProcedureExecution
@@ -10170,6 +10176,8 @@
  
 createProc(long,
 long) - Method in class 
org.apache.hadoop.hbase.procedure2.store.wal.TestWALProcedureTree
  
+createProcExecutor(String)
 - Method in class org.apache.hadoop.hbase.procedure2.TestProcedureCleanup
+ 
 createPut(int,
 boolean) - Method in class org.apache.hadoop.hbase.client.TestAsyncProcess
  
 createPut(long)
 - Static method in class org.apache.hadoop.hbase.client.TestSimpleRequestController
@@ -13610,6 +13618,12 @@
  
 execute(TestProcedureBypass.TestProcEnv)
 - Method in class org.apache.hadoop.hbase.procedure2.TestProcedureBypass.SuspendProcedure
  
+execute(TestProcedureCleanup.TestProcEnv)
 - Method in class org.apache.hadoop.hbase.procedure2.TestProcedureCleanup.RootProcedure
+ 
+execute(TestProcedureCleanup.TestProcEnv)
 - Method in class org.apache.hadoop.hbase.procedure2.TestProcedureCleanup.SuspendProcedure
+ 
+execute(TestProcedureCleanup.TestProcEnv)
 - Method in class org.apache.hadoop.hbase.procedure2.TestProcedureCleanup.WaitProcedure
+ 
 execute(TestProcedureEvents.TestProcEnv)
 - Method in class org.apache.hadoop.hbase.procedure2.TestProcedureEvents.TestTimeoutEventProcedure
  
 execute(Void)
 - Method in class org.apache.hadoop.hbase.procedure2.TestProcedureExecution.TestFaultyRollback
@@ -14795,6 +14809,8 @@
  
 fileNameFilter
 - Variable in class org.apache.hadoop.hbase.ClassFinder.Not
  
+filePermission
 - Variable in class org.apache.hadoop.hbase.security.access.TestAccessController.BulkLoadAccessTestAction
+ 
 files
 - Static variable in class org.apache.hadoop.hbase.replication.regionserver.TestReplicationSourceManager
  
 files
 - Variable in class org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock.RegionData
@@ -15404,6 +15420,8 @@
  
 fs
 - Static variable in class org.apache.hadoop.hbase.procedure2.TestProcedureBypass
  
+fs
 - Static variable in class org.apache.hadoop.hbase.procedure2.TestProcedureCleanup
+ 
 fs
 - Variable in class org.apache.hadoop.hbase.procedure2.TestProcedureEvents
  
 fs
 - Variable in class org.apache.hadoop.hbase.procedure2.TestProcedureExecution
@@ -15502,6 +15520,8 @@
  
 fs - 
Variable in class org.apache.hadoop.hbase.wal.TestWALSplit
  
+FS_PERMISSION_ALL
 - Static variable in class org.apache.hadoop.hbase.security.access.TestAccessController
+ 
 FS_PERMS
 - Static variable in class org.apache.hadoop.hbase.test.IntegrationTestZKAndFSPermissions
  
 FS_URI
 - Static variable in class org.apache.hadoop.hbase.HBaseTestingUtility
@@ -20687,6 +20707,8 @@
  
 htu
 - Static variable in class org.apache.hadoop.hbase.procedure2.TestProcedureBypass
  
+htu
 - Static variable in class org.apache.ha

[03/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.PingCoprocessor.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.PingCoprocessor.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.PingCoprocessor.html
index dc287aa..5062e9b 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.PingCoprocessor.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.PingCoprocessor.html
@@ -163,848 +163,848 @@
 155  public static final HBaseClassTestRule 
CLASS_RULE =
 156  
HBaseClassTestRule.forClass(TestAccessController.class);
 157
-158  private static final Logger LOG = 
LoggerFactory.getLogger(TestAccessController.class);
-159  private static TableName TEST_TABLE = 
TableName.valueOf("testtable1");
-160  private static final 
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-161  private static Configuration conf;
-162
-163  /** The systemUserConnection created 
here is tied to the system user. In case, you are planning
-164   * to create AccessTestAction, DON'T 
use this systemUserConnection as the 'doAs' user
-165   * gets  eclipsed by the system user. 
*/
-166  private static Connection 
systemUserConnection;
-167
+158  private static final FsPermission 
FS_PERMISSION_ALL = FsPermission.valueOf("-rwxrwxrwx");
+159  private static final Logger LOG = 
LoggerFactory.getLogger(TestAccessController.class);
+160  private static TableName TEST_TABLE = 
TableName.valueOf("testtable1");
+161  private static final 
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+162  private static Configuration conf;
+163
+164  /** The systemUserConnection created 
here is tied to the system user. In case, you are planning
+165   * to create AccessTestAction, DON'T 
use this systemUserConnection as the 'doAs' user
+166   * gets  eclipsed by the system user. 
*/
+167  private static Connection 
systemUserConnection;
 168
-169  // user with all permissions
-170  private static User SUPERUSER;
-171  // user granted with all global 
permission
-172  private static User USER_ADMIN;
-173  // user with rw permissions on column 
family.
-174  private static User USER_RW;
-175  // user with read-only permissions
-176  private static User USER_RO;
-177  // user is table owner. will have all 
permissions on table
-178  private static User USER_OWNER;
-179  // user with create table permissions 
alone
-180  private static User USER_CREATE;
-181  // user with no permissions
-182  private static User USER_NONE;
-183  // user with admin rights on the column 
family
-184  private static User USER_ADMIN_CF;
-185
-186  private static final String GROUP_ADMIN 
= "group_admin";
-187  private static final String 
GROUP_CREATE = "group_create";
-188  private static final String GROUP_READ 
= "group_read";
-189  private static final String GROUP_WRITE 
= "group_write";
-190
-191  private static User USER_GROUP_ADMIN;
-192  private static User 
USER_GROUP_CREATE;
-193  private static User USER_GROUP_READ;
-194  private static User USER_GROUP_WRITE;
-195
-196  // TODO: convert this test to cover the 
full matrix in
-197  // 
https://hbase.apache.org/book/appendix_acl_matrix.html
-198  // creating all Scope x Permission 
combinations
-199
-200  private static TableName TEST_TABLE2 = 
TableName.valueOf("testtable2");
-201  private static byte[] TEST_FAMILY = 
Bytes.toBytes("f1");
-202  private static byte[] TEST_QUALIFIER = 
Bytes.toBytes("q1");
-203  private static byte[] TEST_ROW = 
Bytes.toBytes("r1");
-204
-205  private static 
MasterCoprocessorEnvironment CP_ENV;
-206  private static AccessController 
ACCESS_CONTROLLER;
-207  private static 
RegionServerCoprocessorEnvironment RSCP_ENV;
-208  private static 
RegionCoprocessorEnvironment RCP_ENV;
-209
-210  @Rule
-211  public TestName name = new 
TestName();
-212
-213  @BeforeClass
-214  public static void setupBeforeClass() 
throws Exception {
-215// setup configuration
-216conf = 
TEST_UTIL.getConfiguration();
-217// Up the handlers; this test needs 
more than usual.
-218
conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
-219
-220
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
-221  
MyShellBasedUnixGroupsMapping.class.getName());
-222
UserGroupInformation.setConfiguration(conf);
-223
-224// Enable security
-225enableSecurity(conf);
-226// In this particular test case, we 
can't use SecureBulkLoadEndpoint because its doAs will fail
-227// to move a file for a random user
-228
conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, 
AccessController.class.getName());
-229// Verify enableSecurity sets up what 
we require
-230verifyConfiguration(conf)

[26/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
index b456cd2..9b964f6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
@@ -105,1302 +105,1320 @@
 097 * will first be initialized to the 
oldest file's tracker(which is stored in the trailer), using the
 098 * method {@link 
ProcedureStoreTracker#resetTo(ProcedureStoreTracker, boolean)}, and then merge 
it
 099 * with the tracker of every newer wal 
files, using the
-100 * {@link 
ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. If we 
find out
-101 * that all the modified procedures for 
the oldest wal file are modified or deleted in newer wal
-102 * files, then we can delete it. This is 
because that, every time we call
-103 * {@link 
ProcedureStore#insert(Procedure[])} or {@link 
ProcedureStore#update(Procedure)}, we will
-104 * persist the full state of a Procedure, 
so the earlier wal records for this procedure can all be
-105 * deleted.
-106 * @see ProcedureWALPrettyPrinter for 
printing content of a single WAL.
-107 * @see #main(String[]) to parse a 
directory of MasterWALProcs.
-108 */
-109@InterfaceAudience.Private
-110public class WALProcedureStore extends 
ProcedureStoreBase {
-111  private static final Logger LOG = 
LoggerFactory.getLogger(WALProcedureStore.class);
-112  public static final String LOG_PREFIX = 
"pv2-";
-113  /** Used to construct the name of the 
log directory for master procedures */
-114  public static final String 
MASTER_PROCEDURE_LOGDIR = "MasterProcWALs";
-115
+100 * {@link 
ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker, 
boolean)}.
+101 * If we find out
+102 * that all the modified procedures for 
the oldest wal file are modified or deleted in newer wal
+103 * files, then we can delete it. This is 
because that, every time we call
+104 * {@link 
ProcedureStore#insert(Procedure[])} or {@link 
ProcedureStore#update(Procedure)}, we will
+105 * persist the full state of a Procedure, 
so the earlier wal records for this procedure can all be
+106 * deleted.
+107 * @see ProcedureWALPrettyPrinter for 
printing content of a single WAL.
+108 * @see #main(String[]) to parse a 
directory of MasterWALProcs.
+109 */
+110@InterfaceAudience.Private
+111public class WALProcedureStore extends 
ProcedureStoreBase {
+112  private static final Logger LOG = 
LoggerFactory.getLogger(WALProcedureStore.class);
+113  public static final String LOG_PREFIX = 
"pv2-";
+114  /** Used to construct the name of the 
log directory for master procedures */
+115  public static final String 
MASTER_PROCEDURE_LOGDIR = "MasterProcWALs";
 116
-117  public interface LeaseRecovery {
-118void recoverFileLease(FileSystem fs, 
Path path) throws IOException;
-119  }
-120
-121  public static final String 
WAL_COUNT_WARN_THRESHOLD_CONF_KEY =
-122
"hbase.procedure.store.wal.warn.threshold";
-123  private static final int 
DEFAULT_WAL_COUNT_WARN_THRESHOLD = 10;
-124
-125  public static final String 
EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY =
-126
"hbase.procedure.store.wal.exec.cleanup.on.load";
-127  private static final boolean 
DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY = true;
-128
-129  public static final String 
MAX_RETRIES_BEFORE_ROLL_CONF_KEY =
-130
"hbase.procedure.store.wal.max.retries.before.roll";
-131  private static final int 
DEFAULT_MAX_RETRIES_BEFORE_ROLL = 3;
-132
-133  public static final String 
WAIT_BEFORE_ROLL_CONF_KEY =
-134
"hbase.procedure.store.wal.wait.before.roll";
-135  private static final int 
DEFAULT_WAIT_BEFORE_ROLL = 500;
-136
-137  public static final String 
ROLL_RETRIES_CONF_KEY =
-138
"hbase.procedure.store.wal.max.roll.retries";
-139  private static final int 
DEFAULT_ROLL_RETRIES = 3;
-140
-141  public static final String 
MAX_SYNC_FAILURE_ROLL_CONF_KEY =
-142
"hbase.procedure.store.wal.sync.failure.roll.max";
-143  private static final int 
DEFAULT_MAX_SYNC_FAILURE_ROLL = 3;
-144
-145  public static final String 
PERIODIC_ROLL_CONF_KEY =
-146
"hbase.procedure.store.wal.periodic.roll.msec";
-147  private static final int 
DEFAULT_PERIODIC_ROLL = 60 * 60 * 1000; // 1h
-148
-149  public static final String 
SYNC_WAIT_MSEC_CONF_KEY = "hbase.procedure.store.wal.sync.wait.msec";
-150  private static final int 
DEFAULT_SYNC_WAIT_MSEC = 100;
-151
-152  public static final String 
USE_HSYNC_CONF_KEY = "hbase.procedure.store.wal.use.hsync";
-153  private static final boolean 
DEFAULT_USE_HSYNC = true;

[12/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.NoopProcedure.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.NoopProcedure.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.NoopProcedure.html
index eb90a1f..e3d6f54 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.NoopProcedure.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.NoopProcedure.html
@@ -75,557 +75,583 @@
 067});
 068  }
 069
-070  public static  void 
restart(final ProcedureExecutor procExecutor) throws Exception {
-071restart(procExecutor, false, true, 
null, null, null);
-072  }
-073
-074  public static void 
initAndStartWorkers(ProcedureExecutor procExecutor, int numThreads,
-075  boolean abortOnCorruption) throws 
IOException {
-076procExecutor.init(numThreads, 
abortOnCorruption);
-077procExecutor.startWorkers();
+070  public static  void 
restart(final ProcedureExecutor procExecutor,
+071  boolean abort, boolean 
startWorkers) throws Exception {
+072restart(procExecutor, false, true, 
null, null, null,  abort, startWorkers);
+073  }
+074
+075  public static  void 
restart(final ProcedureExecutor procExecutor,
+076  boolean abort) throws Exception {
+077restart(procExecutor, false, true, 
null, null, null, abort, true);
 078  }
 079
-080  public static  void 
restart(ProcedureExecutor procExecutor,
-081  boolean avoidTestKillDuringRestart, 
boolean failOnCorrupted, Callable stopAction,
-082  Callable 
actionBeforeStartWorker, Callable startAction)
-083  throws Exception {
-084final ProcedureStore procStore = 
procExecutor.getStore();
-085final int storeThreads = 
procExecutor.getCorePoolSize();
-086final int execThreads = 
procExecutor.getCorePoolSize();
-087
-088final ProcedureExecutor.Testing 
testing = procExecutor.testing;
-089if (avoidTestKillDuringRestart) {
-090  procExecutor.testing = null;
-091}
-092
-093// stop
-094LOG.info("RESTART - Stop");
-095procExecutor.stop();
-096procStore.stop(false);
-097if (stopAction != null) {
-098  stopAction.call();
-099}
-100procExecutor.join();
-101
procExecutor.getScheduler().clear();
-102
-103// nothing running...
-104
-105// re-start
-106LOG.info("RESTART - Start");
-107procStore.start(storeThreads);
-108procExecutor.init(execThreads, 
failOnCorrupted);
-109if (actionBeforeStartWorker != null) 
{
-110  actionBeforeStartWorker.call();
-111}
-112procExecutor.startWorkers();
-113if (startAction != null) {
-114  startAction.call();
+080  public static  void 
restart(final ProcedureExecutor procExecutor) throws Exception {
+081restart(procExecutor, false, true, 
null, null, null, false, true);
+082  }
+083
+084  public static void 
initAndStartWorkers(ProcedureExecutor procExecutor, int numThreads,
+085  boolean abortOnCorruption) throws 
IOException {
+086initAndStartWorkers(procExecutor, 
numThreads, abortOnCorruption, true);
+087  }
+088
+089  public static void 
initAndStartWorkers(ProcedureExecutor procExecutor, int numThreads,
+090  boolean abortOnCorruption, boolean 
startWorkers) throws IOException {
+091procExecutor.init(numThreads, 
abortOnCorruption);
+092if (startWorkers) {
+093  procExecutor.startWorkers();
+094}
+095  }
+096
+097  public static  void 
restart(ProcedureExecutor procExecutor,
+098  boolean avoidTestKillDuringRestart, 
boolean failOnCorrupted, Callable stopAction,
+099  Callable 
actionBeforeStartWorker, Callable startAction) throws Exception {
+100restart(procExecutor, 
avoidTestKillDuringRestart, failOnCorrupted, stopAction,
+101  actionBeforeStartWorker, 
startAction, false, true);
+102  }
+103
+104  public static  void 
restart(ProcedureExecutor procExecutor,
+105  boolean avoidTestKillDuringRestart, 
boolean failOnCorrupted, Callable stopAction,
+106  Callable 
actionBeforeStartWorker, Callable startAction, boolean abort,
+107  boolean startWorkers) throws 
Exception {
+108final ProcedureStore procStore = 
procExecutor.getStore();
+109final int storeThreads = 
procExecutor.getCorePoolSize();
+110final int execThreads = 
procExecutor.getCorePoolSize();
+111
+112final ProcedureExecutor.Testing 
testing = procExecutor.testing;
+113if (avoidTestKillDuringRestart) {
+114  procExecutor.testing = null;
 115}
 116
-117if (avoidTestKillDuringRestart) {
-118  procExecutor.testing = testing;
-119}
-120  }
-121
-122  public stati

[15/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.html
 
b/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.html
index b78b9af..68eb4ec 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":9,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":9,"i8":9,"i9":9,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10};
+var methods = 
{"i0":9,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":9,"i8":9,"i9":9,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 
 
 Prev Class
-Next Class
+Next Class
 
 
 Frames
@@ -137,18 +137,22 @@ extends Class and Description
 
 
+private class 
+TestAccessController.BulkLoadAccessTestAction 
+
+
 static class 
 TestAccessController.BulkLoadHelper 
 
-
+
 static class 
 TestAccessController.MyShellBasedUnixGroupsMapping 
 
-
+
 static class 
 TestAccessController.PingCoprocessor 
 
-
+
 static class 
 TestAccessController.TestTableDDLProcedure 
 
@@ -191,112 +195,116 @@ extends CP_ENV 
 
 
+private static 
org.apache.hadoop.fs.permission.FsPermission
+FS_PERMISSION_ALL 
+
+
 private static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 GROUP_ADMIN 
 
-
+
 private static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 GROUP_CREATE 
 
-
+
 private static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 GROUP_READ 
 
-
+
 private static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 GROUP_WRITE 
 
-
+
 private static org.slf4j.Logger
 LOG 
 
-
+
 org.junit.rules.TestName
 name 
 
-
+
 private static 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment
 RCP_ENV 
 
-
+
 private static 
org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment
 RSCP_ENV 
 
-
+
 private static 
org.apache.hadoop.hbase.security.User
 SUPERUSER 
 
-
+
 private static 
org.apache.hadoop.hbase.client.Connection
 systemUserConnection
 The systemUserConnection created here is tied to the system 
user.
 
 
-
+
 private static byte[]
 TEST_FAMILY 
 
-
+
 private static byte[]
 TEST_QUALIFIER 
 
-
+
 private static byte[]
 TEST_ROW 
 
-
+
 private static 
org.apache.hadoop.hbase.TableName
 TEST_TABLE 
 
-
+
 private static 
org.apache.hadoop.hbase.TableName
 TEST_TABLE2 
 
-
+
 private static HBaseTestingUtility
 TEST_UTIL 
 
-
+
 private static 
org.apache.hadoop.hbase.security.User
 USER_ADMIN 
 
-
+
 private static 
org.apache.hadoop.hbase.security.User
 USER_ADMIN_CF 
 
-
+
 private static 
org.apache.hadoop.hbase.security.User
 USER_CREATE 
 
-
+
 private static 
org.apache.hadoop.hbase.security.User
 USER_GROUP_ADMIN 
 
-
+
 private static 
org.apache.hadoop.hbase.security.User
 USER_GROUP_CREATE 
 

[22/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/downloads.html
--
diff --git a/downloads.html b/downloads.html
index 9a5f0c3..4cf620c 100644
--- a/downloads.html
+++ b/downloads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Apache HBase Downloads
 
@@ -461,7 +461,7 @@ under the License. -->
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-10-22
+  Last Published: 
2018-10-23
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/export_control.html
--
diff --git a/export_control.html b/export_control.html
index 6f4f413..218f5af 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – 
   Export Control
@@ -341,7 +341,7 @@ for more details.
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-10-22
+  Last Published: 
2018-10-23
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/index.html
--
diff --git a/index.html b/index.html
index 751cfe4..495f5ee 100644
--- a/index.html
+++ b/index.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Apache HBase™ Home
 
@@ -421,7 +421,7 @@ Apache HBase is an open-source, distributed, versioned, 
non-relational database
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-10-22
+  Last Published: 
2018-10-23
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/integration.html
--
diff --git a/integration.html b/integration.html
index f3d0b15..2f1b33b 100644
--- a/integration.html
+++ b/integration.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – CI Management
 
@@ -301,7 +301,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-10-22
+  Last Published: 
2018-10-23
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/issue-tracking.html
--
diff --git a/issue-tracking.html b/issue-tracking.html
index 2fa05fb..175cba7 100644
--- a/issue-tracking.html
+++ b/issue-tracking.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Issue Management
 
@@ -298,7 +298,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-10-22
+  Last Published: 
2018-10-23
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/license.html
--
diff --git a/license.html b/license.html
index de43453..63df443 100644
--- a/license.html
+++ b/license.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Licenses
 
@@ -501,7 +501,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-10-22
+  Last Published: 
2018-10-23
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/mail-lists.html
--
diff --git a/mail-lists.html b/mail-lists.html
index 6a56c16..01b4bb4 100644
--- a/mail-lists.html
+++ b/mail-lists.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Mailing Lists
 
@@ -351,7 +351,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-10-22
+  Last Published: 
2018-10-23
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/metrics.html
--
diff --git a/metrics.html b/metrics.html
index c0fea89..28c216d 100644
--- a/metrics.html
+++ b/m

[30/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 44806c1..4a0fe38 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -711,19 +711,19 @@
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
 org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
-org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
-org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
 org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
-org.apache.hadoop.hbase.regionserver.ScanType
-org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
 org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
-org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType
-org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
 org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
 org.apache.hadoop.hbase.regionserver.BloomType
+org.apache.hadoop.hbase.regionserver.ScanType
+org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
 org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
+org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
+org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
 org.apache.hadoop.hbase.regionserver.Region.Operation
+org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType
 org.apache.hadoop.hbase.regionserver.FlushType
+org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
index 23060c2..6851ee1 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
@@ -130,9 +130,9 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
 org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult
 org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode
+org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
index e8df157..feee307 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
@@ -247,9 +247,9 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.regionserver.wal.CompressionContext.DictionaryIndex
 org.apache.hadoop.hbase.regionserver.wal.RingBufferTruck.Type
 org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader.WALHdrResult
+org.apache.hadoop.hbase.regionserver

[23/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
index b595018..c82bf55 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -63,2735 +63,2737 @@
 055import 
org.apache.hadoop.conf.Configuration;
 056import org.apache.hadoop.fs.FileSystem;
 057import org.apache.hadoop.fs.Path;
-058import org.apache.hadoop.hbase.Cell;
-059import 
org.apache.hadoop.hbase.CellComparator;
-060import 
org.apache.hadoop.hbase.CellUtil;
-061import 
org.apache.hadoop.hbase.CompoundConfiguration;
-062import 
org.apache.hadoop.hbase.HConstants;
-063import 
org.apache.hadoop.hbase.MemoryCompactionPolicy;
-064import 
org.apache.hadoop.hbase.TableName;
-065import 
org.apache.hadoop.hbase.backup.FailedArchiveException;
-066import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-067import 
org.apache.hadoop.hbase.client.RegionInfo;
-068import 
org.apache.hadoop.hbase.client.Scan;
-069import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-070import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-071import 
org.apache.hadoop.hbase.io.HeapSize;
-072import 
org.apache.hadoop.hbase.io.compress.Compression;
-073import 
org.apache.hadoop.hbase.io.crypto.Encryption;
-074import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-075import 
org.apache.hadoop.hbase.io.hfile.HFile;
-076import 
org.apache.hadoop.hbase.io.hfile.HFileContext;
-077import 
org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-078import 
org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
-079import 
org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
-080import 
org.apache.hadoop.hbase.io.hfile.HFileScanner;
-081import 
org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
-082import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-083import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-084import 
org.apache.hadoop.hbase.quotas.RegionSizeStore;
-085import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
-086import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-087import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
-088import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
-089import 
org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
-090import 
org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours;
-091import 
org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
-092import 
org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-093import 
org.apache.hadoop.hbase.regionserver.wal.WALUtil;
-094import 
org.apache.hadoop.hbase.security.EncryptionUtil;
-095import 
org.apache.hadoop.hbase.security.User;
-096import 
org.apache.hadoop.hbase.util.Bytes;
-097import 
org.apache.hadoop.hbase.util.ChecksumType;
-098import 
org.apache.hadoop.hbase.util.ClassSize;
-099import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-100import 
org.apache.hadoop.hbase.util.Pair;
-101import 
org.apache.hadoop.hbase.util.ReflectionUtils;
-102import 
org.apache.hadoop.util.StringUtils;
-103import 
org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
-104import 
org.apache.yetus.audience.InterfaceAudience;
-105import org.slf4j.Logger;
-106import org.slf4j.LoggerFactory;
-107import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-108import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-109import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollection;
-110import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-111import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-112import 
org.apache.hbase.thirdparty.com.google.common.collect.Maps;
-113import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-114import 
org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
-115import 
org.apache.hbase.thirdparty.org.apache.commons.collections4.IterableUtils;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
-118
-119/**
-120 * A Store holds a column family in a 
Region.  Its a memstore and a set of zero
-121 * or more StoreFiles, which stretch 
backwards over time.
-122 *
-123 * 

There's no reason to consider append-logging at this level; all logging -124 * and locking is handled at the HRegion level. Store just provides -125 * services to manage sets of StoreFiles. One of the most important of those -126 * services is compaction services where


[18/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.SuspendProcedure.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.SuspendProcedure.html
 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.SuspendProcedure.html
new file mode 100644
index 000..e6b932b
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.SuspendProcedure.html
@@ -0,0 +1,369 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+
+TestProcedureCleanup.SuspendProcedure (Apache HBase 3.0.0-SNAPSHOT Test 
API)
+
+
+
+
+
+var methods = {"i0":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev Class
+Next Class
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+Summary: 
+Nested | 
+Field | 
+Constr | 
+Method
+
+
+Detail: 
+Field | 
+Constr | 
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.procedure2
+Class 
TestProcedureCleanup.SuspendProcedure
+
+
+
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.procedure2.Procedure
+
+
+org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.NoopProcedure
+
+
+org.apache.hadoop.hbase.procedure2.TestProcedureCleanup.SuspendProcedure
+
+
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in 
java.lang">Comparable>
+
+
+Enclosing class:
+TestProcedureCleanup
+
+
+
+public static class TestProcedureCleanup.SuspendProcedure
+extends ProcedureTestingUtility.NoopProcedure
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from 
class org.apache.hadoop.hbase.procedure2.Procedure
+org.apache.hadoop.hbase.procedure2.Procedure.LockState
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields 
+
+Modifier and Type
+Field and Description
+
+
+private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CountDownLatch.html?is-external=true";
 title="class or interface in 
java.util.concurrent">CountDownLatch
+latch 
+
+
+
+
+
+
+Fields inherited from 
class org.apache.hadoop.hbase.procedure2.Procedure
+NO_PROC_ID, NO_TIMEOUT
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors 
+
+Constructor and Description
+
+
+SuspendProcedure() 
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All Methods Instance Methods Concrete Methods 
+
+Modifier and Type
+Method and Description
+
+
+protected 
org.apache.hadoop.hbase.procedure2.Procedure[]
+execute(TestProcedureCleanup.TestProcEnv env) 
+
+
+
+
+
+
+Methods inherited from class org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.NoopProcedure
+abort,
 deserializeStateData,
 rollback,
 serializeStateData
+
+
+
+
+
+Methods inherited from 
class org.apache.hadoop.hbase.procedure2.Procedure
+acquireLock, addStackIndex, afterReplay, beforeReplay, bypass, 
compareTo, completionCleanup, doAcquireLock, doExecute, doReleaseLock, 
doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, 
getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, 
getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, 
getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, 
hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, 
haveSameParent, holdLock, incChildrenLatch, isBypass, isFailed, isFinished, 
isInitializing, isRunnable, isSuccess, isWaiting, isYieldAfterExecutionStep, 
lockedWhenLoading, needPersistence, releaseLock, removeStackIndex, 
resetPersistence, restoreLock, setAbortFailure, setChildrenLatch, setFailure, 
setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, 
setProcId, setResult, setRootProcId, setStackIndexes, setState, 
setSubmittedTime, setTimeout, setTimeoutFailure, 
 shouldWaitClientAck, skipPersistence, toString, toSt

[17/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestProcedureCleanup.RootProcedure.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestProcedureCleanup.RootProcedure.html
 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestProcedureCleanup.RootProcedure.html
new file mode 100644
index 000..2396f0a
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestProcedureCleanup.RootProcedure.html
@@ -0,0 +1,125 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+
+Uses of Class 
org.apache.hadoop.hbase.procedure2.TestProcedureCleanup.RootProcedure (Apache 
HBase 3.0.0-SNAPSHOT Test API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+
+
+
+Uses of 
Classorg.apache.hadoop.hbase.procedure2.TestProcedureCleanup.RootProcedure
+
+No usage of 
org.apache.hadoop.hbase.procedure2.TestProcedureCleanup.RootProcedure
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+
+
+Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestProcedureCleanup.SuspendProcedure.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestProcedureCleanup.SuspendProcedure.html
 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestProcedureCleanup.SuspendProcedure.html
new file mode 100644
index 000..b8489e4
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestProcedureCleanup.SuspendProcedure.html
@@ -0,0 +1,125 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+
+Uses of Class 
org.apache.hadoop.hbase.procedure2.TestProcedureCleanup.SuspendProcedure 
(Apache HBase 3.0.0-SNAPSHOT Test API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+
+
+
+Uses of 
Classorg.apache.hadoop.hbase.procedure2.TestProcedureCleanup.SuspendProcedure
+
+No usage of 
org.apache.hadoop.hbase.procedure2.TestProcedureCleanup.SuspendProcedure
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+
+
+Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestProcedureCleanup.TestProcEnv.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestProcedureCleanup.TestProcEnv.html
 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestProcedureCleanup.TestProcEnv.html
new file mode 100644
index 000..06fc5f2
--- /dev/null
+++ 
b/tes

[37/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/3b2f2cea
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/3b2f2cea
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/3b2f2cea

Branch: refs/heads/asf-site
Commit: 3b2f2cea9b4d8e71bf34d850f88cca0cb2ef3024
Parents: ab31afe
Author: jenkins 
Authored: Tue Oct 23 14:55:08 2018 +
Committer: jenkins 
Committed: Tue Oct 23 14:55:08 2018 +

--
 acid-semantics.html |4 +-
 apache_hbase_reference_guide.pdf|4 +-
 book.html   |2 +-
 bulk-loads.html |4 +-
 checkstyle-aggregate.html   | 6148 
 checkstyle.rss  |   20 +-
 coc.html|4 +-
 dependencies.html   |4 +-
 dependency-convergence.html |4 +-
 dependency-info.html|4 +-
 dependency-management.html  |4 +-
 devapidocs/constant-values.html |4 +-
 devapidocs/index-all.html   |   10 +-
 .../hadoop/hbase/backup/package-tree.html   |4 +-
 .../hadoop/hbase/client/package-tree.html   |   26 +-
 .../hadoop/hbase/coprocessor/package-tree.html  |2 +-
 .../hadoop/hbase/filter/package-tree.html   |8 +-
 .../hadoop/hbase/io/hfile/package-tree.html |6 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   |2 +-
 .../hadoop/hbase/mapreduce/package-tree.html|6 +-
 .../hbase/master/balancer/package-tree.html |2 +-
 .../hadoop/hbase/master/package-tree.html   |6 +-
 .../hbase/master/procedure/package-tree.html|2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |   16 +-
 .../hadoop/hbase/procedure2/package-tree.html   |4 +-
 .../procedure2/store/ProcedureStoreTracker.html |  128 +-
 .../procedure2/store/class-use/BitSetNode.html  |8 +
 .../store/class-use/ProcedureStoreTracker.html  |3 +-
 .../wal/WALProcedureStore.LeaseRecovery.html|4 +-
 .../store/wal/WALProcedureStore.PushType.html   |   12 +-
 .../wal/WALProcedureStore.SyncMetrics.html  |   24 +-
 .../procedure2/store/wal/WALProcedureStore.html |  261 +-
 .../hadoop/hbase/quotas/package-tree.html   |6 +-
 .../regionserver/HStore.StoreFlusherImpl.html   |   34 +-
 .../hadoop/hbase/regionserver/HStore.html   |  400 +-
 .../hadoop/hbase/regionserver/package-tree.html |   12 +-
 .../regionserver/querymatcher/package-tree.html |2 +-
 .../hbase/regionserver/wal/package-tree.html|2 +-
 .../hadoop/hbase/replication/package-tree.html  |2 +-
 .../hbase/security/access/package-tree.html |2 +-
 .../hadoop/hbase/security/package-tree.html |2 +-
 .../hadoop/hbase/thrift/package-tree.html   |4 +-
 .../apache/hadoop/hbase/util/package-tree.html  |   12 +-
 .../org/apache/hadoop/hbase/Version.html|4 +-
 .../ProcedureStoreTracker.DeleteState.html  |  509 +-
 .../procedure2/store/ProcedureStoreTracker.html |  509 +-
 .../wal/WALProcedureStore.LeaseRecovery.html| 2602 +++
 .../store/wal/WALProcedureStore.PushType.html   | 2602 +++
 .../wal/WALProcedureStore.SyncMetrics.html  | 2602 +++
 .../procedure2/store/wal/WALProcedureStore.html | 2602 +++
 .../regionserver/HStore.StoreFlusherImpl.html   | 5446 +++---
 .../hadoop/hbase/regionserver/HStore.html   | 5446 +++---
 downloads.html  |4 +-
 export_control.html |4 +-
 index.html  |4 +-
 integration.html|4 +-
 issue-tracking.html |4 +-
 license.html|4 +-
 mail-lists.html |4 +-
 metrics.html|4 +-
 old_news.html   |4 +-
 plugin-management.html  |4 +-
 plugins.html|4 +-
 poweredbyhbase.html |4 +-
 project-info.html   |4 +-
 project-reports.html|4 +-
 project-summary.html|4 +-
 pseudo-distributed.html |4 +-
 replication.html|4 +-
 resources.html  |4 +-
 source-repository.html  |4 +-
 sponsors.html   |4 +-
 supportingprojects.html |4 +-
 team-li

[29/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html
index 8515fa3..1579eab 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html
@@ -211,7 +211,7 @@
 203   * then we mark it as deleted.
 204   * @see 
#setDeletedIfModified(long...)
 205   */
-206  public void 
setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker) {
+206  public void 
setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker, boolean 
globalTracker) {
 207BitSetNode trackerNode = null;
 208for (BitSetNode node : map.values()) 
{
 209  final long minProcId = 
node.getStart();
@@ -222,250 +222,285 @@
 214}
 215
 216trackerNode = 
tracker.lookupClosestNode(trackerNode, procId);
-217if (trackerNode == null || 
!trackerNode.contains(procId) ||
-218  trackerNode.isModified(procId)) 
{
-219  // the procedure was removed or 
modified
-220  node.delete(procId);
-221}
-222  }
-223}
-224  }
-225
-226  /**
-227   * lookup the node containing the 
specified procId.
-228   * @param node cached node to check 
before doing a lookup
-229   * @param procId the procId to lookup
-230   * @return the node that may contains 
the procId or null
-231   */
-232  private BitSetNode 
lookupClosestNode(final BitSetNode node, final long procId) {
-233if (node != null && 
node.contains(procId)) return node;
-234final Map.Entry entry = map.floorEntry(procId);
-235return entry != null ? 
entry.getValue() : null;
-236  }
-237
-238  private void trackProcIds(long procId) 
{
-239minModifiedProcId = 
Math.min(minModifiedProcId, procId);
-240maxModifiedProcId = 
Math.max(maxModifiedProcId, procId);
+217if (trackerNode == null || 
!trackerNode.contains(procId)) {
+218  // the procId is not exist in 
the track, we can only delete the proc
+219  // if globalTracker set to 
true.
+220  // Only if the procedure is not 
in the global tracker we can delete the
+221  // the procedure. In other 
cases, the procedure may not update in a single
+222  // log, we cannot delete it 
just because the log's track doesn't have
+223  // any info for the 
procedure.
+224  if (globalTracker) {
+225node.delete(procId);
+226  }
+227  continue;
+228}
+229// Only check delete in the 
global tracker, only global tracker has the
+230// whole picture
+231if (globalTracker && 
trackerNode.isDeleted(procId) == DeleteState.YES) {
+232  node.delete(procId);
+233  continue;
+234}
+235if 
(trackerNode.isModified(procId)) {
+236  // the procedure was modified
+237  node.delete(procId);
+238}
+239  }
+240}
 241  }
 242
-243  public long getModifiedMinProcId() {
-244return minModifiedProcId;
-245  }
-246
-247  public long getModifiedMaxProcId() {
-248return maxModifiedProcId;
-249  }
-250
-251  public void reset() {
-252this.keepDeletes = false;
-253this.partial = false;
-254this.map.clear();
-255resetModified();
-256  }
-257
-258  public boolean isModified(long procId) 
{
-259final Map.Entry entry = map.floorEntry(procId);
-260return entry != null && 
entry.getValue().contains(procId) &&
-261  
entry.getValue().isModified(procId);
+243  /**
+244   * lookup the node containing the 
specified procId.
+245   * @param node cached node to check 
before doing a lookup
+246   * @param procId the procId to lookup
+247   * @return the node that may contains 
the procId or null
+248   */
+249  private BitSetNode 
lookupClosestNode(final BitSetNode node, final long procId) {
+250if (node != null && 
node.contains(procId)) return node;
+251final Map.Entry entry = map.floorEntry(procId);
+252return entry != null ? 
entry.getValue() : null;
+253  }
+254
+255  private void trackProcIds(long procId) 
{
+256minModifiedProcId = 
Math.min(minModifiedProcId, procId);
+257maxModifiedProcId = 
Math.max(maxModifiedProcId, procId);
+258  }
+259
+260  public long getModifiedMinProcId() {
+261return minModifiedProcId;
 262  }
 263
-264  /**
-265   * If {@link #partial} is false, 
returns state from the bitmap. If no state is found for
-266   * {@code procId}, returns YES.
-267   * If partial is true, tracker doesn't 
have complete view of system state, so it returns MAYBE
-268   * if there is no update for the 
procedure or if it doesn't have 

[24/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
index b595018..c82bf55 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
@@ -63,2735 +63,2737 @@
 055import 
org.apache.hadoop.conf.Configuration;
 056import org.apache.hadoop.fs.FileSystem;
 057import org.apache.hadoop.fs.Path;
-058import org.apache.hadoop.hbase.Cell;
-059import 
org.apache.hadoop.hbase.CellComparator;
-060import 
org.apache.hadoop.hbase.CellUtil;
-061import 
org.apache.hadoop.hbase.CompoundConfiguration;
-062import 
org.apache.hadoop.hbase.HConstants;
-063import 
org.apache.hadoop.hbase.MemoryCompactionPolicy;
-064import 
org.apache.hadoop.hbase.TableName;
-065import 
org.apache.hadoop.hbase.backup.FailedArchiveException;
-066import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-067import 
org.apache.hadoop.hbase.client.RegionInfo;
-068import 
org.apache.hadoop.hbase.client.Scan;
-069import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-070import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-071import 
org.apache.hadoop.hbase.io.HeapSize;
-072import 
org.apache.hadoop.hbase.io.compress.Compression;
-073import 
org.apache.hadoop.hbase.io.crypto.Encryption;
-074import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-075import 
org.apache.hadoop.hbase.io.hfile.HFile;
-076import 
org.apache.hadoop.hbase.io.hfile.HFileContext;
-077import 
org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-078import 
org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
-079import 
org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
-080import 
org.apache.hadoop.hbase.io.hfile.HFileScanner;
-081import 
org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
-082import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-083import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-084import 
org.apache.hadoop.hbase.quotas.RegionSizeStore;
-085import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
-086import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-087import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
-088import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
-089import 
org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
-090import 
org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours;
-091import 
org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
-092import 
org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-093import 
org.apache.hadoop.hbase.regionserver.wal.WALUtil;
-094import 
org.apache.hadoop.hbase.security.EncryptionUtil;
-095import 
org.apache.hadoop.hbase.security.User;
-096import 
org.apache.hadoop.hbase.util.Bytes;
-097import 
org.apache.hadoop.hbase.util.ChecksumType;
-098import 
org.apache.hadoop.hbase.util.ClassSize;
-099import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-100import 
org.apache.hadoop.hbase.util.Pair;
-101import 
org.apache.hadoop.hbase.util.ReflectionUtils;
-102import 
org.apache.hadoop.util.StringUtils;
-103import 
org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
-104import 
org.apache.yetus.audience.InterfaceAudience;
-105import org.slf4j.Logger;
-106import org.slf4j.LoggerFactory;
-107import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-108import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-109import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollection;
-110import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-111import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-112import 
org.apache.hbase.thirdparty.com.google.common.collect.Maps;
-113import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-114import 
org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
-115import 
org.apache.hbase.thirdparty.org.apache.commons.collections4.IterableUtils;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
-118
-119/**
-120 * A Store holds a column family in a 
Region.  Its a memstore and a set of zero
-121 * or more StoreFiles, which stretch 
backwards over time.
-122 *
-123 * 

There's no reason to consider append-logging at this level; all logging -124 * and locking is handled at the HRegion level. Store just provides -125 * services to manage sets of Store


[02/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.TestTableDDLProcedure.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.TestTableDDLProcedure.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.TestTableDDLProcedure.html
index dc287aa..5062e9b 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.TestTableDDLProcedure.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.TestTableDDLProcedure.html
@@ -163,848 +163,848 @@
 155  public static final HBaseClassTestRule 
CLASS_RULE =
 156  
HBaseClassTestRule.forClass(TestAccessController.class);
 157
-158  private static final Logger LOG = 
LoggerFactory.getLogger(TestAccessController.class);
-159  private static TableName TEST_TABLE = 
TableName.valueOf("testtable1");
-160  private static final 
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-161  private static Configuration conf;
-162
-163  /** The systemUserConnection created 
here is tied to the system user. In case, you are planning
-164   * to create AccessTestAction, DON'T 
use this systemUserConnection as the 'doAs' user
-165   * gets  eclipsed by the system user. 
*/
-166  private static Connection 
systemUserConnection;
-167
+158  private static final FsPermission 
FS_PERMISSION_ALL = FsPermission.valueOf("-rwxrwxrwx");
+159  private static final Logger LOG = 
LoggerFactory.getLogger(TestAccessController.class);
+160  private static TableName TEST_TABLE = 
TableName.valueOf("testtable1");
+161  private static final 
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+162  private static Configuration conf;
+163
+164  /** The systemUserConnection created 
here is tied to the system user. In case, you are planning
+165   * to create AccessTestAction, DON'T 
use this systemUserConnection as the 'doAs' user
+166   * gets  eclipsed by the system user. 
*/
+167  private static Connection 
systemUserConnection;
 168
-169  // user with all permissions
-170  private static User SUPERUSER;
-171  // user granted with all global 
permission
-172  private static User USER_ADMIN;
-173  // user with rw permissions on column 
family.
-174  private static User USER_RW;
-175  // user with read-only permissions
-176  private static User USER_RO;
-177  // user is table owner. will have all 
permissions on table
-178  private static User USER_OWNER;
-179  // user with create table permissions 
alone
-180  private static User USER_CREATE;
-181  // user with no permissions
-182  private static User USER_NONE;
-183  // user with admin rights on the column 
family
-184  private static User USER_ADMIN_CF;
-185
-186  private static final String GROUP_ADMIN 
= "group_admin";
-187  private static final String 
GROUP_CREATE = "group_create";
-188  private static final String GROUP_READ 
= "group_read";
-189  private static final String GROUP_WRITE 
= "group_write";
-190
-191  private static User USER_GROUP_ADMIN;
-192  private static User 
USER_GROUP_CREATE;
-193  private static User USER_GROUP_READ;
-194  private static User USER_GROUP_WRITE;
-195
-196  // TODO: convert this test to cover the 
full matrix in
-197  // 
https://hbase.apache.org/book/appendix_acl_matrix.html
-198  // creating all Scope x Permission 
combinations
-199
-200  private static TableName TEST_TABLE2 = 
TableName.valueOf("testtable2");
-201  private static byte[] TEST_FAMILY = 
Bytes.toBytes("f1");
-202  private static byte[] TEST_QUALIFIER = 
Bytes.toBytes("q1");
-203  private static byte[] TEST_ROW = 
Bytes.toBytes("r1");
-204
-205  private static 
MasterCoprocessorEnvironment CP_ENV;
-206  private static AccessController 
ACCESS_CONTROLLER;
-207  private static 
RegionServerCoprocessorEnvironment RSCP_ENV;
-208  private static 
RegionCoprocessorEnvironment RCP_ENV;
-209
-210  @Rule
-211  public TestName name = new 
TestName();
-212
-213  @BeforeClass
-214  public static void setupBeforeClass() 
throws Exception {
-215// setup configuration
-216conf = 
TEST_UTIL.getConfiguration();
-217// Up the handlers; this test needs 
more than usual.
-218
conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
-219
-220
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
-221  
MyShellBasedUnixGroupsMapping.class.getName());
-222
UserGroupInformation.setConfiguration(conf);
-223
-224// Enable security
-225enableSecurity(conf);
-226// In this particular test case, we 
can't use SecureBulkLoadEndpoint because its doAs will fail
-227// to move a file for a random user
-228
conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, 
AccessController.class.getName());
-229// Verify enableSecurity sets up what 
we require
-23

[01/37] hbase-site git commit: Published site at 3b68e5393edba011146962c7457faffc1e3c0ee7.

2018-10-23 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site ab31afe83 -> 3b2f2cea9


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3b2f2cea/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.html
index dc287aa..5062e9b 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.html
@@ -163,848 +163,848 @@
 155  public static final HBaseClassTestRule 
CLASS_RULE =
 156  
HBaseClassTestRule.forClass(TestAccessController.class);
 157
-158  private static final Logger LOG = 
LoggerFactory.getLogger(TestAccessController.class);
-159  private static TableName TEST_TABLE = 
TableName.valueOf("testtable1");
-160  private static final 
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-161  private static Configuration conf;
-162
-163  /** The systemUserConnection created 
here is tied to the system user. In case, you are planning
-164   * to create AccessTestAction, DON'T 
use this systemUserConnection as the 'doAs' user
-165   * gets  eclipsed by the system user. 
*/
-166  private static Connection 
systemUserConnection;
-167
+158  private static final FsPermission 
FS_PERMISSION_ALL = FsPermission.valueOf("-rwxrwxrwx");
+159  private static final Logger LOG = 
LoggerFactory.getLogger(TestAccessController.class);
+160  private static TableName TEST_TABLE = 
TableName.valueOf("testtable1");
+161  private static final 
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+162  private static Configuration conf;
+163
+164  /** The systemUserConnection created 
here is tied to the system user. In case, you are planning
+165   * to create AccessTestAction, DON'T 
use this systemUserConnection as the 'doAs' user
+166   * gets  eclipsed by the system user. 
*/
+167  private static Connection 
systemUserConnection;
 168
-169  // user with all permissions
-170  private static User SUPERUSER;
-171  // user granted with all global 
permission
-172  private static User USER_ADMIN;
-173  // user with rw permissions on column 
family.
-174  private static User USER_RW;
-175  // user with read-only permissions
-176  private static User USER_RO;
-177  // user is table owner. will have all 
permissions on table
-178  private static User USER_OWNER;
-179  // user with create table permissions 
alone
-180  private static User USER_CREATE;
-181  // user with no permissions
-182  private static User USER_NONE;
-183  // user with admin rights on the column 
family
-184  private static User USER_ADMIN_CF;
-185
-186  private static final String GROUP_ADMIN 
= "group_admin";
-187  private static final String 
GROUP_CREATE = "group_create";
-188  private static final String GROUP_READ 
= "group_read";
-189  private static final String GROUP_WRITE 
= "group_write";
-190
-191  private static User USER_GROUP_ADMIN;
-192  private static User 
USER_GROUP_CREATE;
-193  private static User USER_GROUP_READ;
-194  private static User USER_GROUP_WRITE;
-195
-196  // TODO: convert this test to cover the 
full matrix in
-197  // 
https://hbase.apache.org/book/appendix_acl_matrix.html
-198  // creating all Scope x Permission 
combinations
-199
-200  private static TableName TEST_TABLE2 = 
TableName.valueOf("testtable2");
-201  private static byte[] TEST_FAMILY = 
Bytes.toBytes("f1");
-202  private static byte[] TEST_QUALIFIER = 
Bytes.toBytes("q1");
-203  private static byte[] TEST_ROW = 
Bytes.toBytes("r1");
-204
-205  private static 
MasterCoprocessorEnvironment CP_ENV;
-206  private static AccessController 
ACCESS_CONTROLLER;
-207  private static 
RegionServerCoprocessorEnvironment RSCP_ENV;
-208  private static 
RegionCoprocessorEnvironment RCP_ENV;
-209
-210  @Rule
-211  public TestName name = new 
TestName();
-212
-213  @BeforeClass
-214  public static void setupBeforeClass() 
throws Exception {
-215// setup configuration
-216conf = 
TEST_UTIL.getConfiguration();
-217// Up the handlers; this test needs 
more than usual.
-218
conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
-219
-220
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
-221  
MyShellBasedUnixGroupsMapping.class.getName());
-222
UserGroupInformation.setConfiguration(conf);
-223
-224// Enable security
-225enableSecurity(conf);
-226// In this particular test case, we 
can't use SecureBulkLoadEndpoint because its doAs will fail
-227// to move a file for a random user
-228
conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, 
AccessController.class.getName());
-229// Verify enableSecurity sets up what 
we require
-230verifyConfigurati

hbase git commit: HBASE-20973 ArrayIndexOutOfBoundsException when rolling back procedure

2018-10-23 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2 80ac2f969 -> 1b1dabd1f


HBASE-20973 ArrayIndexOutOfBoundsException when rolling back procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1b1dabd1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1b1dabd1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1b1dabd1

Branch: refs/heads/branch-2
Commit: 1b1dabd1f5c09ab75887b84132aa5e766537cb07
Parents: 80ac2f9
Author: Allan Yang 
Authored: Tue Oct 23 16:15:35 2018 +0800
Committer: Allan Yang 
Committed: Tue Oct 23 16:15:35 2018 +0800

--
 .../procedure2/store/ProcedureStoreTracker.java   | 18 ++
 1 file changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1b1dabd1/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index 64479b2..9f99e26 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -376,6 +376,24 @@ public class ProcedureStoreTracker {
   }
 
   private BitSetNode getOrCreateNode(long procId) {
+// See HBASE-20973, grow or merge can lead to ArrayIndexOutOfBoundsException
+// The root cause is not revealed yet, disable grow or merge for now
+return getOrCreateNodeNoGrowOrMerge(procId);
+  }
+
+  private BitSetNode getOrCreateNodeNoGrowOrMerge(long procId) {
+Map.Entry<Long, BitSetNode> entry = map.floorEntry(procId);
+if (entry != null && entry.getValue().contains(procId)) {
+  return entry.getValue();
+} else {
+  BitSetNode node = new BitSetNode(procId, partial);
+  assert !map.containsKey(node.getStart());
+  map.put(node.getStart(), node);
+  return node;
+}
+  }
+
+  private BitSetNode getOrCreateNodeWithGrowOrMerge(long procId) {
 // If procId can fit in left node (directly or by growing it)
 BitSetNode leftNode = null;
 boolean leftCanGrow = false;



hbase git commit: HBASE-20973 ArrayIndexOutOfBoundsException when rolling back procedure

2018-10-23 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 7c04a95f4 -> e29ce9f93


HBASE-20973 ArrayIndexOutOfBoundsException when rolling back procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e29ce9f9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e29ce9f9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e29ce9f9

Branch: refs/heads/branch-2.1
Commit: e29ce9f93753d79edfa4e8b864c31c34e33ea635
Parents: 7c04a95
Author: Allan Yang 
Authored: Tue Oct 23 16:13:24 2018 +0800
Committer: Allan Yang 
Committed: Tue Oct 23 16:13:24 2018 +0800

--
 .../procedure2/store/ProcedureStoreTracker.java   | 18 ++
 1 file changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e29ce9f9/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index 64479b2..9f99e26 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -376,6 +376,24 @@ public class ProcedureStoreTracker {
   }
 
   private BitSetNode getOrCreateNode(long procId) {
+// See HBASE-20973, grow or merge can lead to ArrayIndexOutOfBoundsException
+// The root cause is not revealed yet, disable grow or merge for now
+return getOrCreateNodeNoGrowOrMerge(procId);
+  }
+
+  private BitSetNode getOrCreateNodeNoGrowOrMerge(long procId) {
+Map.Entry<Long, BitSetNode> entry = map.floorEntry(procId);
+if (entry != null && entry.getValue().contains(procId)) {
+  return entry.getValue();
+} else {
+  BitSetNode node = new BitSetNode(procId, partial);
+  assert !map.containsKey(node.getStart());
+  map.put(node.getStart(), node);
+  return node;
+}
+  }
+
+  private BitSetNode getOrCreateNodeWithGrowOrMerge(long procId) {
 // If procId can fit in left node (directly or by growing it)
 BitSetNode leftNode = null;
 boolean leftCanGrow = false;



hbase git commit: HBASE-20973 ArrayIndexOutOfBoundsException when rolling back procedure

2018-10-23 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/master 603bf4c55 -> 3b68e5393


HBASE-20973 ArrayIndexOutOfBoundsException when rolling back procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3b68e539
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3b68e539
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3b68e539

Branch: refs/heads/master
Commit: 3b68e5393edba011146962c7457faffc1e3c0ee7
Parents: 603bf4c
Author: Allan Yang 
Authored: Tue Oct 23 16:09:05 2018 +0800
Committer: Allan Yang 
Committed: Tue Oct 23 16:09:05 2018 +0800

--
 .../procedure2/store/ProcedureStoreTracker.java   | 18 ++
 1 file changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3b68e539/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index 64479b2..9f99e26 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -376,6 +376,24 @@ public class ProcedureStoreTracker {
   }
 
   private BitSetNode getOrCreateNode(long procId) {
+// See HBASE-20973, grow or merge can lead to ArrayIndexOutOfBoundsException
+// The root cause is not revealed yet, disable grow or merge for now
+return getOrCreateNodeNoGrowOrMerge(procId);
+  }
+
+  private BitSetNode getOrCreateNodeNoGrowOrMerge(long procId) {
+Map.Entry<Long, BitSetNode> entry = map.floorEntry(procId);
+if (entry != null && entry.getValue().contains(procId)) {
+  return entry.getValue();
+} else {
+  BitSetNode node = new BitSetNode(procId, partial);
+  assert !map.containsKey(node.getStart());
+  map.put(node.getStart(), node);
+  return node;
+}
+  }
+
+  private BitSetNode getOrCreateNodeWithGrowOrMerge(long procId) {
 // If procId can fit in left node (directly or by growing it)
 BitSetNode leftNode = null;
 boolean leftCanGrow = false;



hbase git commit: HBASE-20973 ArrayIndexOutOfBoundsException when rolling back procedure

2018-10-23 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 01d94d710 -> a31e71564


HBASE-20973 ArrayIndexOutOfBoundsException when rolling back procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a31e7156
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a31e7156
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a31e7156

Branch: refs/heads/branch-2.0
Commit: a31e71564fd5c276cef7b8381e5285b8703eef13
Parents: 01d94d7
Author: Allan Yang 
Authored: Tue Oct 23 16:06:19 2018 +0800
Committer: Allan Yang 
Committed: Tue Oct 23 16:06:19 2018 +0800

--
 .../procedure2/store/ProcedureStoreTracker.java   | 18 ++
 1 file changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a31e7156/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index 64479b2..9f99e26 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -376,6 +376,24 @@ public class ProcedureStoreTracker {
   }
 
   private BitSetNode getOrCreateNode(long procId) {
+// See HBASE-20973, grow or merge can lead to ArrayIndexOutOfBoundsException
+// The root cause is not revealed yet, disable grow or merge for now
+return getOrCreateNodeNoGrowOrMerge(procId);
+  }
+
+  private BitSetNode getOrCreateNodeNoGrowOrMerge(long procId) {
+Map.Entry<Long, BitSetNode> entry = map.floorEntry(procId);
+if (entry != null && entry.getValue().contains(procId)) {
+  return entry.getValue();
+} else {
+  BitSetNode node = new BitSetNode(procId, partial);
+  assert !map.containsKey(node.getStart());
+  map.put(node.getStart(), node);
+  return node;
+}
+  }
+
+  private BitSetNode getOrCreateNodeWithGrowOrMerge(long procId) {
 // If procId can fit in left node (directly or by growing it)
 BitSetNode leftNode = null;
 boolean leftCanGrow = false;