hbase git commit: HBASE-18071 Fix flaky test TestStochasticLoadBalancer#testBalanceCluster

2017-05-19 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 5c728b0f0 -> 8b70d043e


HBASE-18071 Fix flaky test TestStochasticLoadBalancer#testBalanceCluster

Test was failing on clusters with a large number of servers or regions. Using
commonly used config settings, as some other tests do, seems to work.

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8b70d043
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8b70d043
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8b70d043

Branch: refs/heads/master
Commit: 8b70d043e48c2dd5fdf304ee6b1212c3d8006de7
Parents: 5c728b0
Author: Umesh Agashe 
Authored: Fri May 19 10:02:45 2017 -0700
Committer: Michael Stack 
Committed: Fri May 19 11:09:28 2017 -0700

--
 .../hbase/master/balancer/TestStochasticLoadBalancer.java   | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8b70d043/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
index 9d193d2..a20925b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
@@ -118,7 +118,10 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
    */
   @Test
   public void testBalanceCluster() throws Exception {
-
+    conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 200L);
+    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 90 * 1000); // 90 sec
+    conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
+    loadBalancer.setConf(conf);
     for (int[] mockCluster : clusterStateMocks) {
       Map<ServerName, List<HRegionInfo>> servers = mockClusterServers(mockCluster);
       List<ServerAndLoad> list = convertToList(servers);
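As an aside, the tuning pattern above (widen the balancer's search budget, then hand the Configuration back to the balancer) can be sketched on its own. The class below is hypothetical and its values are illustrative rather than the committed ones; only the config keys and constants mirror the hunk above. In the test itself, `conf` and `loadBalancer` already come from BalancerTestBase, so no new balancer instance is needed.

package org.apache.hadoop.hbase.master.balancer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical sketch: tune the stochastic balancer's search budget, then apply the config.
public class BalancerTuningSketch {
  static StochasticLoadBalancer tunedBalancer() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 200L);                    // example cap on candidate moves
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 90 * 1000L); // stop searching after ~90s
    conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);      // allow moving every region
    StochasticLoadBalancer balancer = new StochasticLoadBalancer();
    balancer.setConf(conf);                                                      // balancer re-reads the keys above
    return balancer;
  }
}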



hbase git commit: HBASE-18058 Zookeeper retry sleep time should have an upper limit (Allan Yang)

2017-05-19 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 f348caf7f -> 300c5388f


HBASE-18058 Zookeeper retry sleep time should have an upper limit (Allan Yang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/300c5388
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/300c5388
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/300c5388

Branch: refs/heads/branch-1
Commit: 300c5388f2358418faff53558967e00e616c8e1a
Parents: f348caf
Author: tedyu 
Authored: Fri May 19 10:58:38 2017 -0700
Committer: tedyu 
Committed: Fri May 19 10:58:38 2017 -0700

--
 .../apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java  | 8 
 .../main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java  | 3 ++-
 hbase-common/src/main/resources/hbase-default.xml| 7 +++
 3 files changed, 13 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/300c5388/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
index 9dad2d1..75cee0a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
@@ -100,20 +100,20 @@ public class RecoverableZooKeeper {
   private static final int ID_LENGTH_SIZE =  Bytes.SIZEOF_INT;
 
   public RecoverableZooKeeper(String quorumServers, int sessionTimeout,
-      Watcher watcher, int maxRetries, int retryIntervalMillis)
+      Watcher watcher, int maxRetries, int retryIntervalMillis, int maxSleepTime)
   throws IOException {
-    this(quorumServers, sessionTimeout, watcher, maxRetries, retryIntervalMillis,
+    this(quorumServers, sessionTimeout, watcher, maxRetries, retryIntervalMillis, maxSleepTime,
         null);
   }
 
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE",
       justification="None. Its always been this way.")
   public RecoverableZooKeeper(String quorumServers, int sessionTimeout,
-      Watcher watcher, int maxRetries, int retryIntervalMillis, String identifier)
+      Watcher watcher, int maxRetries, int retryIntervalMillis, int maxSleepTime, String identifier)
   throws IOException {
     // TODO: Add support for zk 'chroot'; we don't add it to the quorumServers String as we should.
     this.retryCounterFactory =
-      new RetryCounterFactory(maxRetries+1, retryIntervalMillis);
+      new RetryCounterFactory(maxRetries+1, retryIntervalMillis, maxSleepTime);
 
     if (identifier == null || identifier.length() == 0) {
       // the identifier = processID@hostName

http://git-wip-us.apache.org/repos/asf/hbase/blob/300c5388/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
index 3cbc317..4f4b2eb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
@@ -133,10 +133,11 @@ public class ZKUtil {
     int retry = conf.getInt("zookeeper.recovery.retry", 3);
     int retryIntervalMillis =
       conf.getInt("zookeeper.recovery.retry.intervalmill", 1000);
+    int maxSleepTime = conf.getInt("zookeeper.recovery.retry.maxsleeptime", 60000);
     zkDumpConnectionTimeOut = conf.getInt("zookeeper.dump.connection.timeout",
         1000);
     return new RecoverableZooKeeper(ensemble, timeout, watcher,
-        retry, retryIntervalMillis, identifier);
+        retry, retryIntervalMillis, maxSleepTime, identifier);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/300c5388/hbase-common/src/main/resources/hbase-default.xml
--
diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index c571289..e9fe34f 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -103,6 +103,13 @@ possible configurations would overwhelm and obscure the important.
     and running -->
 
   <property>
+    <name>zookeeper.recovery.retry.maxsleeptime</name>
+    <value>60000</value>
+    <description>Max sleep time before retry zookeeper operations in milliseconds,
+    a max time is needed here so that sleep time won't grow unboundedly
+    </description>
+  </property>
+  <property>
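The effect of the new maxSleepTime bound is easiest to see as a standalone sketch of capped exponential backoff. This illustrates the policy only; it is not the actual RetryCounter/RetryCounterFactory implementation, and the class and method names are made up.

/** Illustration only: exponential backoff whose per-attempt sleep is clamped to an upper bound. */
public class CappedBackoffSketch {

  /**
   * @param attempt             zero-based retry attempt
   * @param retryIntervalMillis base sleep (cf. zookeeper.recovery.retry.intervalmill, default 1000)
   * @param maxSleepTimeMillis  cap on any single sleep (cf. zookeeper.recovery.retry.maxsleeptime)
   */
  static long sleepForAttempt(int attempt, long retryIntervalMillis, long maxSleepTimeMillis) {
    long backoff = retryIntervalMillis * (1L << Math.min(attempt, 30)); // doubling, guarded against overflow
    return Math.min(backoff, maxSleepTimeMillis);                       // never exceed the configured cap
  }

  public static void main(String[] args) {
    // With a 1s base the uncapped sleeps would be 1s, 2s, 4s, ... 512s; the cap flattens the tail.
    for (int attempt = 0; attempt < 10; attempt++) {
      System.out.println("attempt " + attempt + " -> " + sleepForAttempt(attempt, 1000L, 60000L) + " ms");
    }
  }
}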

hbase git commit: HBASE-17286 Add goal to remote-resources plugin

2017-05-19 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/branch-1 f9dc4cad6 -> f348caf7f


HBASE-17286 Add goal to remote-resources plugin

With Apache parent pom v12 our remote-resources-plugin execution id was
shadowing the parent declaration, and our configuration would get run to
aggregate LICENSE files correctly. When upgrading to v18, apache changed
the execution id, so our configuration no longer gets used.

Add an explicit goal to our usage of the remote-resources-plugin and
change the name to something more descriptive and less likely to
conflict (either intentionally or not).

Signed-off-by: Josh Elser 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f348caf7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f348caf7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f348caf7

Branch: refs/heads/branch-1
Commit: f348caf7fe5db2d6630f55c6859f5829a9d7a8ea
Parents: f9dc4ca
Author: Mike Drob 
Authored: Thu May 18 15:19:21 2017 -0700
Committer: Josh Elser 
Committed: Fri May 19 13:29:15 2017 -0400

--
 hbase-assembly/pom.xml | 5 -
 hbase-shaded/pom.xml   | 5 -
 2 files changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f348caf7/hbase-assembly/pom.xml
--
diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index 8ad1c08..2c78516 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -44,7 +44,10 @@
         <version>1.5</version>
         <executions>
           <execution>
-            <id>default</id>
+            <id>aggregate-licenses</id>
+            <goals>
+              <goal>process</goal>
+            </goals>
             <configuration>
               <properties>
                 <build.year>${build.year}</build.year>

http://git-wip-us.apache.org/repos/asf/hbase/blob/f348caf7/hbase-shaded/pom.xml
--
diff --git a/hbase-shaded/pom.xml b/hbase-shaded/pom.xml
index 319fddd..2c2613b 100644
--- a/hbase-shaded/pom.xml
+++ b/hbase-shaded/pom.xml
@@ -72,7 +72,10 @@
           <version>1.5</version>
           <executions>
             <execution>
-              <id>default</id>
+              <id>aggregate-licenses</id>
+              <goals>
+                <goal>process</goal>
+              </goals>
               <configuration>
                 <properties>
                   <build.year>${build.year}</build.year>



[43/50] [abbrv] hbase git commit: HBASE-17955 Various reviewboard improvements to space quota work

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/97dcaffa/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
index 4577bcf..e8a57e9 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
@@ -4362,7 +4362,7 @@ public final class QuotaProtos {
* optional .hbase.pb.SpaceQuota space = 3;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
 
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
   getSpaceFieldBuilder() {
 if (spaceBuilder_ == null) {
   spaceBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -6077,7 +6077,7 @@ public final class QuotaProtos {
* optional .hbase.pb.SpaceQuota quota = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
 
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
   getQuotaFieldBuilder() {
 if (quotaBuilder_ == null) {
   quotaBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -6143,13 +6143,13 @@ public final class QuotaProtos {
   org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
 
 /**
- * optional .hbase.pb.SpaceViolationPolicy policy = 1;
+ * optional .hbase.pb.SpaceViolationPolicy violation_policy = 
1;
  */
-boolean hasPolicy();
+boolean hasViolationPolicy();
 /**
- * optional .hbase.pb.SpaceViolationPolicy policy = 1;
+ * optional .hbase.pb.SpaceViolationPolicy violation_policy = 
1;
  */
-
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy
 getPolicy();
+
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy
 getViolationPolicy();
 
 /**
  * optional bool in_violation = 2;
@@ -6163,7 +6163,7 @@ public final class QuotaProtos {
   /**
* 
* Represents the state of a quota on a table. Either the quota is not in 
violation
-   * or it is in violatino there is a violation policy which should be in 
effect.
+   * or it is in violation there is a violation policy which should be in 
effect.
* 
*
* Protobuf type {@code hbase.pb.SpaceQuotaStatus}
@@ -6177,7 +6177,7 @@ public final class QuotaProtos {
   super(builder);
 }
 private SpaceQuotaStatus() {
-  policy_ = 1;
+  violationPolicy_ = 1;
   inViolation_ = false;
 }
 
@@ -6216,7 +6216,7 @@ public final class QuotaProtos {
 unknownFields.mergeVarintField(1, rawValue);
   } else {
 bitField0_ |= 0x0001;
-policy_ = rawValue;
+violationPolicy_ = rawValue;
   }
   break;
 }
@@ -6250,19 +6250,19 @@ public final class QuotaProtos {
 }
 
 private int bitField0_;
-public static final int POLICY_FIELD_NUMBER = 1;
-private int policy_;
+public static final int VIOLATION_POLICY_FIELD_NUMBER = 1;
+private int violationPolicy_;
 /**
- * optional .hbase.pb.SpaceViolationPolicy policy = 1;
+ * optional .hbase.pb.SpaceViolationPolicy violation_policy = 
1;
  */
-public boolean hasPolicy() {
+public boolean hasViolationPolicy() {
   return ((bitField0_ & 0x0001) == 0x0001);
 }
 /**
- * optional .hbase.pb.SpaceViolationPolicy policy = 1;
+ * optional .hbase.pb.SpaceViolationPolicy violation_policy = 
1;
  */
-public 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy
 getPolicy() 

[25/50] [abbrv] hbase git commit: HBASE-17478 Avoid reporting FS use when quotas are disabled

2017-05-19 Thread elserj
HBASE-17478 Avoid reporting FS use when quotas are disabled

Also, gracefully produce responses when quotas are disabled.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dfe7a701
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dfe7a701
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dfe7a701

Branch: refs/heads/HBASE-16961
Commit: dfe7a701f22c205f5582b1861229a1ce49347acc
Parents: 01a6b62
Author: Josh Elser 
Authored: Tue Jan 17 14:41:45 2017 -0500
Committer: Josh Elser 
Committed: Fri May 19 12:19:35 2017 -0400

--
 .../hadoop/hbase/master/MasterRpcServices.java  |  4 +++
 .../hadoop/hbase/quotas/MasterQuotaManager.java | 13 +--
 .../hbase/regionserver/HRegionServer.java   |  5 ++-
 .../hbase/quotas/TestMasterQuotaManager.java| 37 
 4 files changed, 56 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dfe7a701/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index db48bdb..be782df 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
 import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
+import org.apache.hadoop.hbase.quotas.QuotaUtil;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
@@ -1911,6 +1912,9 @@ public class MasterRpcServices extends RSRpcServices
       RegionSpaceUseReportRequest request) throws ServiceException {
     try {
       master.checkInitialized();
+      if (!QuotaUtil.isQuotaEnabled(master.getConfiguration())) {
+        return RegionSpaceUseReportResponse.newBuilder().build();
+      }
       MasterQuotaManager quotaManager = this.master.getMasterQuotaManager();
       for (RegionSpaceUse report : request.getSpaceUseList()) {
         quotaManager.addRegionSize(HRegionInfo.convert(report.getRegion()), report.getSize());

http://git-wip-us.apache.org/repos/asf/hbase/blob/dfe7a701/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
index a5832f9..cb614ea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.quotas;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -58,6 +59,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota;
 @InterfaceStability.Evolving
 public class MasterQuotaManager implements RegionStateListener {
   private static final Log LOG = LogFactory.getLog(MasterQuotaManager.class);
+  private static final Map<HRegionInfo, Long> EMPTY_MAP = Collections.unmodifiableMap(
+      new HashMap<>());
 
   private final MasterServices masterServices;
   private NamedLock<String> namespaceLocks;
@@ -529,13 +532,19 @@ public class MasterQuotaManager implements RegionStateListener {
   }
 
   public void addRegionSize(HRegionInfo hri, long size) {
-    // TODO Make proper API
+    if (null == regionSizes) {
+      return;
+    }
+    // TODO Make proper API?
     // TODO Prevent from growing indefinitely
     regionSizes.put(hri, size);
   }
 
   public Map<HRegionInfo, Long> snapshotRegionSizes() {
-    // TODO Make proper API
+    if (null == regionSizes) {
+      return EMPTY_MAP;
+    }
+    // TODO Make proper API?
     return new HashMap<>(regionSizes);
   }
 }
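For context, the guards above key off the same switch used throughout this series: quotas are off unless explicitly enabled, and only then are region size reports handed to the quota manager. A small hypothetical sketch (class name invented):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.quotas.QuotaUtil;

// Hypothetical sketch: the master only acts on region space-use reports when quotas are enabled.
public class QuotaEnabledSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Disabled by default, so reportRegionSpaceUse() above short-circuits with an empty response.
    System.out.println("quotas enabled by default? " + QuotaUtil.isQuotaEnabled(conf));
    // Opting in lets the master pass reports through to MasterQuotaManager#addRegionSize().
    conf.setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
    System.out.println("quotas enabled after opt-in? " + QuotaUtil.isQuotaEnabled(conf));
  }
}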

http://git-wip-us.apache.org/repos/asf/hbase/blob/dfe7a701/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 

[34/50] [abbrv] hbase git commit: HBASE-17794 Swap "violation" for "snapshot" where appropriate

2017-05-19 Thread elserj
HBASE-17794 Swap "violation" for "snapshot" where appropriate

A couple of variables and comments in which violation is incorrectly
used to describe what the code is doing. This was a hold over from early
implementation -- need to scrub these out for clarity.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/60074424
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/60074424
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/60074424

Branch: refs/heads/HBASE-16961
Commit: 60074424e48bcc6b7c7a397b094806063c09990c
Parents: 5729491
Author: Josh Elser 
Authored: Thu Mar 16 19:26:14 2017 -0400
Committer: Josh Elser 
Committed: Fri May 19 12:28:49 2017 -0400

--
 .../java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java| 4 ++--
 hbase-protocol-shaded/src/main/protobuf/Quota.proto| 2 +-
 .../org/apache/hadoop/hbase/quotas/QuotaObserverChore.java | 6 +++---
 .../apache/hadoop/hbase/quotas/TableQuotaSnapshotStore.java| 2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/60074424/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
index ad59517..c008702 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
@@ -228,7 +228,7 @@ public class QuotaTableUtil {
   }
 
   /**
-   * Creates a {@link Scan} which returns only quota violations from the quota table.
+   * Creates a {@link Scan} which returns only quota snapshots from the quota table.
    */
   public static Scan makeQuotaSnapshotScan() {
     Scan s = new Scan();
@@ -246,7 +246,7 @@ public class QuotaTableUtil {
    * will throw an {@link IllegalArgumentException}.
    *
    * @param result A row from the quota table.
-   * @param snapshots A map of violations to add the result of this method into.
+   * @param snapshots A map of snapshots to add the result of this method into.
    */
   public static void extractQuotaSnapshot(
       Result result, Map<TableName, SpaceQuotaSnapshot> snapshots) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/60074424/hbase-protocol-shaded/src/main/protobuf/Quota.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Quota.proto b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
index 1a6d5ed..364c58b 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Quota.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
@@ -98,7 +98,7 @@ message SpaceLimitRequest {
 }
 
 // Represents the state of a quota on a table. Either the quota is not in violation
-// or it is in violatino there is a violation policy which should be in effect.
+// or it is in violation there is a violation policy which should be in effect.
 message SpaceQuotaStatus {
   optional SpaceViolationPolicy policy = 1;
   optional bool in_violation = 2;

http://git-wip-us.apache.org/repos/asf/hbase/blob/60074424/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
index 94c5c87..254f2a1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
@@ -532,9 +532,9 @@ public class QuotaObserverChore extends ScheduledChore {
   }
 
   /**
-   * Stores the quota violation state for the given table.
+   * Stores the quota state for the given table.
    */
-  void setTableQuotaViolation(TableName table, SpaceQuotaSnapshot snapshot) {
+  void setTableQuotaSnapshot(TableName table, SpaceQuotaSnapshot snapshot) {
     this.tableQuotaSnapshots.put(table, snapshot);
   }
 
@@ -552,7 +552,7 @@ public class QuotaObserverChore extends ScheduledChore {
   }
 
   /**
-   * Stores the quota violation state for the given namespace.
+   * Stores the quota state for the given namespace.
    */
   void setNamespaceQuotaSnapshot(String namespace, SpaceQuotaSnapshot snapshot) {
     this.namespaceQuotaSnapshots.put(namespace, snapshot);


[48/50] [abbrv] hbase git commit: HBASE-17981 Consolidate the space quota shell commands

2017-05-19 Thread elserj
HBASE-17981 Consolidate the space quota shell commands


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3561b115
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3561b115
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3561b115

Branch: refs/heads/HBASE-16961
Commit: 3561b115331e6f95ae9ef5d9fc325dc3cc2ce16a
Parents: 56f1cd6
Author: Josh Elser 
Authored: Mon May 1 19:44:47 2017 -0400
Committer: Josh Elser 
Committed: Fri May 19 12:37:46 2017 -0400

--
 .../hadoop/hbase/client/QuotaStatusCalls.java   |   29 -
 .../hadoop/hbase/quotas/QuotaTableUtil.java |   44 +-
 .../hbase/shaded/protobuf/RequestConverter.java |   11 -
 .../shaded/protobuf/generated/AdminProtos.java  |  338 ++-
 .../shaded/protobuf/generated/QuotaProtos.java  | 1923 +-
 .../src/main/protobuf/Admin.proto   |4 -
 .../src/main/protobuf/Quota.proto   |   12 -
 .../hbase/protobuf/generated/QuotaProtos.java   |   34 +-
 .../hbase/regionserver/RSRpcServices.java   |   30 -
 .../hadoop/hbase/master/MockRegionServer.java   |9 -
 .../hbase/quotas/TestQuotaStatusRPCs.java   |   13 +-
 hbase-shell/src/main/ruby/hbase/quotas.rb   |   18 +-
 hbase-shell/src/main/ruby/hbase_constants.rb|1 +
 hbase-shell/src/main/ruby/shell.rb  |1 -
 .../ruby/shell/commands/list_quota_snapshots.rb |   48 +-
 .../shell/commands/list_quota_table_sizes.rb|8 +-
 .../shell/commands/list_quota_violations.rb |   48 -
 .../test/ruby/hbase/quotas_test_no_cluster.rb   |   19 +-
 18 files changed, 273 insertions(+), 2317 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3561b115/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
index af36d1e..70f6fb6 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaEnforcementsResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
 
@@ -127,32 +126,4 @@ public class QuotaStatusCalls {
 };
 return ProtobufUtil.call(callable);
   }
-
-  /**
-   * See {@link #getRegionServerSpaceQuotaEnforcements(ClusterConnection, 
RpcControllerFactory, int, ServerName)}
-   */
-  public static GetSpaceQuotaEnforcementsResponse 
getRegionServerSpaceQuotaEnforcements(
-  ClusterConnection clusterConn, int timeout, ServerName sn) throws 
IOException {
-RpcControllerFactory rpcController = clusterConn.getRpcControllerFactory();
-return getRegionServerSpaceQuotaEnforcements(clusterConn, rpcController, 
timeout, sn);
-  }
-
-  /**
-   * Executes an RPC to the RegionServer identified by the {@code ServerName} 
to fetch its view on
-   * enforced space quotas.
-   */
-  public static GetSpaceQuotaEnforcementsResponse 
getRegionServerSpaceQuotaEnforcements(
-  ClusterConnection conn, RpcControllerFactory factory,
-  int timeout, ServerName sn) throws IOException {
-final AdminService.BlockingInterface admin = conn.getAdmin(sn);
-Callable callable =
-new Callable() {
-  @Override
-  public GetSpaceQuotaEnforcementsResponse call() throws Exception {
-return admin.getSpaceQuotaEnforcements(
-factory.newController(), 
RequestConverter.buildGetSpaceQuotaEnforcementsRequest());
-  }
-};
-return ProtobufUtil.call(callable);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3561b115/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
index ec480c4..1b670e6 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
+++ 

[02/50] [abbrv] hbase git commit: HBASE-16995 Build client Java API and client protobuf messages - addendum fixes white spaces (Josh Elser)

2017-05-19 Thread elserj
HBASE-16995 Build client Java API and client protobuf messages - addendum fixes white spaces (Josh Elser)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c0bc6df6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c0bc6df6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c0bc6df6

Branch: refs/heads/HBASE-16961
Commit: c0bc6df66f75e55eeddc680a8bd9268952a8a7ed
Parents: 0224ea1
Author: tedyu 
Authored: Thu Nov 17 10:42:18 2016 -0800
Committer: Josh Elser 
Committed: Fri May 19 11:56:05 2017 -0400

--
 .../hbase/quotas/TestQuotaSettingsFactory.java|  2 +-
 .../shaded/protobuf/generated/QuotaProtos.java| 18 +-
 .../hbase/protobuf/generated/QuotaProtos.java |  4 ++--
 3 files changed, 12 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c0bc6df6/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java
--
diff --git 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java
 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java
index 17015d6..e0012a7 100644
--- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java
+++ 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java
@@ -44,7 +44,7 @@ import org.junit.experimental.categories.Category;
  */
 @Category(SmallTests.class)
 public class TestQuotaSettingsFactory {
-  
+
   @Test
   public void testAllQuotasAddedToList() {
 final SpaceQuota spaceQuota = SpaceQuota.newBuilder()

http://git-wip-us.apache.org/repos/asf/hbase/blob/c0bc6df6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
index e3c6bfd..0ab2576 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
@@ -4362,7 +4362,7 @@ public final class QuotaProtos {
* optional .hbase.pb.SpaceQuota space = 3;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
 
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
   getSpaceFieldBuilder() {
 if (spaceBuilder_ == null) {
   spaceBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -5957,7 +5957,7 @@ public final class QuotaProtos {
* optional .hbase.pb.SpaceQuota quota = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
 
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
   getQuotaFieldBuilder() {
 if (quotaBuilder_ == null) {
   quotaBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -6020,37 +6020,37 @@ public final class QuotaProtos {
 
   private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
 internal_static_hbase_pb_TimedQuota_descriptor;
-  private static final 
+  private static final
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
   internal_static_hbase_pb_TimedQuota_fieldAccessorTable;
   private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
 internal_static_hbase_pb_Throttle_descriptor;
-  private static 

[35/50] [abbrv] hbase git commit: HBASE-17003 Documentation updates for space quotas

2017-05-19 Thread elserj
HBASE-17003 Documentation updates for space quotas


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5729491d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5729491d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5729491d

Branch: refs/heads/HBASE-16961
Commit: 5729491d556f5e3b039dd11907ada8e1b90351eb
Parents: 3a2ec0b
Author: Josh Elser 
Authored: Thu Mar 16 16:21:14 2017 -0400
Committer: Josh Elser 
Committed: Fri May 19 12:28:49 2017 -0400

--
 src/main/asciidoc/_chapters/ops_mgt.adoc | 64 ++-
 1 file changed, 63 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5729491d/src/main/asciidoc/_chapters/ops_mgt.adoc
--
diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc
index e4c077f..f9009f3 100644
--- a/src/main/asciidoc/_chapters/ops_mgt.adoc
+++ b/src/main/asciidoc/_chapters/ops_mgt.adoc
@@ -1705,7 +1705,7 @@ handling multiple workloads:
 
 [[quota]]
 === Quotas
-HBASE-11598 introduces quotas, which allow you to throttle requests based on
+HBASE-11598 introduces RPC quotas, which allow you to throttle requests based on
 the following limits:
 
 . <>
@@ -1885,6 +1885,68 @@ at the same time and that fewer scans can be executed at the same time. A value
 `0.9` will give more queue/handlers to scans, so the number of scans executed will
 increase and the number of gets will decrease.
 
+[[space-quotas]]
+=== Space Quotas
+
+link:https://issues.apache.org/jira/browse/HBASE-16961[HBASE-16961] introduces a new type of
+quotas for HBase to leverage: filesystem quotas. These "space" quotas limit the amount of space
+on the filesystem that HBase namespaces and tables can consume. If a user, malicious or ignorant,
+has the ability to write data into HBase, with enough time, that user can effectively crash HBase
+(or worse HDFS) by consuming all available space. When there is no filesystem space available,
+HBase crashes because it can no longer create/sync data to the write-ahead log.
+
+This feature allows a for a limit to be set on the size of a table or namespace. When a space quota is set
+on a namespace, the quota's limit applies to the sum of usage of all tables in that namespace.
+When a table with a quota exists in a namespace with a quota, the table quota takes priority
+over the namespace quota. This allows for a scenario where a large limit can be placed on
+a collection of tables, but a single table in that collection can have a fine-grained limit set.
+
+The existing `set_quota` and `list_quota` HBase shell commands can be used to interact with
+space quotas. Space quotas are quotas with a `TYPE` of `SPACE` and have `LIMIT` and `POLICY`
+attributes. The `LIMIT` is a string that refers to the amount of space on the filesystem
+that the quota subject (e.g. the table or namespace) may consume. For example, valid values
+of `LIMIT` are `'10G'`, `'2T'`, or `'256M'`. The `POLICY` refers to the action that HBase will
+take when the quota subject's usage exceeds the `LIMIT`. The following are valid `POLICY` values.
+
+* `NO_INSERTS` - No new data may be written (e.g. `Put`, `Increment`, `Append`).
+* `NO_WRITES` - Same as `NO_INSERTS` but `Deletes` are also disallowed.
+* `NO_WRITES_COMPACTIONS` - Same as `NO_WRITES` but compactions are also disallowed.
+* `DISABLE` - The table(s) are disabled, preventing all read/write access.
+
+.Setting simple space quotas
+
+# Sets a quota on the table 't1' with a limit of 1GB, disallowing Puts/Increments/Appends when the table exceeds 1GB
+hbase> set_quota TYPE => SPACE, TABLE => 't1', LIMIT => '1G', POLICY => NO_INSERTS
+
+# Sets a quota on the namespace 'ns1' with a limit of 50TB, disallowing Puts/Increments/Appends/Deletes
+hbase> set_quota TYPE => SPACE, NAMESPACE => 'ns1', LIMIT => '50T', POLICY => NO_WRITES
+
+# Sets a quota on the table 't3' with a limit of 2TB, disallowing any writes and compactions when the table exceeds 2TB.
+hbase> set_quota TYPE => SPACE, TABLE => 't3', LIMIT => '2T', POLICY => NO_WRITES_COMPACTIONS
+
+# Sets a quota on the table 't2' with a limit of 50GB, disabling the table when it exceeds 50GB
+hbase> set_quota TYPE => SPACE, TABLE => 't2', LIMIT => '50G', POLICY => DISABLE
+
+
+Consider the following scenario to set up quotas on a namespace, overriding the quota on tables in that namespace
+
+.Table and Namespace space quotas
+
+hbase> create_namespace 'ns1'
+hbase> create 'ns1:t1'
+hbase> create 'ns1:t2'
+hbase> create 'ns1:t3'
+hbase> set_quota TYPE => SPACE, NAMESPACE => 

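The shell commands above also have Java client equivalents. Below is a hedged sketch of the same namespace-plus-table-override idea using the client quota API; the QuotaSettingsFactory method names reflect my recollection of the API built in this series, and the table/namespace names and limits chosen here are assumptions, not the documented example.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;

// Hypothetical sketch: a coarse namespace limit with a finer-grained override on one table.
public class SpaceQuotaSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      QuotaSettings nsLimit = QuotaSettingsFactory.limitNamespaceSpace(
          "ns1", 50L * 1024 * 1024 * 1024 * 1024, SpaceViolationPolicy.NO_WRITES);       // 50T on the namespace
      QuotaSettings tableOverride = QuotaSettingsFactory.limitTableSpace(
          TableName.valueOf("ns1:t1"), 100L * 1024 * 1024 * 1024, SpaceViolationPolicy.NO_INSERTS); // 100G on one table
      admin.setQuota(nsLimit);        // table quota takes priority over the namespace quota
      admin.setQuota(tableOverride);
    }
  }
}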
[45/50] [abbrv] hbase git commit: HBASE-17981 Consolidate the space quota shell commands

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/3561b115/hbase-protocol-shaded/src/main/protobuf/Admin.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Admin.proto 
b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
index 964b035..39e73b6 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Admin.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
@@ -329,8 +329,4 @@ service AdminService {
   /** Fetches the RegionServer's view of space quotas */
   rpc GetSpaceQuotaSnapshots(GetSpaceQuotaSnapshotsRequest)
 returns(GetSpaceQuotaSnapshotsResponse);
-
-  /** Fetches the RegionServer's space quota active enforcements */
-  rpc GetSpaceQuotaEnforcements(GetSpaceQuotaEnforcementsRequest)
-returns(GetSpaceQuotaEnforcementsResponse);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3561b115/hbase-protocol-shaded/src/main/protobuf/Quota.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Quota.proto 
b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
index 0d171b3..0d74435 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Quota.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
@@ -136,18 +136,6 @@ message GetSpaceQuotaSnapshotsResponse {
   repeated TableQuotaSnapshot snapshots = 1;
 }
 
-message GetSpaceQuotaEnforcementsRequest {
-}
-
-message GetSpaceQuotaEnforcementsResponse {
-  // Cannot use TableName as a map key, do the repeated nested message by hand.
-  message TableViolationPolicy {
-optional TableName table_name = 1;
-optional SpaceViolationPolicy violation_policy = 2;
-  }
-  repeated TableViolationPolicy violation_policies = 1;
-}
-
 message GetQuotaStatesRequest {
 }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3561b115/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
index fad9f44..717ec73 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
@@ -982,7 +982,7 @@ public final class QuotaProtos {
 
   public final boolean isInitialized() {
 if (!hasTimeUnit()) {
-  
+
   return false;
 }
 return true;
@@ -2009,37 +2009,37 @@ public final class QuotaProtos {
   public final boolean isInitialized() {
 if (hasReqNum()) {
   if (!getReqNum().isInitialized()) {
-
+
 return false;
   }
 }
 if (hasReqSize()) {
   if (!getReqSize().isInitialized()) {
-
+
 return false;
   }
 }
 if (hasWriteNum()) {
   if (!getWriteNum().isInitialized()) {
-
+
 return false;
   }
 }
 if (hasWriteSize()) {
   if (!getWriteSize().isInitialized()) {
-
+
 return false;
   }
 }
 if (hasReadNum()) {
   if (!getReadNum().isInitialized()) {
-
+
 return false;
   }
 }
 if (hasReadSize()) {
   if (!getReadSize().isInitialized()) {
-
+
 return false;
   }
 }
@@ -2169,7 +2169,7 @@ public final class QuotaProtos {
* optional .hbase.pb.TimedQuota req_num = 1;
*/
   private com.google.protobuf.SingleFieldBuilder<
-  org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> 
+  org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
   getReqNumFieldBuilder() {
 if (reqNumBuilder_ == null) {
   reqNumBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -2286,7 +2286,7 @@ public final class QuotaProtos {
* optional .hbase.pb.TimedQuota req_size = 2;
*/
   private com.google.protobuf.SingleFieldBuilder<
-  org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> 
+  org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, 

[50/50] [abbrv] hbase git commit: HBASE-17977 Enable the MasterSpaceQuotaObserver by default

2017-05-19 Thread elserj
HBASE-17977 Enable the MasterSpaceQuotaObserver by default

It should be the normal case that HBase automatically deletes
quotas for deleted tables. Switch the Observer to be on by
default and add an option to instead prevent it from being added.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/97bfe34a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/97bfe34a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/97bfe34a

Branch: refs/heads/HBASE-16961
Commit: 97bfe34af066d80aa7739da3842c6ae172425f65
Parents: 3ef2be1
Author: Josh Elser 
Authored: Tue May 2 14:58:10 2017 -0400
Committer: Josh Elser 
Committed: Fri May 19 12:37:51 2017 -0400

--
 .../org/apache/hadoop/hbase/master/HMaster.java | 28 +++
 .../hbase/quotas/MasterSpaceQuotaObserver.java  |  3 +
 .../quotas/TestMasterSpaceQuotaObserver.java| 15 +++-
 .../TestMasterSpaceQuotaObserverWithMocks.java  | 84 
 src/main/asciidoc/_chapters/ops_mgt.adoc| 11 ++-
 5 files changed, 134 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/97bfe34a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 89f085e..cd96552 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -134,6 +134,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
+import org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver;
 import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
 import org.apache.hadoop.hbase.quotas.QuotaUtil;
 import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier;
@@ -794,6 +795,11 @@ public class HMaster extends HRegionServer implements MasterServices {
         conf, this.clusterConnection);
     tableCFsUpdater.update();
 
+    // Add the Observer to delete space quotas on table deletion before starting all CPs by
+    // default with quota support, avoiding if user specifically asks to not load this Observer.
+    if (QuotaUtil.isQuotaEnabled(conf)) {
+      updateConfigurationForSpaceQuotaObserver(conf);
+    }
     // initialize master side coprocessors before we start handling requests
     status.setStatus("Initializing master coprocessors");
     this.cpHost = new MasterCoprocessorHost(this, this.conf);
@@ -942,6 +948,28 @@ public class HMaster extends HRegionServer implements MasterServices {
     zombieDetector.interrupt();
   }
 
+  /**
+   * Adds the {@code MasterSpaceQuotaObserver} to the list of configured Master observers to
+   * automatically remove space quotas for a table when that table is deleted.
+   */
+  @VisibleForTesting
+  public void updateConfigurationForSpaceQuotaObserver(Configuration conf) {
+    // We're configured to not delete quotas on table deletion, so we don't need to add the obs.
+    if (!conf.getBoolean(
+      MasterSpaceQuotaObserver.REMOVE_QUOTA_ON_TABLE_DELETE,
+      MasterSpaceQuotaObserver.REMOVE_QUOTA_ON_TABLE_DELETE_DEFAULT)) {
+      return;
+    }
+    String[] masterCoprocs = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
+    final int length = null == masterCoprocs ? 0 : masterCoprocs.length;
+    String[] updatedCoprocs = new String[length + 1];
+    if (length > 0) {
+      System.arraycopy(masterCoprocs, 0, updatedCoprocs, 0, masterCoprocs.length);
+    }
+    updatedCoprocs[length] = MasterSpaceQuotaObserver.class.getName();
+    conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, updatedCoprocs);
+  }
+
   private void initMobCleaner() {
     this.expiredMobFileCleanerChore = new ExpiredMobFileCleanerChore(this);
     getChoreService().scheduleChore(expiredMobFileCleanerChore);

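For operators who do not want the automatic cleanup, the early return above is driven by a boolean key exposed as a constant on MasterSpaceQuotaObserver. A small hypothetical sketch of opting out (class name invented; the constant is taken from the hunk above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver;

// Hypothetical sketch: prevent HMaster from auto-loading MasterSpaceQuotaObserver.
public class SpaceQuotaObserverOptOutSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // With this set to false, updateConfigurationForSpaceQuotaObserver() (above) returns early
    // and the observer is never appended to the master coprocessor list.
    conf.setBoolean(MasterSpaceQuotaObserver.REMOVE_QUOTA_ON_TABLE_DELETE, false);
    System.out.println("auto-load disabled: "
        + !conf.getBoolean(MasterSpaceQuotaObserver.REMOVE_QUOTA_ON_TABLE_DELETE, true));
  }
}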
http://git-wip-us.apache.org/repos/asf/hbase/blob/97bfe34a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java
index 299ba39..7c86525 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java
+++ 

[18/50] [abbrv] hbase git commit: HBASE-17001 Enforce quota violation policies in the RegionServer

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/6f2bee48/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
index c493b25..943c898 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
@@ -22,16 +22,12 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
@@ -40,20 +36,15 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NamespaceNotFoundException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.quotas.QuotaObserverChore.TablesWithQuotas;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -62,7 +53,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
-import com.google.common.collect.HashMultimap;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Multimap;
 
@@ -72,11 +62,8 @@ import com.google.common.collect.Multimap;
 @Category(LargeTests.class)
 public class TestQuotaObserverChoreWithMiniCluster {
   private static final Log LOG = 
LogFactory.getLog(TestQuotaObserverChoreWithMiniCluster.class);
-  private static final int SIZE_PER_VALUE = 256;
-  private static final String F1 = "f1";
   private static final HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
   private static final AtomicLong COUNTER = new AtomicLong(0);
-  private static final long ONE_MEGABYTE = 1024L * 1024L;
   private static final long DEFAULT_WAIT_MILLIS = 500;
 
   @Rule
@@ -84,18 +71,19 @@ public class TestQuotaObserverChoreWithMiniCluster {
 
   private HMaster master;
   private QuotaObserverChore chore;
-  private SpaceQuotaViolationNotifierForTest violationNotifier;
+  private SpaceQuotaSnapshotNotifierForTest snapshotNotifier;
+  private SpaceQuotaHelperForTests helper;
 
   @BeforeClass
   public static void setUp() throws Exception {
 Configuration conf = TEST_UTIL.getConfiguration();
 conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_DELAY_KEY, 
1000);
 conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_KEY, 
1000);
-conf.setInt(QuotaObserverChore.VIOLATION_OBSERVER_CHORE_DELAY_KEY, 1000);
-conf.setInt(QuotaObserverChore.VIOLATION_OBSERVER_CHORE_PERIOD_KEY, 1000);
+conf.setInt(QuotaObserverChore.QUOTA_OBSERVER_CHORE_DELAY_KEY, 1000);
+conf.setInt(QuotaObserverChore.QUOTA_OBSERVER_CHORE_PERIOD_KEY, 1000);
 conf.setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
-conf.setClass(SpaceQuotaViolationNotifierFactory.VIOLATION_NOTIFIER_KEY,
-SpaceQuotaViolationNotifierForTest.class, 
SpaceQuotaViolationNotifier.class);
+conf.setClass(SpaceQuotaSnapshotNotifierFactory.SNAPSHOT_NOTIFIER_KEY,
+SpaceQuotaSnapshotNotifierForTest.class, 
SpaceQuotaSnapshotNotifier.class);
 TEST_UTIL.startMiniCluster(1);
   }
 
@@ -131,40 +119,55 @@ public class TestQuotaObserverChoreWithMiniCluster {
 }
 
 master = TEST_UTIL.getMiniHBaseCluster().getMaster();
-violationNotifier =
-(SpaceQuotaViolationNotifierForTest) 
master.getSpaceQuotaViolationNotifier();
-violationNotifier.clearTableViolations();
+snapshotNotifier =
+(SpaceQuotaSnapshotNotifierForTest) 
master.getSpaceQuotaSnapshotNotifier();
+snapshotNotifier.clearSnapshots();
 chore = master.getQuotaObserverChore();
+

[49/50] [abbrv] hbase git commit: HBASE-17978 Ensure superusers can circumvent actions restricted by space quota violations

2017-05-19 Thread elserj
HBASE-17978 Ensure superusers can circumvent actions restricted by space quota violations


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3ef2be1f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3ef2be1f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3ef2be1f

Branch: refs/heads/HBASE-16961
Commit: 3ef2be1f42768d4cb69d8ca127301fa9892624ba
Parents: 3561b11
Author: Josh Elser 
Authored: Wed May 3 12:10:50 2017 -0400
Committer: Josh Elser 
Committed: Fri May 19 12:37:51 2017 -0400

--
 .../hbase/regionserver/RSRpcServices.java   |   4 +
 .../hbase/quotas/SpaceQuotaHelperForTests.java  |  27 +-
 .../quotas/TestSuperUserQuotaPermissions.java   | 300 +++
 3 files changed, 329 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3ef2be1f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 41fe3e5..b3ca94d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -105,6 +105,7 @@ import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
 import org.apache.hadoop.hbase.regionserver.handler.OpenPriorityRegionHandler;
 import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.security.Superusers;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
@@ -1509,7 +1510,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       checkOpen();
       requestCount.increment();
       Region region = getRegion(request.getRegion());
+      // Quota support is enabled, the requesting user is not system/super user
+      // and a quota policy is enforced that disables compactions.
       if (QuotaUtil.isQuotaEnabled(getConfiguration()) &&
+          !Superusers.isSuperUser(RpcServer.getRequestUser()) &&
           this.regionServer.getRegionServerSpaceQuotaManager().areCompactionsDisabled(
               region.getTableDesc().getTableName())) {
         throw new DoNotRetryIOException("Compactions on this region are "

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ef2be1f/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
index b7c51a2..1e2235a 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java
@@ -110,6 +110,18 @@ public class SpaceQuotaHelperForTests {
 }
   }
 
+  QuotaSettings getTableSpaceQuota(Connection conn, TableName tn) throws 
IOException {
+try (QuotaRetriever scanner = QuotaRetriever.open(
+conn.getConfiguration(), new 
QuotaFilter().setTableFilter(tn.getNameAsString( {
+  for (QuotaSettings setting : scanner) {
+if (setting.getTableName().equals(tn) && setting.getQuotaType() == 
QuotaType.SPACE) {
+  return setting;
+}
+  }
+  return null;
+}
+  }
+
   /**
* Waits 30seconds for the HBase quota table to exist.
*/
@@ -130,7 +142,10 @@ public class SpaceQuotaHelperForTests {
   }
 
   void writeData(TableName tn, long sizeInBytes) throws IOException {
-final Connection conn = testUtil.getConnection();
+writeData(testUtil.getConnection(), tn, sizeInBytes);
+  }
+
+  void writeData(Connection conn, TableName tn, long sizeInBytes) throws 
IOException {
 final Table table = conn.getTable(tn);
 try {
   List updates = new ArrayList<>();
@@ -226,8 +241,16 @@ public class SpaceQuotaHelperForTests {
 return 
createTableWithRegions(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR, 
numRegions);
   }
 
+  TableName createTableWithRegions(Admin admin, int numRegions) throws 
Exception {
+return createTableWithRegions(
+testUtil.getAdmin(), NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR, 
numRegions);
+  }
+
   TableName createTableWithRegions(String namespace, int numRegions) throws 
Exception {
-   

[14/50] [abbrv] hbase git commit: HBASE-17557 HRegionServer#reportRegionSizesForQuotas() should respond to UnsupportedOperationException

2017-05-19 Thread elserj
HBASE-17557 HRegionServer#reportRegionSizesForQuotas() should respond to UnsupportedOperationException

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1ff16d48
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1ff16d48
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1ff16d48

Branch: refs/heads/HBASE-16961
Commit: 1ff16d48a08327849f7bd115530b49d171cfedce
Parents: 0b6b40d
Author: tedyu 
Authored: Mon Jan 30 07:47:40 2017 -0800
Committer: Josh Elser 
Committed: Fri May 19 11:56:05 2017 -0400

--
 .../quotas/FileSystemUtilizationChore.java  | 20 +---
 .../hbase/regionserver/HRegionServer.java   | 24 
 2 files changed, 36 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1ff16d48/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
index 01540eb..efc17ff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
@@ -53,6 +53,9 @@ public class FileSystemUtilizationChore extends 
ScheduledChore {
   static final String FS_UTILIZATION_MAX_ITERATION_DURATION_KEY = 
"hbase.regionserver.quotas.fs.utilization.chore.max.iteration.millis";
   static final long FS_UTILIZATION_MAX_ITERATION_DURATION_DEFAULT = 5000L;
 
+  private int numberOfCyclesToSkip = 0, prevNumberOfCyclesToSkip = 0;
+  private static final int CYCLE_UPPER_BOUND = 32;
+
   private final HRegionServer rs;
   private final long maxIterationMillis;
  private Iterator<Region> leftoverRegions;
@@ -67,6 +70,10 @@ public class FileSystemUtilizationChore extends 
ScheduledChore {
 
   @Override
   protected void chore() {
+if (numberOfCyclesToSkip > 0) {
+  numberOfCyclesToSkip--;
+  return;
+}
    final Map<HRegionInfo, Long> onlineRegionSizes = new HashMap<>();
    final Set<Region> onlineRegions = new HashSet<>(rs.getOnlineRegions());
 // Process the regions from the last run if we have any. If we are somehow 
having difficulty
@@ -126,7 +133,14 @@ public class FileSystemUtilizationChore extends 
ScheduledChore {
   + skippedSplitParents + " regions due to being the parent of a 
split, and"
   + skippedRegionReplicas + " regions due to being region replicas.");
 }
-reportRegionSizesToMaster(onlineRegionSizes);
+if (!reportRegionSizesToMaster(onlineRegionSizes)) {
+  // backoff reporting
+  numberOfCyclesToSkip = prevNumberOfCyclesToSkip > 0 ? 2 * 
prevNumberOfCyclesToSkip : 1;
+  if (numberOfCyclesToSkip > CYCLE_UPPER_BOUND) {
+numberOfCyclesToSkip = CYCLE_UPPER_BOUND;
+  }
+  prevNumberOfCyclesToSkip = numberOfCyclesToSkip;
+}
   }
 
   /**
@@ -166,8 +180,8 @@ public class FileSystemUtilizationChore extends 
ScheduledChore {
*
* @param onlineRegionSizes The computed region sizes to report.
*/
-  void reportRegionSizesToMaster(Map<HRegionInfo, Long> onlineRegionSizes) {
-    this.rs.reportRegionSizesForQuotas(onlineRegionSizes);
+  boolean reportRegionSizesToMaster(Map<HRegionInfo, Long> onlineRegionSizes) {
+    return this.rs.reportRegionSizesForQuotas(onlineRegionSizes);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/1ff16d48/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 8130312..2b3e8f5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -66,6 +66,7 @@ import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.ClockOutOfSyncException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.CoordinatedStateManagerFactory;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
@@ -1248,13 +1249,14 @@ public class HRegionServer extends HasThread implements
* Reports the given map of Regions and their size on the filesystem to the 
active Master.
*
* 
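
The FileSystemUtilizationChore change above amounts to a capped exponential backoff: when the
Master rejects a region-size report (now surfaced as a boolean from reportRegionSizesToMaster()),
the chore skips 1, 2, 4, ... cycles, up to CYCLE_UPPER_BOUND, before reporting again. A standalone
sketch of that pattern with illustrative names; only the doubling-with-a-cap behaviour mirrors the
patch:

  /** Minimal sketch (not HBase code) of the capped exponential backoff used above. */
  class ReportBackoff {
    private static final int CYCLE_UPPER_BOUND = 32;
    private int numberOfCyclesToSkip = 0;
    private int prevNumberOfCyclesToSkip = 0;

    /** Returns true if this chore run should be skipped entirely. */
    boolean shouldSkip() {
      if (numberOfCyclesToSkip > 0) {
        numberOfCyclesToSkip--;
        return true;
      }
      return false;
    }

    /** Called when the report was rejected; doubles the skip count up to the cap. */
    void onReportRejected() {
      numberOfCyclesToSkip = prevNumberOfCyclesToSkip > 0 ? 2 * prevNumberOfCyclesToSkip : 1;
      if (numberOfCyclesToSkip > CYCLE_UPPER_BOUND) {
        numberOfCyclesToSkip = CYCLE_UPPER_BOUND;
      }
      prevNumberOfCyclesToSkip = numberOfCyclesToSkip;
    }
  }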

[28/50] [abbrv] hbase git commit: HBASE-17428 Implement informational RPCs for space quotas

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/df670806/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index b5b41f3..6780414 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -72382,6 +72382,18 @@ public final class MasterProtos {
   
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest
 request,
   
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback
 done);
 
+  /**
+   * 
+   ** Fetches the Master's view of space quotas
+   * 
+   *
+   * rpc 
GetSpaceQuotaRegionSizes(.hbase.pb.GetSpaceQuotaRegionSizesRequest) returns 
(.hbase.pb.GetSpaceQuotaRegionSizesResponse);
+   */
+  public abstract void getSpaceQuotaRegionSizes(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController 
controller,
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest
 request,
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback
 done);
+
 }
 
 public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Service 
newReflectiveService(
@@ -72955,6 +72967,14 @@ public final class MasterProtos {
   impl.removeDrainFromRegionServers(controller, request, done);
 }
 
+@java.lang.Override
+public  void getSpaceQuotaRegionSizes(
+org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController 
controller,
+
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest
 request,
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback
 done) {
+  impl.getSpaceQuotaRegionSizes(controller, request, done);
+}
+
   };
 }
 
@@ -73119,6 +73139,8 @@ public final class MasterProtos {
   return impl.drainRegionServers(controller, 
(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)request);
 case 70:
   return impl.removeDrainFromRegionServers(controller, 
(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest)request);
+case 71:
+  return impl.getSpaceQuotaRegionSizes(controller, 
(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest)request);
 default:
   throw new java.lang.AssertionError("Can't get here.");
   }
@@ -73275,6 +73297,8 @@ public final class MasterProtos {
   return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
 case 70:
   return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest.getDefaultInstance();
+case 71:
+  return 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest.getDefaultInstance();
 default:
   throw new java.lang.AssertionError("Can't get here.");
   }
@@ -73431,6 +73455,8 @@ public final class MasterProtos {
   return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
 case 70:
   return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance();
+case 71:
+  return 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.getDefaultInstance();
 default:
   throw new java.lang.AssertionError("Can't get here.");
   }
@@ -74318,6 +74344,18 @@ public final class MasterProtos {
 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest
 request,
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback
 done);
 
+/**
+ * 
+ ** Fetches the Master's view of space quotas
+ * 
+ *
+ * rpc 
GetSpaceQuotaRegionSizes(.hbase.pb.GetSpaceQuotaRegionSizesRequest) returns 
(.hbase.pb.GetSpaceQuotaRegionSizesResponse);
+ */
+public abstract void getSpaceQuotaRegionSizes(
+org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController 
controller,
+
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest
 

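The generated plumbing above wires method index 71 to the new GetSpaceQuotaRegionSizes RPC. On the
Master side, an implementation against the generated blocking interface would look roughly like the
following; the request/response message names come from this patch, but the method body shown here
is an assumption for illustration only (the real implementation lives in MasterRpcServices and
reads from MasterQuotaManager):

  @Override
  public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes(RpcController controller,
      GetSpaceQuotaRegionSizesRequest request) throws ServiceException {
    GetSpaceQuotaRegionSizesResponse.Builder builder = GetSpaceQuotaRegionSizesResponse.newBuilder();
    // Aggregate whatever per-region sizes the Master is currently tracking into the response.
    return builder.build();
  }
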
[09/50] [abbrv] hbase git commit: HBASE-16995 Build client Java API and client protobuf messages - addendum fixes line lengths (Josh Elser)

2017-05-19 Thread elserj
HBASE-16995 Build client Java API and client protobuf messages - addendum fixes 
line lengths (Josh Elser)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/78130847
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/78130847
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/78130847

Branch: refs/heads/HBASE-16961
Commit: 78130847bd53579ef76476aafc160ad8ca28ee4d
Parents: c0bc6df
Author: tedyu 
Authored: Mon Nov 21 13:00:27 2016 -0800
Committer: Josh Elser 
Committed: Fri May 19 11:56:05 2017 -0400

--
 .../hbase/quotas/QuotaSettingsFactory.java  | 20 
 .../hadoop/hbase/quotas/SpaceLimitSettings.java |  8 
 .../hbase/shaded/protobuf/ProtobufUtil.java |  7 ---
 3 files changed, 20 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/78130847/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
index 8512e39..7f1c180 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
@@ -128,7 +128,8 @@ public class QuotaSettingsFactory {
 
   static QuotaSettings fromSpace(TableName table, String namespace, SpaceQuota 
protoQuota) {
 if ((null == table && null == namespace) || (null != table && null != 
namespace)) {
-  throw new IllegalArgumentException("Can only construct 
SpaceLimitSettings for a table or namespace.");
+  throw new IllegalArgumentException(
+  "Can only construct SpaceLimitSettings for a table or namespace.");
 }
 if (null != table) {
   return SpaceLimitSettings.fromSpaceQuota(table, protoQuota);
@@ -300,29 +301,32 @@ public class QuotaSettingsFactory {
*/
 
   /**
-   * Creates a {@link QuotaSettings} object to limit the FileSystem space 
usage for the given table to the given size in bytes.
-   * When the space usage is exceeded by the table, the provided {@link 
SpaceViolationPolicy} is enacted on the table.
+   * Creates a {@link QuotaSettings} object to limit the FileSystem space 
usage for the given table
+   * to the given size in bytes. When the space usage is exceeded by the 
table, the provided
+   * {@link SpaceViolationPolicy} is enacted on the table.
*
* @param tableName The name of the table on which the quota should be 
applied.
* @param sizeLimit The limit of a table's size in bytes.
* @param violationPolicy The action to take when the quota is exceeded.
* @return An {@link QuotaSettings} object.
*/
-  public static QuotaSettings limitTableSpace(final TableName tableName, long 
sizeLimit, final SpaceViolationPolicy violationPolicy) {
+  public static QuotaSettings limitTableSpace(
+  final TableName tableName, long sizeLimit, final SpaceViolationPolicy 
violationPolicy) {
 return new SpaceLimitSettings(tableName, sizeLimit, violationPolicy);
   }
 
   /**
-   * Creates a {@link QuotaSettings} object to limit the FileSystem space 
usage for the given namespace to the given size in bytes.
-   * When the space usage is exceeded by all tables in the namespace, the 
provided {@link SpaceViolationPolicy} is enacted on
-   * all tables in the namespace.
+   * Creates a {@link QuotaSettings} object to limit the FileSystem space 
usage for the given
+   * namespace to the given size in bytes. When the space usage is exceeded by 
all tables in the
+   * namespace, the provided {@link SpaceViolationPolicy} is enacted on all 
tables in the namespace.
*
* @param namespace The namespace on which the quota should be applied.
* @param sizeLimit The limit of the namespace's size in bytes.
* @param violationPolicy The action to take when the the quota is exceeded.
* @return An {@link QuotaSettings} object.
*/
-  public static QuotaSettings limitNamespaceSpace(final String namespace, long 
sizeLimit, final SpaceViolationPolicy violationPolicy) {
+  public static QuotaSettings limitNamespaceSpace(
+  final String namespace, long sizeLimit, final SpaceViolationPolicy 
violationPolicy) {
 return new SpaceLimitSettings(namespace, sizeLimit, violationPolicy);
   }
 }
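
The two factory methods reformatted above are the client-facing entry points for space quotas. A
short usage sketch; the QuotaSettingsFactory and Admin.setQuota() calls are the API from this
series, while the table name, namespace, limits and policies below are made up for illustration:

  try (Connection conn = ConnectionFactory.createConnection(conf);
      Admin admin = conn.getAdmin()) {
    // Cap a single table at 1 GB and reject new writes once the limit is exceeded.
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(
        TableName.valueOf("my_table"), 1024L * 1024L * 1024L, SpaceViolationPolicy.NO_INSERTS));
    // Cap everything in namespace "ns1" at 10 GB and disable its tables on violation.
    admin.setQuota(QuotaSettingsFactory.limitNamespaceSpace(
        "ns1", 10L * 1024L * 1024L * 1024L, SpaceViolationPolicy.DISABLE));
  }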

http://git-wip-us.apache.org/repos/asf/hbase/blob/78130847/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java

[13/50] [abbrv] hbase git commit: HBASE-17000 Implement computation of online region sizes and report to the Master

2017-05-19 Thread elserj
HBASE-17000 Implement computation of online region sizes and report to the 
Master

Includes only a trivial implementation of the Master-side collection: just enough to
write a test that verifies the RegionServer-side collection.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0b6b40d4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0b6b40d4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0b6b40d4

Branch: refs/heads/HBASE-16961
Commit: 0b6b40d4181ea3a0a4da32693d5c14af13dda8e3
Parents: 9a13dfa
Author: Josh Elser 
Authored: Mon Nov 7 13:46:42 2016 -0500
Committer: Josh Elser 
Committed: Fri May 19 11:56:05 2017 -0400

--
 .../generated/RegionServerStatusProtos.java | 2071 +-
 .../src/main/protobuf/RegionServerStatus.proto  |   22 +
 .../hadoop/hbase/master/MasterRpcServices.java  |   19 +
 .../quotas/FileSystemUtilizationChore.java  |  205 ++
 .../hadoop/hbase/quotas/MasterQuotaManager.java |   15 +
 .../hbase/regionserver/HRegionServer.java   |   72 +
 .../quotas/TestFileSystemUtilizationChore.java  |  357 +++
 .../hadoop/hbase/quotas/TestRegionSizeUse.java  |  194 ++
 .../TestRegionServerRegionSpaceUseReport.java   |   99 +
 9 files changed, 3032 insertions(+), 22 deletions(-)
--




[07/50] [abbrv] hbase git commit: HBASE-16998 Implement Master-side analysis of region space reports

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/851fb37a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
new file mode 100644
index 000..98236c2
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
@@ -0,0 +1,596 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.quotas.QuotaObserverChore.TablesWithQuotas;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Multimap;
+
+/**
+ * Test class for {@link QuotaObserverChore} that uses a live HBase cluster.
+ */
+@Category(LargeTests.class)
+public class TestQuotaObserverChoreWithMiniCluster {
+  private static final Log LOG = 
LogFactory.getLog(TestQuotaObserverChoreWithMiniCluster.class);
+  private static final int SIZE_PER_VALUE = 256;
+  private static final String F1 = "f1";
+  private static final HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
+  private static final AtomicLong COUNTER = new AtomicLong(0);
+  private static final long ONE_MEGABYTE = 1024L * 1024L;
+  private static final long DEFAULT_WAIT_MILLIS = 500;
+
+  @Rule
+  public TestName testName = new TestName();
+
+  private HMaster master;
+  private QuotaObserverChore chore;
+  private SpaceQuotaViolationNotifierForTest violationNotifier;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+Configuration conf = TEST_UTIL.getConfiguration();
+conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_DELAY_KEY, 
1000);
+conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_KEY, 
1000);
+conf.setInt(QuotaObserverChore.VIOLATION_OBSERVER_CHORE_DELAY_KEY, 1000);
+conf.setInt(QuotaObserverChore.VIOLATION_OBSERVER_CHORE_PERIOD_KEY, 1000);
+conf.setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
+TEST_UTIL.startMiniCluster(1);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Before
+  public void removeAllQuotas() throws Exception {
+final Connection conn = TEST_UTIL.getConnection();
+

[31/50] [abbrv] hbase git commit: HBASE-17568 Better handle stale/missing region size reports

2017-05-19 Thread elserj
HBASE-17568 Better handle stale/missing region size reports

* Expire region reports in the master after a timeout.
* Move regions in violation out of violation when insufficient
region size reports are observed.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ea3c0424
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ea3c0424
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ea3c0424

Branch: refs/heads/HBASE-16961
Commit: ea3c0424636c3204c1054361bdb7d73ac983b59a
Parents: 9adf90e
Author: Josh Elser 
Authored: Fri Feb 3 16:33:47 2017 -0500
Committer: Josh Elser 
Committed: Fri May 19 12:25:20 2017 -0400

--
 .../hadoop/hbase/master/MasterRpcServices.java  |   4 +-
 .../hadoop/hbase/quotas/MasterQuotaManager.java |  86 ++-
 .../hadoop/hbase/quotas/QuotaObserverChore.java |  53 -
 .../hbase/quotas/TestMasterQuotaManager.java|  48 +++-
 .../TestQuotaObserverChoreRegionReports.java| 233 +++
 5 files changed, 412 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ea3c0424/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 867fd84..50a75b9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -150,6 +150,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.Snapshot
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.zookeeper.KeeperException;
@@ -1942,8 +1943,9 @@ public class MasterRpcServices extends RSRpcServices
 return RegionSpaceUseReportResponse.newBuilder().build();
   }
   MasterQuotaManager quotaManager = this.master.getMasterQuotaManager();
+  final long now = EnvironmentEdgeManager.currentTime();
   for (RegionSpaceUse report : request.getSpaceUseList()) {
-quotaManager.addRegionSize(HRegionInfo.convert(report.getRegion()), 
report.getSize());
+quotaManager.addRegionSize(HRegionInfo.convert(report.getRegion()), 
report.getSize(), now);
   }
   return RegionSpaceUseReportResponse.newBuilder().build();
 } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/ea3c0424/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
index cb614ea..0622dba 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -22,9 +22,12 @@ import java.io.IOException;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
 
+import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -47,6 +50,8 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Throttle;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.ThrottleRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Master Quota Manager.
  * It is responsible for initialize the quota table on the first-run and
@@ -68,7 +73,7 @@ public class MasterQuotaManager implements 
RegionStateListener {
   private NamedLock userLocks;
   private boolean enabled = false;
   private NamespaceAuditor namespaceQuotaManager;
-  private ConcurrentHashMap regionSizes;
+  private ConcurrentHashMap 
regionSizes;
 
   public MasterQuotaManager(final MasterServices masterServices) {
 this.masterServices = masterServices;
@@ -531,21 +536,88 @@ public class 
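
The MasterQuotaManager change above replaces the plain region-size map with entries that also carry
the time the report was received, so stale reports can be expired. A minimal sketch of that
bookkeeping under assumed names; only the addRegionSize(region, size, time) shape comes from the
diff, while the value class and pruning method shown here are illustrative:

  // Inside a manager-style class; assumes java.util.Map.Entry and ConcurrentHashMap are imported.
  final class SizeWithTime {
    final long size;
    final long time;
    SizeWithTime(long size, long time) { this.size = size; this.time = time; }
  }

  private final ConcurrentHashMap<HRegionInfo, SizeWithTime> regionSizes = new ConcurrentHashMap<>();

  void addRegionSize(HRegionInfo region, long size, long time) {
    regionSizes.put(region, new SizeWithTime(size, time));
  }

  /** Drops any report received more than expiryMillis before "now"; returns the number removed. */
  int pruneStaleReports(long expiryMillis, long now) {
    int pruned = 0;
    for (Iterator<Entry<HRegionInfo, SizeWithTime>> it = regionSizes.entrySet().iterator(); it.hasNext();) {
      if (now - it.next().getValue().time >= expiryMillis) {
        it.remove();
        pruned++;
      }
    }
    return pruned;
  }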

[27/50] [abbrv] hbase git commit: HBASE-17428 Implement informational RPCs for space quotas

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/df670806/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
index a4c6095..d56def5 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
@@ -4362,7 +4362,7 @@ public final class QuotaProtos {
* optional .hbase.pb.SpaceQuota space = 3;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
 
   getSpaceFieldBuilder() {
 if (spaceBuilder_ == null) {
   spaceBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -6077,7 +6077,7 @@ public final class QuotaProtos {
* optional .hbase.pb.SpaceQuota quota = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
 
   getQuotaFieldBuilder() {
 if (quotaBuilder_ == null) {
   quotaBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -6351,7 +6351,7 @@ public final class QuotaProtos {
 return memoizedHashCode;
   }
   int hash = 41;
-  hash = (19 * hash) + getDescriptorForType().hashCode();
+  hash = (19 * hash) + getDescriptor().hashCode();
   if (hasPolicy()) {
 hash = (37 * hash) + POLICY_FIELD_NUMBER;
 hash = (53 * hash) + policy_;
@@ -6978,7 +6978,7 @@ public final class QuotaProtos {
 return memoizedHashCode;
   }
   int hash = 41;
-  hash = (19 * hash) + getDescriptorForType().hashCode();
+  hash = (19 * hash) + getDescriptor().hashCode();
   if (hasStatus()) {
 hash = (37 * hash) + STATUS_FIELD_NUMBER;
 hash = (53 * hash) + getStatus().hashCode();
@@ -7351,7 +7351,7 @@ public final class QuotaProtos {
* optional .hbase.pb.SpaceQuotaStatus status = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaStatus, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaStatus.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaStatusOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaStatus, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaStatus.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaStatusOrBuilder>
 
   getStatusFieldBuilder() {
 if (statusBuilder_ == null) {
   statusBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -7476,163 +7476,5829 @@ public final class QuotaProtos {
 
   }
 
-  private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-internal_static_hbase_pb_TimedQuota_descriptor;
-  private static final
-
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-  internal_static_hbase_pb_TimedQuota_fieldAccessorTable;
-  private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-internal_static_hbase_pb_Throttle_descriptor;
-  private static final
-
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-  internal_static_hbase_pb_Throttle_fieldAccessorTable;
-  private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-   

[24/50] [abbrv] hbase git commit: HBASE-17025 Add shell commands for space quotas

2017-05-19 Thread elserj
HBASE-17025 Add shell commands for space quotas


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/01a6b624
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/01a6b624
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/01a6b624

Branch: refs/heads/HBASE-16961
Commit: 01a6b62493869416f7ceef9fe170d94db3ed756c
Parents: b71c9e1
Author: Josh Elser 
Authored: Wed Jan 11 11:55:29 2017 -0500
Committer: Josh Elser 
Committed: Fri May 19 12:19:35 2017 -0400

--
 hbase-shell/src/main/ruby/hbase/quotas.rb   |  62 -
 hbase-shell/src/main/ruby/hbase_constants.rb|   1 +
 .../src/main/ruby/shell/commands/set_quota.rb   |  45 +-
 .../hadoop/hbase/client/AbstractTestShell.java  |   1 +
 hbase-shell/src/test/ruby/hbase/quotas_test.rb  | 137 +++
 hbase-shell/src/test/ruby/tests_runner.rb   |   1 +
 6 files changed, 242 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/01a6b624/hbase-shell/src/main/ruby/hbase/quotas.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/quotas.rb 
b/hbase-shell/src/main/ruby/hbase/quotas.rb
index bf2dc63..d99fe72 100644
--- a/hbase-shell/src/main/ruby/hbase/quotas.rb
+++ b/hbase-shell/src/main/ruby/hbase/quotas.rb
@@ -24,14 +24,22 @@ java_import org.apache.hadoop.hbase.quotas.ThrottleType
 java_import org.apache.hadoop.hbase.quotas.QuotaFilter
 java_import org.apache.hadoop.hbase.quotas.QuotaRetriever
 java_import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory
+java_import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy
 
 module HBaseQuotasConstants
+  # RPC Quota constants
   GLOBAL_BYPASS = 'GLOBAL_BYPASS'
   THROTTLE_TYPE = 'THROTTLE_TYPE'
   THROTTLE = 'THROTTLE'
   REQUEST = 'REQUEST'
   WRITE = 'WRITE'
   READ = 'READ'
+  # Space quota constants
+  SPACE = 'SPACE'
+  NO_INSERTS = 'NO_INSERTS'
+  NO_WRITES = 'NO_WRITES'
+  NO_WRITES_COMPACTIONS = 'NO_WRITES_COMPACTIONS'
+  DISABLE = 'DISABLE'
 end
 
 module Hbase
@@ -107,6 +115,54 @@ module Hbase
   @admin.setQuota(settings)
 end
 
+def limit_space(args)
+  raise(ArgumentError, 'Argument should be a Hash') unless (not args.nil? 
and args.kind_of?(Hash))
+  # Let the user provide a raw number
+  if args[LIMIT].is_a?(Numeric)
+limit = args[LIMIT]
+  else
+# Parse a string a 1K, 2G, etc.
+limit = _parse_size(args[LIMIT])
+  end
+  # Extract the policy, failing if something bogus was provided
+  policy = SpaceViolationPolicy.valueOf(args[POLICY])
+  # Create a table or namespace quota
+  if args.key?(TABLE)
+if args.key?(NAMESPACE)
+  raise(ArgumentError, "Only one of TABLE or NAMESPACE can be 
specified.")
+end
+settings = 
QuotaSettingsFactory.limitTableSpace(TableName.valueOf(args.delete(TABLE)), 
limit, policy)
+  elsif args.key?(NAMESPACE)
+if args.key?(TABLE)
+  raise(ArgumentError, "Only one of TABLE or NAMESPACE can be 
specified.")
+end
+settings = 
QuotaSettingsFactory.limitNamespaceSpace(args.delete(NAMESPACE), limit, policy)
+  else
+raise(ArgumentError, 'One of TABLE or NAMESPACE must be specified.')
+  end
+  # Apply the quota
+  @admin.setQuota(settings)
+end
+
+def remove_space_limit(args)
+  raise(ArgumentError, 'Argument should be a Hash') unless (not args.nil? 
and args.kind_of?(Hash))
+  if args.key?(TABLE)
+if args.key?(NAMESPACE)
+  raise(ArgumentError, "Only one of TABLE or NAMESPACE can be 
specified.")
+end
+table = TableName.valueOf(args.delete(TABLE))
+settings = QuotaSettingsFactory.removeTableSpaceLimit(table)
+  elsif args.key?(NAMESPACE)
+if args.key?(TABLE)
+  raise(ArgumentError, "Only one of TABLE or NAMESPACE can be 
specified.")
+end
+settings = 
QuotaSettingsFactory.removeNamespaceSpaceLimit(args.delete(NAMESPACE))
+  else
+raise(ArgumentError, 'One of TABLE or NAMESPACE must be specified.')
+  end
+  @admin.setQuota(settings)
+end
+
 def set_global_bypass(bypass, args)
   raise(ArgumentError, "Arguments should be a Hash") unless 
args.kind_of?(Hash)
 
@@ -171,7 +227,7 @@ module Hbase
   return _size_from_str(match[1].to_i, match[2])
 end
   else
-raise "Invalid size limit syntax"
+raise(ArgumentError, "Invalid size limit syntax")
   end
 end
 
@@ -188,7 +244,7 @@ module Hbase
 end
 
 if limit <= 0
-  raise "Invalid throttle limit, must be greater then 0"
+  raise(ArgumentError, "Invalid throttle limit, must be greater then 
0")
 end
 
 

[37/50] [abbrv] hbase git commit: HBASE-17002 JMX metrics and some UI additions for space quotas

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2ec0bd/hbase-protocol-shaded/src/main/protobuf/Master.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto 
b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index 4706e90..3d6ae1b 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -943,7 +943,11 @@ service MasterService {
   rpc removeDrainFromRegionServers(RemoveDrainFromRegionServersRequest)
 returns(RemoveDrainFromRegionServersResponse);
 
-  /** Fetches the Master's view of space quotas */
+  /** Fetches the Master's view of space utilization */
   rpc GetSpaceQuotaRegionSizes(GetSpaceQuotaRegionSizesRequest)
 returns(GetSpaceQuotaRegionSizesResponse);
+
+  /** Fetches the Master's view of quotas */
+  rpc GetQuotaStates(GetQuotaStatesRequest)
+returns(GetQuotaStatesResponse);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2ec0bd/hbase-protocol-shaded/src/main/protobuf/Quota.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Quota.proto 
b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
index 2d7e5f5..1a6d5ed 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Quota.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
@@ -119,6 +119,7 @@ message GetSpaceQuotaRegionSizesResponse {
   message RegionSizes {
 optional TableName table_name = 1;
 optional uint64 size = 2;
+
   }
   repeated RegionSizes sizes = 1;
 }
@@ -146,3 +147,19 @@ message GetSpaceQuotaEnforcementsResponse {
   }
   repeated TableViolationPolicy violation_policies = 1;
 }
+
+message GetQuotaStatesRequest {
+}
+
+message GetQuotaStatesResponse {
+  message TableQuotaSnapshot {
+optional TableName table_name = 1;
+optional SpaceQuotaSnapshot snapshot = 2;
+  }
+  message NamespaceQuotaSnapshot {
+optional string namespace = 1;
+optional SpaceQuotaSnapshot snapshot = 2;
+  }
+  repeated TableQuotaSnapshot table_snapshots = 1;
+  repeated NamespaceQuotaSnapshot ns_snapshots = 2;
+}
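
A hedged sketch of consuming the new GetQuotaStatesResponse on the client side, given a response
"response" obtained from the Master. The accessor names follow the usual protobuf-java codegen
conventions (getTableSnapshotsList(), getNsSnapshotsList()) and are an assumption, not code from
this patch; the printing is purely illustrative:

  for (GetQuotaStatesResponse.TableQuotaSnapshot snapshot : response.getTableSnapshotsList()) {
    System.out.println(
        ProtobufUtil.toTableName(snapshot.getTableName()) + " => " + snapshot.getSnapshot());
  }
  for (GetQuotaStatesResponse.NamespaceQuotaSnapshot snapshot : response.getNsSnapshotsList()) {
    System.out.println(snapshot.getNamespace() + " => " + snapshot.getSnapshot());
  }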

http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2ec0bd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 78268a8..33927de 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -912,7 +912,7 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   // Create the quota snapshot notifier
   spaceQuotaSnapshotNotifier = createQuotaSnapshotNotifier();
   spaceQuotaSnapshotNotifier.initialize(getClusterConnection());
-  this.quotaObserverChore = new QuotaObserverChore(this);
+  this.quotaObserverChore = new QuotaObserverChore(this, 
getMasterMetrics());
   // Start the chore to read the region FS space reports and act on them
   getChoreService().scheduleChore(quotaObserverChore);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2ec0bd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 50a75b9..2ac6fee 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -62,7 +62,9 @@ import 
org.apache.hadoop.hbase.procedure.MasterProcedureManager;
 import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
+import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
 import org.apache.hadoop.hbase.quotas.QuotaUtil;
+import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
@@ -112,8 +114,12 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTa
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;

[42/50] [abbrv] hbase git commit: HBASE-17955 Various reviewboard improvements to space quota work

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/97dcaffa/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
index c70b736..b886f5c 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
@@ -10173,42 +10173,42 @@ public final class RegionServerStatusProtos {
  * A region identifier
  * 
  *
- * optional .hbase.pb.RegionInfo region = 1;
+ * optional .hbase.pb.RegionInfo region_info = 1;
  */
-boolean hasRegion();
+boolean hasRegionInfo();
 /**
  * 
  * A region identifier
  * 
  *
- * optional .hbase.pb.RegionInfo region = 1;
+ * optional .hbase.pb.RegionInfo region_info = 1;
  */
-org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo 
getRegion();
+org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo 
getRegionInfo();
 /**
  * 
  * A region identifier
  * 
  *
- * optional .hbase.pb.RegionInfo region = 1;
+ * optional .hbase.pb.RegionInfo region_info = 1;
  */
-
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder
 getRegionOrBuilder();
+
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder
 getRegionInfoOrBuilder();
 
 /**
  * 
  * The size in bytes of the region
  * 
  *
- * optional uint64 size = 2;
+ * optional uint64 region_size = 2;
  */
-boolean hasSize();
+boolean hasRegionSize();
 /**
  * 
  * The size in bytes of the region
  * 
  *
- * optional uint64 size = 2;
+ * optional uint64 region_size = 2;
  */
-long getSize();
+long getRegionSize();
   }
   /**
* Protobuf type {@code hbase.pb.RegionSpaceUse}
@@ -10222,7 +10222,7 @@ public final class RegionServerStatusProtos {
   super(builder);
 }
 private RegionSpaceUse() {
-  size_ = 0L;
+  regionSize_ = 0L;
 }
 
 @java.lang.Override
@@ -10256,19 +10256,19 @@ public final class RegionServerStatusProtos {
 case 10: {
   
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder
 subBuilder = null;
   if (((bitField0_ & 0x0001) == 0x0001)) {
-subBuilder = region_.toBuilder();
+subBuilder = regionInfo_.toBuilder();
   }
-  region_ = 
input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER,
 extensionRegistry);
+  regionInfo_ = 
input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER,
 extensionRegistry);
   if (subBuilder != null) {
-subBuilder.mergeFrom(region_);
-region_ = subBuilder.buildPartial();
+subBuilder.mergeFrom(regionInfo_);
+regionInfo_ = subBuilder.buildPartial();
   }
   bitField0_ |= 0x0001;
   break;
 }
 case 16: {
   bitField0_ |= 0x0002;
-  size_ = input.readUInt64();
+  regionSize_ = input.readUInt64();
   break;
 }
   }
@@ -10296,16 +10296,16 @@ public final class RegionServerStatusProtos {
 }
 
 private int bitField0_;
-public static final int REGION_FIELD_NUMBER = 1;
-private 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo 
region_;
+public static final int REGION_INFO_FIELD_NUMBER = 1;
+private 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo 
regionInfo_;
 /**
  * 
  * A region identifier
  * 
  *
- * optional .hbase.pb.RegionInfo region = 1;
+ * optional .hbase.pb.RegionInfo region_info = 1;
  */
-public boolean hasRegion() {
+public boolean hasRegionInfo() {
   return ((bitField0_ & 0x0001) == 0x0001);
 }
 /**
@@ -10313,32 +10313,32 @@ public final class RegionServerStatusProtos {
  * A region identifier
  * 
  *
- * optional .hbase.pb.RegionInfo region = 1;
+ * optional .hbase.pb.RegionInfo region_info = 1;
  */
-public 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo 
getRegion() {
-  return region_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()
 : region_;
+

[46/50] [abbrv] hbase git commit: HBASE-17981 Consolidate the space quota shell commands

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/3561b115/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
index e8a57e9..2435564 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
@@ -2210,7 +2210,7 @@ public final class QuotaProtos {
* optional .hbase.pb.TimedQuota req_num = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
 
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
   getReqNumFieldBuilder() {
 if (reqNumBuilder_ == null) {
   reqNumBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -2328,7 +2328,7 @@ public final class QuotaProtos {
* optional .hbase.pb.TimedQuota req_size = 2;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
 
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
   getReqSizeFieldBuilder() {
 if (reqSizeBuilder_ == null) {
   reqSizeBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -2446,7 +2446,7 @@ public final class QuotaProtos {
* optional .hbase.pb.TimedQuota write_num = 3;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
 
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
   getWriteNumFieldBuilder() {
 if (writeNumBuilder_ == null) {
   writeNumBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -2564,7 +2564,7 @@ public final class QuotaProtos {
* optional .hbase.pb.TimedQuota write_size = 4;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
 
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
   getWriteSizeFieldBuilder() {
 if (writeSizeBuilder_ == null) {
   writeSizeBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -2682,7 +2682,7 @@ public final class QuotaProtos {
* optional .hbase.pb.TimedQuota read_num = 5;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
 
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 

[33/50] [abbrv] hbase git commit: HBASE-17602 Reduce some quota chore periods/delays

2017-05-19 Thread elserj
HBASE-17602 Reduce some quota chore periods/delays


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9adf90e9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9adf90e9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9adf90e9

Branch: refs/heads/HBASE-16961
Commit: 9adf90e9a5353c9906a1fce513e7d01f2536e6ac
Parents: f952df3
Author: Josh Elser 
Authored: Tue Feb 7 11:21:08 2017 -0500
Committer: Josh Elser 
Committed: Fri May 19 12:25:20 2017 -0400

--
 .../java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java  | 4 ++--
 .../org/apache/hadoop/hbase/quotas/SpaceQuotaRefresherChore.java | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9adf90e9/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
index b9f4592..7f894e4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
@@ -55,11 +55,11 @@ public class QuotaObserverChore extends ScheduledChore {
   private static final Log LOG = LogFactory.getLog(QuotaObserverChore.class);
   static final String QUOTA_OBSERVER_CHORE_PERIOD_KEY =
   "hbase.master.quotas.observer.chore.period";
-  static final int QUOTA_OBSERVER_CHORE_PERIOD_DEFAULT = 1000 * 60 * 5; // 5 
minutes in millis
+  static final int QUOTA_OBSERVER_CHORE_PERIOD_DEFAULT = 1000 * 60 * 1; // 1 
minutes in millis
 
   static final String QUOTA_OBSERVER_CHORE_DELAY_KEY =
   "hbase.master.quotas.observer.chore.delay";
-  static final long QUOTA_OBSERVER_CHORE_DELAY_DEFAULT = 1000L * 60L; // 1 
minute
+  static final long QUOTA_OBSERVER_CHORE_DELAY_DEFAULT = 1000L * 15L; // 15 
seconds in millis
 
   static final String QUOTA_OBSERVER_CHORE_TIMEUNIT_KEY =
   "hbase.master.quotas.observer.chore.timeunit";

http://git-wip-us.apache.org/repos/asf/hbase/blob/9adf90e9/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaRefresherChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaRefresherChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaRefresherChore.java
index e1a2693..8587e79 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaRefresherChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaRefresherChore.java
@@ -44,11 +44,11 @@ public class SpaceQuotaRefresherChore extends 
ScheduledChore {
 
   static final String POLICY_REFRESHER_CHORE_PERIOD_KEY =
   "hbase.regionserver.quotas.policy.refresher.chore.period";
-  static final int POLICY_REFRESHER_CHORE_PERIOD_DEFAULT = 1000 * 60 * 5; // 5 
minutes in millis
+  static final int POLICY_REFRESHER_CHORE_PERIOD_DEFAULT = 1000 * 60 * 1; // 1 
minute in millis
 
   static final String POLICY_REFRESHER_CHORE_DELAY_KEY =
   "hbase.regionserver.quotas.policy.refresher.chore.delay";
-  static final long POLICY_REFRESHER_CHORE_DELAY_DEFAULT = 1000L * 60L; // 1 
minute
+  static final long POLICY_REFRESHER_CHORE_DELAY_DEFAULT = 1000L * 15L; // 15 
seconds in millis
 
   static final String POLICY_REFRESHER_CHORE_TIMEUNIT_KEY =
   "hbase.regionserver.quotas.policy.refresher.chore.timeunit";



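The shortened defaults above can still be tuned per deployment or per test. A hedged example of
overriding them from Java, using the property keys defined in the two chores; the values shown are
arbitrary:

  Configuration conf = HBaseConfiguration.create();
  conf.setInt("hbase.master.quotas.observer.chore.period", 60 * 1000);               // 1 minute
  conf.setLong("hbase.master.quotas.observer.chore.delay", 15L * 1000L);             // 15 seconds
  conf.setInt("hbase.regionserver.quotas.policy.refresher.chore.period", 60 * 1000); // 1 minute
  conf.setLong("hbase.regionserver.quotas.policy.refresher.chore.delay", 15L * 1000L);
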
[15/50] [abbrv] hbase git commit: HBASE-16999 Implement master and regionserver synchronization of quota state

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/f6cc45e5/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableSpaceQuotaViolationNotifier.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableSpaceQuotaViolationNotifier.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableSpaceQuotaViolationNotifier.java
new file mode 100644
index 000..4a7000c
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableSpaceQuotaViolationNotifier.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.Objects;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.ArgumentMatcher;
+
+/**
+ * Test case for {@link TableSpaceQuotaViolationNotifier}.
+ */
+@Category(SmallTests.class)
+public class TestTableSpaceQuotaViolationNotifier {
+
+  private TableSpaceQuotaViolationNotifier notifier;
+  private Connection conn;
+
+  @Before
+  public void setup() throws Exception {
+notifier = new TableSpaceQuotaViolationNotifier();
+conn = mock(Connection.class);
+notifier.initialize(conn);
+  }
+
+  @Test
+  public void testToViolation() throws Exception {
+final TableName tn = TableName.valueOf("inviolation");
+final SpaceViolationPolicy policy = SpaceViolationPolicy.NO_INSERTS;
+final Table quotaTable = mock(Table.class);
+
when(conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)).thenReturn(quotaTable);
+
+final Put expectedPut = new Put(Bytes.toBytes("t." + 
tn.getNameAsString()));
+final SpaceQuota protoQuota = SpaceQuota.newBuilder()
+.setViolationPolicy(ProtobufUtil.toProtoViolationPolicy(policy))
+.build();
+expectedPut.addColumn(Bytes.toBytes("u"), Bytes.toBytes("v"), 
protoQuota.toByteArray());
+
+notifier.transitionTableToViolation(tn, policy);
+
+verify(quotaTable).put(argThat(new SingleCellPutMatcher(expectedPut)));
+  }
+
+  @Test
+  public void testToObservance() throws Exception {
+final TableName tn = TableName.valueOf("notinviolation");
+final Table quotaTable = mock(Table.class);
+
when(conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)).thenReturn(quotaTable);
+
+final Delete expectedDelete = new Delete(Bytes.toBytes("t." + 
tn.getNameAsString()));
+expectedDelete.addColumn(Bytes.toBytes("u"), Bytes.toBytes("v"));
+
+notifier.transitionTableToObservance(tn);
+
+verify(quotaTable).delete(argThat(new 
SingleCellDeleteMatcher(expectedDelete)));
+  }
+
+  /**
+   * Parameterized for Puts.
+   */
+  private static class SingleCellPutMatcher extends SingleCellMutationMatcher<Put> {
+private SingleCellPutMatcher(Put expected) {
+  super(expected);
+}
+  }
+
+  /**
+   * Parameterized for Deletes.
+   */
+  private static class SingleCellDeleteMatcher extends SingleCellMutationMatcher<Delete> {
+private SingleCellDeleteMatcher(Delete expected) {
+  super(expected);
+}
+  }
+
+  /**
+   * Quick hack to verify a Mutation with one column.
+   */
+  private static class SingleCellMutationMatcher<T> extends ArgumentMatcher<T> {
+private final Mutation expected;
+
+private SingleCellMutationMatcher(Mutation expected) {
+  

[20/50] [abbrv] hbase git commit: HBASE-17001 Enforce quota violation policies in the RegionServer

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/6f2bee48/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
index 8b127d9..973ac8c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
@@ -37,9 +37,8 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.quotas.QuotaViolationStore.ViolationState;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
+import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -54,51 +53,51 @@ import com.google.common.collect.Multimap;
 @InterfaceAudience.Private
 public class QuotaObserverChore extends ScheduledChore {
   private static final Log LOG = LogFactory.getLog(QuotaObserverChore.class);
-  static final String VIOLATION_OBSERVER_CHORE_PERIOD_KEY =
-  "hbase.master.quotas.violation.observer.chore.period";
-  static final int VIOLATION_OBSERVER_CHORE_PERIOD_DEFAULT = 1000 * 60 * 5; // 
5 minutes in millis
+  static final String QUOTA_OBSERVER_CHORE_PERIOD_KEY =
+  "hbase.master.quotas.observer.chore.period";
+  static final int QUOTA_OBSERVER_CHORE_PERIOD_DEFAULT = 1000 * 60 * 5; // 5 
minutes in millis
 
-  static final String VIOLATION_OBSERVER_CHORE_DELAY_KEY =
-  "hbase.master.quotas.violation.observer.chore.delay";
-  static final long VIOLATION_OBSERVER_CHORE_DELAY_DEFAULT = 1000L * 60L; // 1 
minute
+  static final String QUOTA_OBSERVER_CHORE_DELAY_KEY =
+  "hbase.master.quotas.observer.chore.delay";
+  static final long QUOTA_OBSERVER_CHORE_DELAY_DEFAULT = 1000L * 60L; // 1 
minute
 
-  static final String VIOLATION_OBSERVER_CHORE_TIMEUNIT_KEY =
-  "hbase.master.quotas.violation.observer.chore.timeunit";
-  static final String VIOLATION_OBSERVER_CHORE_TIMEUNIT_DEFAULT = 
TimeUnit.MILLISECONDS.name();
+  static final String QUOTA_OBSERVER_CHORE_TIMEUNIT_KEY =
+  "hbase.master.quotas.observer.chore.timeunit";
+  static final String QUOTA_OBSERVER_CHORE_TIMEUNIT_DEFAULT = 
TimeUnit.MILLISECONDS.name();
 
-  static final String VIOLATION_OBSERVER_CHORE_REPORT_PERCENT_KEY =
-  "hbase.master.quotas.violation.observer.report.percent";
-  static final double VIOLATION_OBSERVER_CHORE_REPORT_PERCENT_DEFAULT= 0.95;
+  static final String QUOTA_OBSERVER_CHORE_REPORT_PERCENT_KEY =
+  "hbase.master.quotas.observer.report.percent";
+  static final double QUOTA_OBSERVER_CHORE_REPORT_PERCENT_DEFAULT= 0.95;
 
   private final Connection conn;
   private final Configuration conf;
   private final MasterQuotaManager quotaManager;
   /*
-   * Callback that changes in quota violation are passed to.
+   * Callback that changes in quota snapshots are passed to.
*/
-  private final SpaceQuotaViolationNotifier violationNotifier;
+  private final SpaceQuotaSnapshotNotifier snapshotNotifier;
 
   /*
-   * Preserves the state of quota violations for tables and namespaces
+   * Preserves the state of quota snapshots for tables and namespaces
*/
-  private final Map tableQuotaViolationStates;
-  private final Map namespaceQuotaViolationStates;
+  private final Map tableQuotaSnapshots;
+  private final Map namespaceQuotaSnapshots;
 
   /*
-   * Encapsulates logic for moving tables/namespaces into or out of quota 
violation
+   * Encapsulates logic for tracking the state of a table/namespace WRT space 
quotas
*/
-  private QuotaViolationStore tableViolationStore;
-  private QuotaViolationStore namespaceViolationStore;
+  private QuotaSnapshotStore tableSnapshotStore;
+  private QuotaSnapshotStore namespaceSnapshotStore;
 
   public QuotaObserverChore(HMaster master) {
 this(
 master.getConnection(), master.getConfiguration(),
-master.getSpaceQuotaViolationNotifier(), 
master.getMasterQuotaManager(),
+master.getSpaceQuotaSnapshotNotifier(), master.getMasterQuotaManager(),
 master);
   }
 
   QuotaObserverChore(
-  Connection conn, Configuration conf, SpaceQuotaViolationNotifier 
violationNotifier,
+  Connection conn, Configuration conf, SpaceQuotaSnapshotNotifier 
snapshotNotifier,
   

[40/50] [abbrv] hbase git commit: HBASE-17976 Remove stability annotation from public audience class

2017-05-19 Thread elserj
HBASE-17976 Remove stability annotation from public audience class


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/56f1cd6c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/56f1cd6c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/56f1cd6c

Branch: refs/heads/HBASE-16961
Commit: 56f1cd6ce1ecd8d703cef3c7b6b76bba82df03b7
Parents: 97dcaff
Author: Josh Elser 
Authored: Fri Apr 28 16:51:17 2017 -0400
Committer: Josh Elser 
Committed: Fri May 19 12:28:50 2017 -0400

--
 .../java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java  | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/56f1cd6c/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java
index 34d2542..023e855 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java
@@ -17,7 +17,6 @@
 package org.apache.hadoop.hbase.quotas;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 
 /**
  * Enumeration that represents the action HBase will take when a space quota 
is violated.
@@ -26,7 +25,6 @@ import 
org.apache.hadoop.hbase.classification.InterfaceStability;
  * namespace, it is treated as a collection of tables (all tables are subject 
to the same policy).
  */
 @InterfaceAudience.Public
-@InterfaceStability.Evolving
 public enum SpaceViolationPolicy {
   /**
* Disables the table(s).



[23/50] [abbrv] hbase git commit: HBASE-17259 API to remove space quotas on a table/namespace

2017-05-19 Thread elserj
HBASE-17259 API to remove space quotas on a table/namespace


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b71c9e13
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b71c9e13
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b71c9e13

Branch: refs/heads/HBASE-16961
Commit: b71c9e13d5dcab702e5445513d1c2986e2b9e62c
Parents: 6f2bee4
Author: Josh Elser 
Authored: Wed Jan 11 12:47:06 2017 -0500
Committer: Josh Elser 
Committed: Fri May 19 12:19:19 2017 -0400

--
 .../hbase/quotas/QuotaSettingsFactory.java  |  22 +++
 .../hadoop/hbase/quotas/QuotaTableUtil.java |   6 +-
 .../hadoop/hbase/quotas/SpaceLimitSettings.java |  44 -
 .../hbase/quotas/TestQuotaSettingsFactory.java  |  20 +++
 .../shaded/protobuf/generated/QuotaProtos.java  | 157 +++---
 .../src/main/protobuf/Quota.proto   |   1 +
 .../hbase/protobuf/generated/QuotaProtos.java   | 159 ---
 hbase-protocol/src/main/protobuf/Quota.proto|   1 +
 .../hadoop/hbase/quotas/MasterQuotaManager.java |   9 +-
 .../hadoop/hbase/quotas/TestQuotaAdmin.java |  49 +-
 10 files changed, 423 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b71c9e13/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
index 7f1c180..184277d 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
@@ -316,6 +316,17 @@ public class QuotaSettingsFactory {
   }
 
   /**
+   * Creates a {@link QuotaSettings} object to remove the FileSystem space 
quota for the given
+   * table.
+   *
+   * @param tableName The name of the table to remove the quota for.
+   * @return A {@link QuotaSettings} object.
+   */
+  public static QuotaSettings removeTableSpaceLimit(TableName tableName) {
+return new SpaceLimitSettings(tableName, true);
+  }
+
+  /**
* Creates a {@link QuotaSettings} object to limit the FileSystem space 
usage for the given
* namespace to the given size in bytes. When the space usage is exceeded by 
all tables in the
* namespace, the provided {@link SpaceViolationPolicy} is enacted on all 
tables in the namespace.
@@ -329,4 +340,15 @@ public class QuotaSettingsFactory {
   final String namespace, long sizeLimit, final SpaceViolationPolicy 
violationPolicy) {
 return new SpaceLimitSettings(namespace, sizeLimit, violationPolicy);
   }
+
+  /**
+   * Creates a {@link QuotaSettings} object to remove the FileSystem space 
quota for the given
+* namespace.
+   *
+   * @param namespace The namespace to remove the quota on.
+   * @return A {@link QuotaSettings} object.
+   */
+  public static QuotaSettings removeNamespaceSpaceLimit(String namespace) {
+return new SpaceLimitSettings(namespace, true);
+  }
 }
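
As a hedged usage sketch (not part of this patch), the new removal methods pair with the existing Admin.setQuota API roughly as follows; the table name "t1" and namespace "ns1" are purely illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;

public class RemoveSpaceQuotaSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("t1"); // illustrative table name
      // Set a 1 GB space quota that stops further inserts once exceeded.
      admin.setQuota(QuotaSettingsFactory.limitTableSpace(
          tn, 1024L * 1024L * 1024L, SpaceViolationPolicy.NO_INSERTS));
      // Later, drop the table-level quota and a namespace-level quota again.
      admin.setQuota(QuotaSettingsFactory.removeTableSpaceLimit(tn));
      admin.setQuota(QuotaSettingsFactory.removeNamespaceSpaceLimit("ns1"));
    }
  }
}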

http://git-wip-us.apache.org/repos/asf/hbase/blob/b71c9e13/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
index 66535b2..ce4cd04 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
@@ -422,7 +422,11 @@ public class QuotaTableUtil {
 boolean hasSettings = false;
 hasSettings |= quotas.hasThrottle();
 hasSettings |= quotas.hasBypassGlobals();
-hasSettings |= quotas.hasSpace();
+// Only when there is a space quota, make sure there's actually both 
fields provided
+// Otherwise, it's a noop.
+if (quotas.hasSpace()) {
+  hasSettings |= (quotas.getSpace().hasSoftLimit() && 
quotas.getSpace().hasViolationPolicy());
+}
 return !hasSettings;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b71c9e13/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
index e54882e..8ff7623 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
+++ 

[39/50] [abbrv] hbase git commit: HBASE-17002 JMX metrics and some UI additions for space quotas

2017-05-19 Thread elserj
HBASE-17002 JMX metrics and some UI additions for space quotas


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3a2ec0bd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3a2ec0bd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3a2ec0bd

Branch: refs/heads/HBASE-16961
Commit: 3a2ec0bd186262fce5beb6d1ce6ba2a6d70b15ad
Parents: ea3c042
Author: Josh Elser 
Authored: Wed Feb 15 14:24:57 2017 -0500
Committer: Josh Elser 
Committed: Fri May 19 12:28:49 2017 -0400

--
 .../hbase/client/ConnectionImplementation.java  |8 +
 .../hadoop/hbase/client/QuotaStatusCalls.java   |   39 +-
 .../client/ShortCircuitMasterConnection.java|8 +
 .../hadoop/hbase/quotas/QuotaTableUtil.java |   41 +
 .../hbase/shaded/protobuf/RequestConverter.java |   11 +
 .../hbase/master/MetricsMasterQuotaSource.java  |   75 +
 .../master/MetricsMasterQuotaSourceFactory.java |   26 +
 .../hbase/master/MetricsMasterWrapper.java  |   13 +
 .../MetricsRegionServerQuotaSource.java |   54 +
 .../MetricsMasterQuotaSourceFactoryImpl.java|   36 +
 .../master/MetricsMasterQuotaSourceImpl.java|  129 +
 ...hadoop.hbase.master.MetricsMasterQuotaSource |   18 +
 ...hbase.master.MetricsMasterQuotaSourceFactory |   18 +
 .../shaded/protobuf/generated/MasterProtos.java |   93 +-
 .../shaded/protobuf/generated/QuotaProtos.java  | 3099 +-
 .../src/main/protobuf/Master.proto  |6 +-
 .../src/main/protobuf/Quota.proto   |   17 +
 .../org/apache/hadoop/hbase/master/HMaster.java |2 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   38 +
 .../hadoop/hbase/master/MetricsMaster.java  |   42 +
 .../hbase/master/MetricsMasterWrapperImpl.java  |   42 +-
 .../hadoop/hbase/quotas/QuotaObserverChore.java |   92 +-
 .../resources/hbase-webapps/master/table.jsp|   59 +
 .../hbase/master/TestMasterMetricsWrapper.java  |   17 +
 .../hbase/quotas/TestQuotaStatusRPCs.java   |   83 +
 25 files changed, 4032 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2ec0bd/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 5242efc..e5f5694 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -92,6 +92,8 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCa
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
@@ -1759,6 +1761,12 @@ class ConnectionImplementation implements 
ClusterConnection, Closeable {
   throws ServiceException {
 return stub.getSpaceQuotaRegionSizes(controller, request);
   }
+
+  @Override
+  public GetQuotaStatesResponse getQuotaStates(
+  RpcController controller, GetQuotaStatesRequest request) throws 
ServiceException {
+return stub.getQuotaStates(controller, request);
+  }
 };
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2ec0bd/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
index f0f385d..af36d1e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import 

[44/50] [abbrv] hbase git commit: HBASE-17955 Various reviewboard improvements to space quota work

2017-05-19 Thread elserj
HBASE-17955 Various reviewboard improvements to space quota work

Most notable change is to cache SpaceViolationPolicyEnforcement objects
in the write path. When a table has no quota or there is no SpaceQuotaSnapshot
for that table (yet), we want to avoid creating lots of
SpaceViolationPolicyEnforcement instances, caching one instance
instead. This will help reduce GC pressure.
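
A simplified sketch of that caching idea (illustrative only, not the shipped ActivePolicyEnforcement code): the enforcement object for a table with no quota, or no snapshot yet, is built at most once per instance and reused on later writes.

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

// K = table name, V = enforcement object; the factory only runs on a cache miss.
class LocalEnforcementCacheSketch<K, V> {
  private final Map<K, V> activePolicies;                  // tables currently in violation
  private final Map<K, V> locallyCached = new HashMap<>(); // lazily built "no violation" defaults

  LocalEnforcementCacheSketch(Map<K, V> activePolicies) {
    this.activePolicies = activePolicies;
  }

  V getPolicyEnforcement(K table, Function<K, V> factory) {
    V policy = activePolicies.get(table);
    if (policy != null) {
      return policy; // a real violation policy always wins
    }
    synchronized (locallyCached) {
      // Build the default enforcement once and reuse it for the lifetime of this object.
      return locallyCached.computeIfAbsent(table, factory);
    }
  }
}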


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/97dcaffa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/97dcaffa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/97dcaffa

Branch: refs/heads/HBASE-16961
Commit: 97dcaffa9831c3a49a70457e57390667a41fe292
Parents: a69b68e
Author: Josh Elser 
Authored: Tue Apr 18 16:43:40 2017 -0400
Committer: Josh Elser 
Committed: Fri May 19 12:28:50 2017 -0400

--
 .../hbase/quotas/QuotaSettingsFactory.java  |  10 +-
 .../hadoop/hbase/quotas/QuotaTableUtil.java |   7 +-
 .../hadoop/hbase/quotas/SpaceLimitSettings.java |  26 +-
 .../hadoop/hbase/quotas/SpaceQuotaSnapshot.java |  34 +-
 .../hbase/quotas/SpaceViolationPolicy.java  |   5 +-
 .../hbase/master/MetricsMasterQuotaSource.java  |  13 +-
 .../MetricsRegionServerQuotaSource.java |  10 +-
 .../MetricsMasterQuotaSourceFactoryImpl.java|   2 +-
 .../master/MetricsMasterQuotaSourceImpl.java|  10 +-
 .../shaded/protobuf/generated/QuotaProtos.java  | 637 ++-
 .../generated/RegionServerStatusProtos.java | 340 +-
 .../src/main/protobuf/Quota.proto   |   8 +-
 .../src/main/protobuf/RegionServerStatus.proto  |   4 +-
 .../hbase/protobuf/generated/QuotaProtos.java   | 463 +++---
 hbase-protocol/src/main/protobuf/Quota.proto|   8 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   4 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   9 +-
 .../hadoop/hbase/master/MetricsMaster.java  |  13 +-
 .../hbase/master/MetricsMasterWrapperImpl.java  |   4 +-
 .../hbase/quotas/ActivePolicyEnforcement.java   |  54 +-
 .../quotas/FileSystemUtilizationChore.java  |   4 +-
 .../hadoop/hbase/quotas/MasterQuotaManager.java |   8 +-
 .../hbase/quotas/MasterSpaceQuotaObserver.java  |   4 +-
 .../quotas/NamespaceQuotaSnapshotStore.java |   2 +-
 .../hadoop/hbase/quotas/QuotaObserverChore.java |  62 +-
 .../quotas/RegionServerSpaceQuotaManager.java   |  16 +-
 .../hbase/quotas/SpaceLimitingException.java|   6 +-
 .../hbase/quotas/SpaceQuotaRefresherChore.java  |   2 +-
 .../SpaceViolationPolicyEnforcementFactory.java |  20 +-
 .../hbase/quotas/TableQuotaSnapshotStore.java   |   2 +-
 .../AbstractViolationPolicyEnforcement.java |  45 +-
 ...LoadVerifyingViolationPolicyEnforcement.java |  50 --
 .../DefaultViolationPolicyEnforcement.java  |  90 +++
 .../DisableTableViolationPolicyEnforcement.java |   2 +-
 ...ssingSnapshotViolationPolicyEnforcement.java |  63 ++
 .../NoInsertsViolationPolicyEnforcement.java|   2 +-
 .../NoWritesViolationPolicyEnforcement.java |   2 +-
 .../hbase/regionserver/CompactSplitThread.java  |   2 +-
 .../hbase/regionserver/HRegionServer.java   |   4 +-
 .../hbase/regionserver/RSRpcServices.java   |   9 +-
 .../resources/hbase-webapps/master/table.jsp|   8 +-
 .../hbase/quotas/SpaceQuotaHelperForTests.java  |  66 +-
 .../quotas/TestActivePolicyEnforcement.java |  62 +-
 .../quotas/TestMasterSpaceQuotaObserver.java|  28 +-
 .../TestQuotaObserverChoreRegionReports.java|   6 +-
 .../TestQuotaObserverChoreWithMiniCluster.java  |  31 +-
 .../hbase/quotas/TestQuotaStatusRPCs.java   |  15 +-
 .../TestRegionServerSpaceQuotaManager.java  |   4 +-
 .../hadoop/hbase/quotas/TestSpaceQuotas.java|  30 +-
 .../TestTableSpaceQuotaViolationNotifier.java   |   8 +-
 ...kLoadCheckingViolationPolicyEnforcement.java |   2 +-
 .../TestRegionServerRegionSpaceUseReport.java   |   4 +-
 52 files changed, 1280 insertions(+), 1040 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/97dcaffa/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
index 184277d..a99235f 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
@@ -127,11 +127,11 @@ public class QuotaSettingsFactory {
   }
 
   static QuotaSettings fromSpace(TableName table, String namespace, SpaceQuota 
protoQuota) {
-if ((null == table && null == namespace) || (null != table && 

[41/50] [abbrv] hbase git commit: HBASE-17955 Various reviewboard improvements to space quota work

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/97dcaffa/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/ActivePolicyEnforcement.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/ActivePolicyEnforcement.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/ActivePolicyEnforcement.java
index a313fa1..c558b26 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/ActivePolicyEnforcement.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/ActivePolicyEnforcement.java
@@ -17,6 +17,7 @@
 package org.apache.hadoop.hbase.quotas;
 
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.Map;
 import java.util.Objects;
 
@@ -28,7 +29,12 @@ import 
org.apache.hadoop.hbase.regionserver.RegionServerServices;
 
 /**
  * A class to ease dealing with tables that have and do not have violation 
policies
- * being enforced in a uniform manner. Immutable.
+ * being enforced. This class is immutable, except for {@code locallyCachedPolicies}.
+ *
+ * The {@code locallyCachedPolicies} are mutable given the current {@code activePolicies}
+ * and {@code snapshots}. It is expected that when a new instance of this class is
+ * instantiated, we also want to invalidate those previously cached policies (as they
+ * may now be invalid if we received new quota usage information).
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -36,12 +42,23 @@ public class ActivePolicyEnforcement {
   private final Map activePolicies;
   private final Map snapshots;
   private final RegionServerServices rss;
+  private final SpaceViolationPolicyEnforcementFactory factory;
+  private final Map 
locallyCachedPolicies;
 
   public 
ActivePolicyEnforcement(Map 
activePolicies,
   Map snapshots, RegionServerServices rss) {
+this(activePolicies, snapshots, rss, 
SpaceViolationPolicyEnforcementFactory.getInstance());
+  }
+
+  public 
ActivePolicyEnforcement(Map 
activePolicies,
+  Map snapshots, RegionServerServices rss,
+  SpaceViolationPolicyEnforcementFactory factory) {
 this.activePolicies = activePolicies;
 this.snapshots = snapshots;
 this.rss = rss;
+this.factory = factory;
+// Mutable!
+this.locallyCachedPolicies = new HashMap<>();
   }
 
   /**
@@ -65,16 +82,25 @@ public class ActivePolicyEnforcement {
*/
   public SpaceViolationPolicyEnforcement getPolicyEnforcement(TableName 
tableName) {
 SpaceViolationPolicyEnforcement policy = 
activePolicies.get(Objects.requireNonNull(tableName));
-if (null == policy) {
-  synchronized (activePolicies) {
-// If we've never seen a snapshot, assume no use, and infinite limit
-SpaceQuotaSnapshot snapshot = snapshots.get(tableName);
-if (null == snapshot) {
-  snapshot = SpaceQuotaSnapshot.getNoSuchSnapshot();
+if (policy == null) {
+  synchronized (locallyCachedPolicies) {
+// When we don't have a policy enforcement for the table, there could be one of two cases:
+//  1) The table has no quota defined
+//  2) The table is not in violation of its quota
+// In both of these cases, we want to make sure that access remains fast and we minimize
+// object creation. We can accomplish this by locally caching policies instead of creating
+// a new instance of the policy each time.
+policy = locallyCachedPolicies.get(tableName);
+// We have already created/cached the enforcement, use it again. `activePolicies` and
+// `snapshots` are immutable, thus this policy is valid for the lifetime of `this`.
+if (policy != null) {
+  return policy;
 }
-// Create the default policy and cache it
-return SpaceViolationPolicyEnforcementFactory.getInstance().createWithoutViolation(
-rss, tableName, snapshot);
+// Create a PolicyEnforcement for this table and snapshot. The snapshot may be null
+// which is OK.
+policy = factory.createWithoutViolation(rss, tableName, snapshots.get(tableName));
+// Cache the policy we created
+locallyCachedPolicies.put(tableName, policy);
   }
 }
 return policy;
@@ -87,6 +113,14 @@ public class ActivePolicyEnforcement {
 return Collections.unmodifiableMap(activePolicies);
   }
 
+  /**
+   * Returns an unmodifiable version of the policy enforcements that were 
cached because they are
+   * not in violation of their quota.
+   */
+  Map getLocallyCachedPolicies() {
+return Collections.unmodifiableMap(locallyCachedPolicies);
+ 

[05/50] [abbrv] hbase git commit: HBASE-16995 Build client Java API and client protobuf messages (Josh Elser)

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/0224ea1e/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index 3560de0..b5b41f3 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -842,7 +842,7 @@ public final class MasterProtos {
* required .hbase.pb.TableName table_name = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>
 
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>
   getTableNameFieldBuilder() {
 if (tableNameBuilder_ == null) {
   tableNameBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -960,7 +960,7 @@ public final class MasterProtos {
* required .hbase.pb.ColumnFamilySchema column_families = 
2;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder>
 
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder>
   getColumnFamiliesFieldBuilder() {
 if (columnFamiliesBuilder_ == null) {
   columnFamiliesBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -2257,7 +2257,7 @@ public final class MasterProtos {
* required .hbase.pb.TableName table_name = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>
 
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>
   getTableNameFieldBuilder() {
 if (tableNameBuilder_ == null) {
   tableNameBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -3622,7 +3622,7 @@ public final class MasterProtos {
* required .hbase.pb.TableName table_name = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>
 
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>
   getTableNameFieldBuilder() {
 if (tableNameBuilder_ == null) {
   tableNameBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -3740,7 +3740,7 @@ public final class MasterProtos {
* required .hbase.pb.ColumnFamilySchema column_families = 
2;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder>
 
+  

[32/50] [abbrv] hbase git commit: HBASE-17516 Correctly handle case where table and NS quotas both apply

2017-05-19 Thread elserj
HBASE-17516 Correctly handle case where table and NS quotas both apply

The logic surrounding when a table and namespace quota both apply
to a table was incorrect, leading to a case where a table quota
violation that should have fired did not fire because of the less-strict
namespace quota.
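
A hedged restatement of the corrected precedence rule in plain Java (the helper names below are hypothetical stand-ins, not the chore's actual methods): a violated table-level quota always wins, and the namespace policy is only pushed down to tables that do not have a violated quota of their own.

import java.util.List;

interface NamespaceQuotaPrecedenceSketch {
  boolean tableQuotaInViolation(String table); // does the table have its own violated quota?
  void applyNamespacePolicy(String table);     // push the namespace violation snapshot down

  default void enforceNamespaceViolation(List<String> tablesInNamespace) {
    for (String table : tablesInNamespace) {
      if (tableQuotaInViolation(table)) {
        // The table-level policy is already in effect; do not overwrite it.
        continue;
      }
      // No table quota, or a table quota that is not violated: the namespace
      // violation policy applies to this table.
      applyNamespacePolicy(table);
    }
  }
}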


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f952df39
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f952df39
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f952df39

Branch: refs/heads/HBASE-16961
Commit: f952df39f23e3f8a0493016637190bb32a12bd61
Parents: df67080
Author: Josh Elser 
Authored: Wed Feb 22 18:32:55 2017 -0500
Committer: Josh Elser 
Committed: Fri May 19 12:25:20 2017 -0400

--
 .../hadoop/hbase/quotas/QuotaObserverChore.java | 10 ++-
 .../TestQuotaObserverChoreWithMiniCluster.java  | 66 
 .../hbase/quotas/TestQuotaStatusRPCs.java   | 21 ++-
 .../hadoop/hbase/quotas/TestSpaceQuotas.java| 32 +-
 4 files changed, 97 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f952df39/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
index 973ac8c..b9f4592 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java
@@ -287,7 +287,8 @@ public class QuotaObserverChore extends ScheduledChore {
   // We want to have a policy of "NONE", moving out of violation
   if (!targetStatus.isInViolation()) {
 for (TableName tableInNS : tablesByNamespace.get(namespace)) {
-  if (!tableSnapshotStore.getCurrentState(tableInNS).getQuotaStatus().isInViolation()) {
+  // If there is a quota on this table that is in violation
+  if (tableSnapshotStore.getCurrentState(tableInNS).getQuotaStatus().isInViolation()) {
 // Table-level quota violation policy is being applied here.
 if (LOG.isTraceEnabled()) {
   LOG.trace("Not activating Namespace violation policy because a 
Table violation"
@@ -298,16 +299,21 @@ public class QuotaObserverChore extends ScheduledChore {
 this.snapshotNotifier.transitionTable(tableInNS, targetSnapshot);
   }
 }
+  // We want to move into violation at the NS level
   } else {
 // Moving tables in the namespace into violation or to a different 
violation policy
 for (TableName tableInNS : tablesByNamespace.get(namespace)) {
-  if 
(tableSnapshotStore.getCurrentState(tableInNS).getQuotaStatus().isInViolation())
 {
+  final SpaceQuotaSnapshot tableQuotaSnapshot =
+tableSnapshotStore.getCurrentState(tableInNS);
+  final boolean hasTableQuota = QuotaSnapshotStore.NO_QUOTA != 
tableQuotaSnapshot;
+  if (hasTableQuota && 
tableQuotaSnapshot.getQuotaStatus().isInViolation()) {
 // Table-level quota violation policy is being applied here.
 if (LOG.isTraceEnabled()) {
   LOG.trace("Not activating Namespace violation policy because a 
Table violation"
   + " policy is already in effect for " + tableInNS);
 }
   } else {
+// No table quota present or a table quota present that is not in violation
 LOG.info(tableInNS + " moving into violation of namespace space quota with policy " + targetStatus.getPolicy());
 this.snapshotNotifier.transitionTable(tableInNS, targetSnapshot);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f952df39/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
index 943c898..63198a8 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreWithMiniCluster.java
@@ -193,40 +193,42 @@ public class TestQuotaObserverChoreWithMiniCluster {
 
 helper.writeData(tn1, 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE);
 admin.flush(tn1);
-Map violatedQuotas = 

[29/50] [abbrv] hbase git commit: HBASE-17428 Implement informational RPCs for space quotas

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/df670806/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
index a567f97..3760619 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
@@ -601,7 +601,7 @@ public final class AdminProtos {
* required .hbase.pb.RegionSpecifier region = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>
 
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>
   getRegionFieldBuilder() {
 if (regionBuilder_ == null) {
   regionBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -1450,7 +1450,7 @@ public final class AdminProtos {
* required .hbase.pb.RegionInfo region_info = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
 
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
   getRegionInfoFieldBuilder() {
 if (regionInfoBuilder_ == null) {
   regionInfoBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -2204,7 +2204,7 @@ public final class AdminProtos {
* required .hbase.pb.RegionSpecifier region = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>
 
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>
   getRegionFieldBuilder() {
 if (regionBuilder_ == null) {
   regionBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -3281,7 +3281,7 @@ public final class AdminProtos {
 /**
  * repeated .hbase.pb.RegionInfo region_info = 1;
  */
-
java.util.List
 
+
java.util.List
 getRegionInfoList();
 /**
  * repeated .hbase.pb.RegionInfo region_info = 1;
@@ -3294,7 +3294,7 @@ public final class AdminProtos {
 /**
  * repeated .hbase.pb.RegionInfo region_info = 1;
  */
-java.util.List
 
+java.util.List
 getRegionInfoOrBuilderList();
 /**
  * repeated .hbase.pb.RegionInfo region_info = 1;
@@ -3392,7 +3392,7 @@ public final class AdminProtos {
 /**
  * repeated .hbase.pb.RegionInfo region_info = 1;
  */
-public java.util.List
 
+public java.util.List
 getRegionInfoOrBuilderList() {
   return regionInfo_;
 }
@@ -3699,7 +3699,7 @@ public final class AdminProtos {
   regionInfoBuilder_ = null;
   regionInfo_ = other.regionInfo_;
   bitField0_ = (bitField0_ & ~0x0001);
-  regionInfoBuilder_ = 
+  regionInfoBuilder_ =
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
 ?
getRegionInfoFieldBuilder() : null;
 } else {
@@ -3935,7 +3935,7 @@ public final class AdminProtos {
   /**
* repeated .hbase.pb.RegionInfo region_info = 1;
*/
-  public java.util.List
 
+  public java.util.List
getRegionInfoOrBuilderList() {
 

[47/50] [abbrv] hbase git commit: HBASE-17981 Consolidate the space quota shell commands

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/3561b115/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
index 3760619..711b9c8 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
@@ -601,7 +601,7 @@ public final class AdminProtos {
* required .hbase.pb.RegionSpecifier region = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>
 
   getRegionFieldBuilder() {
 if (regionBuilder_ == null) {
   regionBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -1450,7 +1450,7 @@ public final class AdminProtos {
* required .hbase.pb.RegionInfo region_info = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
 
   getRegionInfoFieldBuilder() {
 if (regionInfoBuilder_ == null) {
   regionInfoBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -2204,7 +2204,7 @@ public final class AdminProtos {
* required .hbase.pb.RegionSpecifier region = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>
 
   getRegionFieldBuilder() {
 if (regionBuilder_ == null) {
   regionBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -3281,7 +3281,7 @@ public final class AdminProtos {
 /**
  * repeated .hbase.pb.RegionInfo region_info = 1;
  */
-
java.util.List
+
java.util.List
 
 getRegionInfoList();
 /**
  * repeated .hbase.pb.RegionInfo region_info = 1;
@@ -3294,7 +3294,7 @@ public final class AdminProtos {
 /**
  * repeated .hbase.pb.RegionInfo region_info = 1;
  */
-java.util.List
+java.util.List
 
 getRegionInfoOrBuilderList();
 /**
  * repeated .hbase.pb.RegionInfo region_info = 1;
@@ -3392,7 +3392,7 @@ public final class AdminProtos {
 /**
  * repeated .hbase.pb.RegionInfo region_info = 1;
  */
-public java.util.List
+public java.util.List
 
 getRegionInfoOrBuilderList() {
   return regionInfo_;
 }
@@ -3699,7 +3699,7 @@ public final class AdminProtos {
   regionInfoBuilder_ = null;
   regionInfo_ = other.regionInfo_;
   bitField0_ = (bitField0_ & ~0x0001);
-  regionInfoBuilder_ =
+  regionInfoBuilder_ = 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
 ?
getRegionInfoFieldBuilder() : null;
 } else {
@@ -3935,7 +3935,7 @@ public final class AdminProtos {
   /**
* repeated .hbase.pb.RegionInfo region_info = 1;
*/
-  public java.util.List
+  public java.util.List
 
getRegionInfoOrBuilderList() {
 

[36/50] [abbrv] hbase git commit: HBASE-17447 Implement a MasterObserver for automatically deleting space quotas

2017-05-19 Thread elserj
HBASE-17447 Implement a MasterObserver for automatically deleting space quotas

When a table or namespace is deleted, it would be nice to automatically
delete the quota on said table/NS. It's possible that not all people
would want this functionality, so we can leave it up to the user to
configure this Observer.
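
A hedged configuration sketch for opting in (shown programmatically; the same keys can equally be set in hbase-site.xml): the observer is loaded like any other master coprocessor, and quotas themselves must be enabled.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableMasterSpaceQuotaObserverSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Quotas must be enabled for the observer to do anything.
    conf.setBoolean("hbase.quota.enabled", true);
    // Register the observer on the master.
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver");
    System.out.println("master coprocessors: " + conf.get("hbase.coprocessor.master.classes"));
  }
}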


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a69b68e9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a69b68e9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a69b68e9

Branch: refs/heads/HBASE-16961
Commit: a69b68e94d714f1c36837ad22d7088ad9829d813
Parents: 6007442
Author: Josh Elser 
Authored: Thu Mar 16 18:54:01 2017 -0400
Committer: Josh Elser 
Committed: Fri May 19 12:28:49 2017 -0400

--
 .../hbase/quotas/MasterSpaceQuotaObserver.java  |  85 ++
 .../quotas/TestMasterSpaceQuotaObserver.java| 169 +++
 src/main/asciidoc/_chapters/ops_mgt.adoc|  17 ++
 3 files changed, 271 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a69b68e9/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java
new file mode 100644
index 000..a3abf32
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+
+/**
+ * An observer to automatically delete space quotas when a table/namespace
+ * are deleted.
+ */
+@InterfaceAudience.Private
+public class MasterSpaceQuotaObserver implements MasterObserver {
+  private CoprocessorEnvironment cpEnv;
+  private Configuration conf;
+  private boolean quotasEnabled = false;
+
+  @Override
+  public void start(CoprocessorEnvironment ctx) throws IOException {
+this.cpEnv = ctx;
+this.conf = cpEnv.getConfiguration();
+this.quotasEnabled = QuotaUtil.isQuotaEnabled(conf);
+  }
+
+  @Override
+  public void postDeleteTable(
+  ObserverContext ctx, TableName tableName) throws IOException {
+// Do nothing if quotas aren't enabled
+if (!quotasEnabled) {
+  return;
+}
+final MasterServices master = ctx.getEnvironment().getMasterServices();
+final Connection conn = master.getConnection();
+Quotas quotas = QuotaUtil.getTableQuota(master.getConnection(), tableName);
+if (null != quotas && quotas.hasSpace()) {
+  QuotaSettings settings = QuotaSettingsFactory.removeTableSpaceLimit(tableName);
+  try (Admin admin = conn.getAdmin()) {
+admin.setQuota(settings);
+  }
+}
+  }
+
+  @Override
+  public void postDeleteNamespace(
+  ObserverContext ctx, String namespace) throws IOException {
+// Do nothing if quotas aren't enabled
+if (!quotasEnabled) {
+  return;
+}
+final MasterServices master = ctx.getEnvironment().getMasterServices();
+final Connection conn = master.getConnection();
+Quotas quotas = QuotaUtil.getNamespaceQuota(master.getConnection(), namespace);
+if (null != quotas && quotas.hasSpace()) {
+  

[10/50] [abbrv] hbase git commit: HBASE-16996 Implement storage/retrieval of filesystem-use quotas into quota table (Josh Elser)

2017-05-19 Thread elserj
HBASE-16996 Implement storage/retrieval of filesystem-use quotas into quota 
table (Josh Elser)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9a13dfa0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9a13dfa0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9a13dfa0

Branch: refs/heads/HBASE-16961
Commit: 9a13dfa08773e99572559e345bbc1fe32e0d5359
Parents: 7813084
Author: tedyu 
Authored: Sat Dec 3 14:30:48 2016 -0800
Committer: Josh Elser 
Committed: Fri May 19 11:56:05 2017 -0400

--
 .../hadoop/hbase/quotas/QuotaTableUtil.java |  13 +-
 .../hadoop/hbase/quotas/MasterQuotaManager.java |  30 +
 .../hadoop/hbase/quotas/TestQuotaAdmin.java | 125 ++-
 3 files changed, 165 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9a13dfa0/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
index c44090f..8ef4f08 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
@@ -53,7 +53,9 @@ import org.apache.hadoop.hbase.util.Strings;
  * 
  * ROW-KEY  FAM/QUALDATA
  *   n.namespace q:s global-quotas
+ *   n.namespace u:dusize in bytes
  *   t.table q:s global-quotas
+ *   t.table u:dusize in bytes
  *   u.user  q:s global-quotas
  *   u.user  q:s.table table-quotas
  *   u.user  q:s.ns:   namespace-quotas
@@ -72,6 +74,7 @@ public class QuotaTableUtil {
   protected static final byte[] QUOTA_FAMILY_USAGE = Bytes.toBytes("u");
   protected static final byte[] QUOTA_QUALIFIER_SETTINGS = Bytes.toBytes("s");
   protected static final byte[] QUOTA_QUALIFIER_SETTINGS_PREFIX = 
Bytes.toBytes("s.");
+  protected static final byte[] QUOTA_QUALIFIER_DISKUSAGE = 
Bytes.toBytes("du");
   protected static final byte[] QUOTA_USER_ROW_KEY_PREFIX = 
Bytes.toBytes("u.");
   protected static final byte[] QUOTA_TABLE_ROW_KEY_PREFIX = 
Bytes.toBytes("t.");
   protected static final byte[] QUOTA_NAMESPACE_ROW_KEY_PREFIX = 
Bytes.toBytes("n.");
@@ -330,11 +333,16 @@ public class QuotaTableUtil {
*  Quotas protobuf helpers
*/
   protected static Quotas quotasFromData(final byte[] data) throws IOException 
{
+return quotasFromData(data, 0, data.length);
+  }
+
+  protected static Quotas quotasFromData(
+  final byte[] data, int offset, int length) throws IOException {
 int magicLen = ProtobufMagic.lengthOfPBMagic();
-if (!ProtobufMagic.isPBMagicPrefix(data, 0, magicLen)) {
+if (!ProtobufMagic.isPBMagicPrefix(data, offset, magicLen)) {
   throw new IOException("Missing pb magic prefix");
 }
-return Quotas.parseFrom(new ByteArrayInputStream(data, magicLen, 
data.length - magicLen));
+return Quotas.parseFrom(new ByteArrayInputStream(data, offset + magicLen, 
length - magicLen));
   }
 
   protected static byte[] quotasToData(final Quotas data) throws IOException {
@@ -348,6 +356,7 @@ public class QuotaTableUtil {
 boolean hasSettings = false;
 hasSettings |= quotas.hasThrottle();
 hasSettings |= quotas.hasBypassGlobals();
+hasSettings |= quotas.hasSpace();
 return !hasSettings;
   }
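
Given the row-key layout documented in the class comment above ("t.<table>" rows, family "u", qualifier "du"), a hedged sketch of reading a table's stored size cell straight out of hbase:quota; the Table handle is assumed to already be open on QuotaTableUtil.QUOTA_TABLE_NAME, and decoding of the returned value is not shown:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class QuotaUsageReadSketch {
  // Fetches the "u:du" cell of the "t.<table>" row: the serialized size in bytes.
  static byte[] readStoredTableSize(Table quotaTable, TableName tn) throws java.io.IOException {
    Get get = new Get(Bytes.toBytes("t." + tn.getNameAsString()));
    get.addColumn(Bytes.toBytes("u"), Bytes.toBytes("du"));
    Result result = quotaTable.get(get);
    return result.getValue(Bytes.toBytes("u"), Bytes.toBytes("du"));
  }
}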
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/9a13dfa0/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
index 5dab2e3..1469268 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -37,6 +37,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Throttle;
 import 

[19/50] [abbrv] hbase git commit: HBASE-17001 Enforce quota violation policies in the RegionServer

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/6f2bee48/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicyEnforcementFactory.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicyEnforcementFactory.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicyEnforcementFactory.java
new file mode 100644
index 000..6b754b9
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicyEnforcementFactory.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus;
+import 
org.apache.hadoop.hbase.quotas.policies.BulkLoadVerifyingViolationPolicyEnforcement;
+import 
org.apache.hadoop.hbase.quotas.policies.DisableTableViolationPolicyEnforcement;
+import 
org.apache.hadoop.hbase.quotas.policies.NoInsertsViolationPolicyEnforcement;
+import 
org.apache.hadoop.hbase.quotas.policies.NoWritesCompactionsViolationPolicyEnforcement;
+import 
org.apache.hadoop.hbase.quotas.policies.NoWritesViolationPolicyEnforcement;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+
+/**
+ * A factory class for instantiating {@link SpaceViolationPolicyEnforcement} instances.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class SpaceViolationPolicyEnforcementFactory {
+
+  private static final SpaceViolationPolicyEnforcementFactory INSTANCE =
+  new SpaceViolationPolicyEnforcementFactory();
+
+  private SpaceViolationPolicyEnforcementFactory() {}
+
+  /**
+   * Returns an instance of this factory.
+   */
+  public static SpaceViolationPolicyEnforcementFactory getInstance() {
+return INSTANCE;
+  }
+
+  /**
+   * Constructs the appropriate {@link SpaceViolationPolicyEnforcement} for tables that are
+   * in violation of their space quota.
+   */
+  public SpaceViolationPolicyEnforcement create(
+  RegionServerServices rss, TableName tableName, SpaceQuotaSnapshot snapshot) {
+SpaceViolationPolicyEnforcement enforcement;
+SpaceQuotaStatus status = snapshot.getQuotaStatus();
+if (!status.isInViolation()) {
+  throw new IllegalArgumentException(tableName + " is not in violation. Snapshot=" + snapshot);
+}
+switch (status.getPolicy()) {
+  case DISABLE:
+enforcement = new DisableTableViolationPolicyEnforcement();
+break;
+  case NO_WRITES_COMPACTIONS:
+enforcement = new NoWritesCompactionsViolationPolicyEnforcement();
+break;
+  case NO_WRITES:
+enforcement = new NoWritesViolationPolicyEnforcement();
+break;
+  case NO_INSERTS:
+enforcement = new NoInsertsViolationPolicyEnforcement();
+break;
+  default:
+throw new IllegalArgumentException("Unhandled SpaceViolationPolicy: " 
+ status.getPolicy());
+}
+enforcement.initialize(rss, tableName, snapshot);
+return enforcement;
+  }
+
+  /**
+   * Creates the "default" {@link SpaceViolationPolicyEnforcement} for a table 
that isn't in
+   * violation. This is used to have uniform policy checking for tables in and 
not quotas.
+   */
+  public SpaceViolationPolicyEnforcement createWithoutViolation(
+  RegionServerServices rss, TableName tableName, SpaceQuotaSnapshot 
snapshot) {
+SpaceQuotaStatus status = snapshot.getQuotaStatus();
+if (status.isInViolation()) {
+  throw new IllegalArgumentException(
+  tableName + " is in violation. Logic error. Snapshot=" + snapshot);
+}
+BulkLoadVerifyingViolationPolicyEnforcement enforcement = new BulkLoadVerifyingViolationPolicyEnforcement();
+enforcement.initialize(rss, tableName, snapshot);
+return enforcement;
+  }
+}
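
A hedged caller-side sketch of the factory above (rss, tableName and snapshot are assumed to be in scope on the region server): choose create() or createWithoutViolation() from the snapshot's status, mirroring the argument checks the factory itself enforces.

// Assumes: RegionServerServices rss; TableName tableName; SpaceQuotaSnapshot snapshot (non-null).
SpaceViolationPolicyEnforcementFactory factory =
    SpaceViolationPolicyEnforcementFactory.getInstance();
SpaceViolationPolicyEnforcement enforcement =
    snapshot.getQuotaStatus().isInViolation()
        ? factory.create(rss, tableName, snapshot)                   // table is in violation
        : factory.createWithoutViolation(rss, tableName, snapshot);  // uniform default handling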

http://git-wip-us.apache.org/repos/asf/hbase/blob/6f2bee48/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TableQuotaSnapshotStore.java

[01/50] [abbrv] hbase git commit: HBASE-17286 Add goal to remote-resources plugin [Forced Update!]

2017-05-19 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/HBASE-16961 ee23e85d3 -> 97bfe34af (forced update)


HBASE-17286 Add goal to remote-resources plugin

With Apache parent pom v12 our remote-resources-plugin execution id was
shadowing the parent declaration, and our configuration would get run to
aggregate LICENSE files correctly. When upgrading to v18, apache changed
the execution id, so our configuration no longer gets used.

Add an explicit goal to our usage of the remote-resources-plugin and
change the name to something more descriptive and less likely to
conflict (either intentionally or not).

Signed-off-by: Josh Elser 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5c728b0f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5c728b0f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5c728b0f

Branch: refs/heads/HBASE-16961
Commit: 5c728b0f0c84a7a30ae1618e611c5ebd836e04b9
Parents: 3fe4b28
Author: Mike Drob 
Authored: Thu May 18 15:19:21 2017 -0700
Committer: Josh Elser 
Committed: Fri May 19 11:37:49 2017 -0400

--
 hbase-assembly/pom.xml | 5 -
 hbase-shaded/pom.xml   | 5 -
 2 files changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5c728b0f/hbase-assembly/pom.xml
--
diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index 19bf5f0..79d154d 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -44,7 +44,10 @@
 1.5
 
   
-            <id>default</id>
+            <id>aggregate-licenses</id>
+            <goals>
+              <goal>process</goal>
+            </goals>
 
   
 ${build.year}

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c728b0f/hbase-shaded/pom.xml
--
diff --git a/hbase-shaded/pom.xml b/hbase-shaded/pom.xml
index 10edf42..520eb61 100644
--- a/hbase-shaded/pom.xml
+++ b/hbase-shaded/pom.xml
@@ -72,7 +72,10 @@
   1.5
   
 
-              <id>default</id>
+              <id>aggregate-licenses</id>
+              <goals>
+                <goal>process</goal>
+              </goals>
   
 
   ${build.year}



[17/50] [abbrv] hbase git commit: HBASE-17001 Enforce quota violation policies in the RegionServer

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/6f2bee48/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/BaseViolationPolicyEnforcement.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/BaseViolationPolicyEnforcement.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/BaseViolationPolicyEnforcement.java
new file mode 100644
index 000..ec8f1bf
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/BaseViolationPolicyEnforcement.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas.policies;
+
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class BaseViolationPolicyEnforcement {
+
+  static final Append APPEND = new Append(Bytes.toBytes("foo"));
+  static final Delete DELETE = new Delete(Bytes.toBytes("foo"));
+  static final Increment INCREMENT = new Increment(Bytes.toBytes("foo"));
+  static final Put PUT = new Put(Bytes.toBytes("foo"));
+}
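
A hedged test sketch (not in the patch) showing how a per-policy test could lean on the shared constants above; it assumes SpaceViolationPolicyEnforcement exposes a check(Mutation) method that throws SpaceLimitingException for disallowed operations (that method is not visible in this excerpt), and it skips the initialize() call a real test would likely perform.

package org.apache.hadoop.hbase.quotas.policies;

import static org.junit.Assert.fail;

import org.apache.hadoop.hbase.quotas.SpaceLimitingException;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicyEnforcement;
import org.junit.Test;

public class NoInsertsPolicyUsageSketch extends BaseViolationPolicyEnforcement {

  @Test
  public void deletesAllowedButPutsRejected() throws Exception {
    SpaceViolationPolicyEnforcement policy = new NoInsertsViolationPolicyEnforcement();
    // NO_INSERTS is about adding data: deletes reclaim space and should pass.
    policy.check(DELETE);
    try {
      policy.check(PUT);
      fail("Expected the NO_INSERTS policy to reject the Put");
    } catch (SpaceLimitingException expected) {
      // Puts add data while the table is over its space quota.
    }
  }
}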

http://git-wip-us.apache.org/repos/asf/hbase/blob/6f2bee48/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestBulkLoadCheckingViolationPolicyEnforcement.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestBulkLoadCheckingViolationPolicyEnforcement.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestBulkLoadCheckingViolationPolicyEnforcement.java
new file mode 100644
index 000..abe1b9d
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/policies/TestBulkLoadCheckingViolationPolicyEnforcement.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas.policies;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.quotas.SpaceLimitingException;
+import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
+import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus;
+import org.apache.hadoop.hbase.quotas.SpaceViolationPolicyEnforcement;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestBulkLoadCheckingViolationPolicyEnforcement {
+
+  FileSystem fs;
+  RegionServerServices rss;
+  TableName tableName;
+  SpaceViolationPolicyEnforcement policy;
+
+  @Before
+  public void setup() {
+fs = mock(FileSystem.class);
+rss = mock(RegionServerServices.class);
+tableName = TableName.valueOf("foo");
+policy = new BulkLoadVerifyingViolationPolicyEnforcement();
+  }
+
+  @Test
+  public void testFilesUnderLimit() throws Exception {
+final List paths = new ArrayList<>();
+final List 

[26/50] [abbrv] hbase git commit: HBASE-17428 Implement informational RPCs for space quotas

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/df670806/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
index e90c934..c70b736 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
@@ -10429,7 +10429,7 @@ public final class RegionServerStatusProtos {
 return memoizedHashCode;
   }
   int hash = 41;
-  hash = (19 * hash) + getDescriptorForType().hashCode();
+  hash = (19 * hash) + getDescriptor().hashCode();
   if (hasRegion()) {
 hash = (37 * hash) + REGION_FIELD_NUMBER;
 hash = (53 * hash) + getRegion().hashCode();
@@ -10824,7 +10824,7 @@ public final class RegionServerStatusProtos {
* optional .hbase.pb.RegionInfo region = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
 
   getRegionFieldBuilder() {
 if (regionBuilder_ == null) {
   regionBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -10940,7 +10940,7 @@ public final class RegionServerStatusProtos {
 /**
  * repeated .hbase.pb.RegionSpaceUse space_use = 1;
  */
-
java.util.List
+
java.util.List
 
 getSpaceUseList();
 /**
  * repeated .hbase.pb.RegionSpaceUse space_use = 1;
@@ -10953,7 +10953,7 @@ public final class RegionServerStatusProtos {
 /**
  * repeated .hbase.pb.RegionSpaceUse space_use = 1;
  */
-java.util.List
+java.util.List
 
 getSpaceUseOrBuilderList();
 /**
  * repeated .hbase.pb.RegionSpaceUse space_use = 1;
@@ -11056,7 +11056,7 @@ public final class RegionServerStatusProtos {
 /**
  * repeated .hbase.pb.RegionSpaceUse space_use = 1;
  */
-public java.util.List
+public java.util.List
 
 getSpaceUseOrBuilderList() {
   return spaceUse_;
 }
@@ -11142,7 +11142,7 @@ public final class RegionServerStatusProtos {
 return memoizedHashCode;
   }
   int hash = 41;
-  hash = (19 * hash) + getDescriptorForType().hashCode();
+  hash = (19 * hash) + getDescriptor().hashCode();
   if (getSpaceUseCount() > 0) {
 hash = (37 * hash) + SPACE_USE_FIELD_NUMBER;
 hash = (53 * hash) + getSpaceUseList().hashCode();
@@ -11368,7 +11368,7 @@ public final class RegionServerStatusProtos {
   spaceUseBuilder_ = null;
   spaceUse_ = other.spaceUse_;
   bitField0_ = (bitField0_ & ~0x0001);
-  spaceUseBuilder_ =
+  spaceUseBuilder_ = 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
 ?
getSpaceUseFieldBuilder() : null;
 } else {
@@ -11604,7 +11604,7 @@ public final class RegionServerStatusProtos {
   /**
* repeated .hbase.pb.RegionSpaceUse space_use = 1;
*/
-  public java.util.List
+  public java.util.List
 
getSpaceUseOrBuilderList() {
 if (spaceUseBuilder_ != null) {
   return spaceUseBuilder_.getMessageOrBuilderList();
@@ -11630,12 +11630,12 @@ public final class RegionServerStatusProtos {
   /**
* repeated .hbase.pb.RegionSpaceUse space_use = 1;
*/
-  public 
java.util.List
+  public 
java.util.List
 
getSpaceUseBuilderList() {
 return getSpaceUseFieldBuilder().getBuilderList();
   }
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse,
 

[16/50] [abbrv] hbase git commit: HBASE-16999 Implement master and regionserver synchronization of quota state

2017-05-19 Thread elserj
HBASE-16999 Implement master and regionserver synchronization of quota state

* Implement the RegionServer reading violation from the quota table
* Implement the Master reporting violations to the quota table
* RegionServers need to track their enforced policies
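
A rough sketch of the RegionServer-side half described above, under the assumption that the violation policy is read straight from the hbase:quota table's "u:v" cells (the column layout documented in the QuotaTableUtil change later in this message); the helper class, method name, and deserialization left as raw bytes are illustrative only, not the patch's SpaceQuotaViolationPolicyRefresherChore.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class ViolationPolicyRefreshSketch {
  private static final byte[] USAGE_FAMILY = Bytes.toBytes("u");
  private static final byte[] POLICY_QUALIFIER = Bytes.toBytes("v");
  private static final String TABLE_ROW_PREFIX = "t.";

  /** Reads the serialized violation policy for every table that has one. */
  static Map<TableName, byte[]> readViolationPolicies(Connection conn) throws IOException {
    Map<TableName, byte[]> policies = new HashMap<>();
    try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME);
         ResultScanner scanner = quotaTable.getScanner(
             new Scan().addColumn(USAGE_FAMILY, POLICY_QUALIFIER))) {
      for (Result result : scanner) {
        String row = Bytes.toString(result.getRow());
        if (!row.startsWith(TABLE_ROW_PREFIX)) {
          continue; // only "t.<table>" rows carry per-table violation policies
        }
        TableName table = TableName.valueOf(row.substring(TABLE_ROW_PREFIX.length()));
        policies.put(table, result.getValue(USAGE_FAMILY, POLICY_QUALIFIER));
      }
    }
    return policies;
  }
}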


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f6cc45e5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f6cc45e5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f6cc45e5

Branch: refs/heads/HBASE-16961
Commit: f6cc45e530e677dc3e1e4425cf34398185f71b7a
Parents: 851fb37
Author: Josh Elser 
Authored: Fri Nov 18 15:38:19 2016 -0500
Committer: Josh Elser 
Committed: Fri May 19 12:17:45 2017 -0400

--
 .../hadoop/hbase/quotas/QuotaTableUtil.java |  92 -
 .../org/apache/hadoop/hbase/master/HMaster.java |  35 +++-
 .../hadoop/hbase/quotas/QuotaObserverChore.java |   5 +-
 .../hbase/quotas/RegionServerQuotaManager.java  | 200 ---
 .../quotas/RegionServerRpcQuotaManager.java | 200 +++
 .../quotas/RegionServerSpaceQuotaManager.java   | 169 
 .../quotas/SpaceQuotaViolationNotifier.java |  16 +-
 .../SpaceQuotaViolationNotifierFactory.java |  62 ++
 .../SpaceQuotaViolationNotifierForTest.java |   4 +
 ...SpaceQuotaViolationPolicyRefresherChore.java | 154 ++
 .../TableSpaceQuotaViolationNotifier.java   |  55 +
 .../hbase/regionserver/HRegionServer.java   |  21 +-
 .../hbase/regionserver/RSRpcServices.java   |   7 +-
 .../regionserver/RegionServerServices.java  |  12 +-
 .../hadoop/hbase/MockRegionServerServices.java  |  10 +-
 .../hadoop/hbase/master/MockRegionServer.java   |  10 +-
 .../TestQuotaObserverChoreWithMiniCluster.java  |   2 +
 .../hadoop/hbase/quotas/TestQuotaTableUtil.java |  47 +
 .../hadoop/hbase/quotas/TestQuotaThrottle.java  |   4 +-
 .../TestRegionServerSpaceQuotaManager.java  | 127 
 ...SpaceQuotaViolationPolicyRefresherChore.java | 131 
 .../TestTableSpaceQuotaViolationNotifier.java   | 144 +
 22 files changed, 1281 insertions(+), 226 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f6cc45e5/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
index 8ef4f08..b5eac48 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
@@ -24,16 +24,20 @@ import java.io.IOException;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
@@ -44,7 +48,12 @@ import org.apache.hadoop.hbase.filter.QualifierFilter;
 import org.apache.hadoop.hbase.filter.RegexStringComparator;
 import org.apache.hadoop.hbase.filter.RowFilter;
 import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Strings;
 
@@ -53,9 +62,8 @@ import org.apache.hadoop.hbase.util.Strings;
  * 
 *   ROW-KEY       FAM/QUAL    DATA
 *   n.<namespace> q:s         <global-quotas>
- *   n.<namespace> u:du        <size in bytes>
 *   t.<table>     q:s         <global-quotas>
- *   t.<table>     u:du        <size in bytes>
+ *   t.<table>     u:v         <space violation policy>
 *   u.<user>      q:s         <global-quotas>
  

[38/50] [abbrv] hbase git commit: HBASE-17002 JMX metrics and some UI additions for space quotas

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/3a2ec0bd/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
index d56def5..4577bcf 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
@@ -13024,6 +13024,3031 @@ public final class QuotaProtos {
 
   }
 
+  public interface GetQuotaStatesRequestOrBuilder extends
+  // 
@@protoc_insertion_point(interface_extends:hbase.pb.GetQuotaStatesRequest)
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetQuotaStatesRequest}
+   */
+  public  static final class GetQuotaStatesRequest extends
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+  // 
@@protoc_insertion_point(message_implements:hbase.pb.GetQuotaStatesRequest)
+  GetQuotaStatesRequestOrBuilder {
+// Use GetQuotaStatesRequest.newBuilder() to construct.
+private 
GetQuotaStatesRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder
 builder) {
+  super(builder);
+}
+private GetQuotaStatesRequest() {
+}
+
+@java.lang.Override
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private GetQuotaStatesRequest(
+org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+  this();
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder 
unknownFields =
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+  }
+}
+  } catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+e).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_GetQuotaStatesRequest_descriptor;
+}
+
+protected 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_GetQuotaStatesRequest_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest.class,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest.Builder.class);
+}
+
+private byte memoizedIsInitialized = -1;
+public final boolean isInitialized() {
+  byte isInitialized = memoizedIsInitialized;
+  if (isInitialized == 1) return true;
+  if (isInitialized == 0) return false;
+
+  memoizedIsInitialized = 1;
+  return true;
+}
+
+public void 
writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream 
output)
+throws java.io.IOException {
+  unknownFields.writeTo(output);
+}
+
+public int getSerializedSize() {
+  int size = memoizedSize;
+  if (size != -1) return size;
+
+  size = 0;
+  size += unknownFields.getSerializedSize();
+  memoizedSize = size;
+  return size;
+}
+
+private static final long serialVersionUID = 0L;
+@java.lang.Override
+public boolean equals(final java.lang.Object obj) {
+  if (obj == this) {
+   return true;
+ 

[11/50] [abbrv] hbase git commit: HBASE-17000 Implement computation of online region sizes and report to the Master

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b6b40d4/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto 
b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
index 1c373ee..23ddd43 100644
--- a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
@@ -141,6 +141,22 @@ message SplitTableRegionResponse {
   optional uint64 proc_id = 1;
 }
 
+message RegionSpaceUse {
+  optional RegionInfo region = 1; // A region identifier
+  optional uint64 size = 2; // The size in bytes of the region
+}
+
+/**
+ * Reports filesystem usage for regions.
+ */
+message RegionSpaceUseReportRequest {
+  repeated RegionSpaceUse space_use = 1;
+}
+
+message RegionSpaceUseReportResponse {
+
+}
+
 service RegionServerStatusService {
   /** Called when a region server first starts. */
   rpc RegionServerStartup(RegionServerStartupRequest)
@@ -182,4 +198,10 @@ service RegionServerStatusService {
*/
   rpc getProcedureResult(GetProcedureResultRequest)
 returns(GetProcedureResultResponse);
+
+  /**
+   * Reports Region filesystem space use
+   */
+  rpc ReportRegionSpaceUse(RegionSpaceUseReportRequest)
+returns(RegionSpaceUseReportResponse);
 }
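
A hedged sketch of how a RegionServer-side reporter might assemble the new request message defined above; the map of per-region sizes and the class/method names here are illustrative, not the patch's FileSystemUtilizationChore.

import java.util.Map;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;

public class RegionSpaceUseReportSketch {
  /** Converts locally computed region sizes into the RPC request message. */
  static RegionSpaceUseReportRequest buildReport(Map<HRegionInfo, Long> regionSizes) {
    RegionSpaceUseReportRequest.Builder request = RegionSpaceUseReportRequest.newBuilder();
    for (Map.Entry<HRegionInfo, Long> entry : regionSizes.entrySet()) {
      request.addSpaceUse(RegionSpaceUse.newBuilder()
          .setRegion(HRegionInfo.convert(entry.getKey())) // HRegionInfo -> protobuf RegionInfo
          .setSize(entry.getValue())
          .build());
    }
    return request.build();
  }
}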

http://git-wip-us.apache.org/repos/asf/hbase/blob/0b6b40d4/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index baf962d..db48bdb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
 import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
@@ -94,6 +95,9 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
@@ -1901,4 +1905,19 @@ public class MasterRpcServices extends RSRpcServices
   throw new ServiceException(e);
 }
   }
+
+  @Override
+  public RegionSpaceUseReportResponse reportRegionSpaceUse(RpcController 
controller,
+  RegionSpaceUseReportRequest request) throws ServiceException {
+try {
+  master.checkInitialized();
+  MasterQuotaManager quotaManager = this.master.getMasterQuotaManager();
+  for (RegionSpaceUse report : request.getSpaceUseList()) {
+quotaManager.addRegionSize(HRegionInfo.convert(report.getRegion()), 
report.getSize());
+  }
+  return RegionSpaceUseReportResponse.newBuilder().build();
+} catch (Exception e) {
+  throw new ServiceException(e);
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0b6b40d4/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
new file mode 100644
index 000..01540eb
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * 

[21/50] [abbrv] hbase git commit: HBASE-17001 Enforce quota violation policies in the RegionServer

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/6f2bee48/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
index cc40536..d466e59 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
@@ -5778,6 +5778,1284 @@ public final class QuotaProtos {
 // @@protoc_insertion_point(class_scope:hbase.pb.SpaceLimitRequest)
   }
 
+  public interface SpaceQuotaStatusOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// optional .hbase.pb.SpaceViolationPolicy policy = 1;
+/**
+ * optional .hbase.pb.SpaceViolationPolicy policy = 1;
+ */
+boolean hasPolicy();
+/**
+ * optional .hbase.pb.SpaceViolationPolicy policy = 1;
+ */
+
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy 
getPolicy();
+
+// optional bool in_violation = 2;
+/**
+ * optional bool in_violation = 2;
+ */
+boolean hasInViolation();
+/**
+ * optional bool in_violation = 2;
+ */
+boolean getInViolation();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.SpaceQuotaStatus}
+   *
+   * 
+   * Represents the state of a quota on a table. Either the quota is not in violation
+   * or it is in violation and there is a violation policy which should be in effect.
+   * 
+   */
+  public static final class SpaceQuotaStatus extends
+  com.google.protobuf.GeneratedMessage
+  implements SpaceQuotaStatusOrBuilder {
+// Use SpaceQuotaStatus.newBuilder() to construct.
+private SpaceQuotaStatus(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private SpaceQuotaStatus(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final SpaceQuotaStatus defaultInstance;
+public static SpaceQuotaStatus getDefaultInstance() {
+  return defaultInstance;
+}
+
+public SpaceQuotaStatus getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private SpaceQuotaStatus(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 8: {
+  int rawValue = input.readEnum();
+  
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy 
value = 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.valueOf(rawValue);
+  if (value == null) {
+unknownFields.mergeVarintField(1, rawValue);
+  } else {
+bitField0_ |= 0x0001;
+policy_ = value;
+  }
+  break;
+}
+case 16: {
+  bitField0_ |= 0x0002;
+  inViolation_ = input.readBool();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuotaStatus_descriptor;
+}
+
+protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 

[03/50] [abbrv] hbase git commit: HBASE-16995 Build client Java API and client protobuf messages (Josh Elser)

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/0224ea1e/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
index 05894b9..1925828 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
@@ -217,12 +217,20 @@ public final class QuotaProtos {
  * THROTTLE = 1;
  */
 THROTTLE(0, 1),
+/**
+ * SPACE = 2;
+ */
+SPACE(1, 2),
 ;
 
 /**
  * THROTTLE = 1;
  */
 public static final int THROTTLE_VALUE = 1;
+/**
+ * SPACE = 2;
+ */
+public static final int SPACE_VALUE = 2;
 
 
 public final int getNumber() { return value; }
@@ -230,6 +238,7 @@ public final class QuotaProtos {
 public static QuotaType valueOf(int value) {
   switch (value) {
 case 1: return THROTTLE;
+case 2: return SPACE;
 default: return null;
   }
 }
@@ -281,6 +290,142 @@ public final class QuotaProtos {
 // @@protoc_insertion_point(enum_scope:hbase.pb.QuotaType)
   }
 
+  /**
+   * Protobuf enum {@code hbase.pb.SpaceViolationPolicy}
+   *
+   * 
+   * Defines what action should be taken when the SpaceQuota is violated
+   * 
+   */
+  public enum SpaceViolationPolicy
+  implements com.google.protobuf.ProtocolMessageEnum {
+/**
+ * DISABLE = 1;
+ *
+ * 
+ * Disable the table(s)
+ * 
+ */
+DISABLE(0, 1),
+/**
+ * NO_WRITES_COMPACTIONS = 2;
+ *
+ * 
+ * No writes, bulk-loads, or compactions
+ * 
+ */
+NO_WRITES_COMPACTIONS(1, 2),
+/**
+ * NO_WRITES = 3;
+ *
+ * 
+ * No writes or bulk-loads
+ * 
+ */
+NO_WRITES(2, 3),
+/**
+ * NO_INSERTS = 4;
+ *
+ * 
+ * No puts or bulk-loads, but deletes are allowed
+ * 
+ */
+NO_INSERTS(3, 4),
+;
+
+/**
+ * DISABLE = 1;
+ *
+ * 
+ * Disable the table(s)
+ * 
+ */
+public static final int DISABLE_VALUE = 1;
+/**
+ * NO_WRITES_COMPACTIONS = 2;
+ *
+ * 
+ * No writes, bulk-loads, or compactions
+ * 
+ */
+public static final int NO_WRITES_COMPACTIONS_VALUE = 2;
+/**
+ * NO_WRITES = 3;
+ *
+ * 
+ * No writes or bulk-loads
+ * 
+ */
+public static final int NO_WRITES_VALUE = 3;
+/**
+ * NO_INSERTS = 4;
+ *
+ * 
+ * No puts or bulk-loads, but deletes are allowed
+ * 
+ */
+public static final int NO_INSERTS_VALUE = 4;
+
+
+public final int getNumber() { return value; }
+
+public static SpaceViolationPolicy valueOf(int value) {
+  switch (value) {
+case 1: return DISABLE;
+case 2: return NO_WRITES_COMPACTIONS;
+case 3: return NO_WRITES;
+case 4: return NO_INSERTS;
+default: return null;
+  }
+}
+
+public static 
com.google.protobuf.Internal.EnumLiteMap
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static 
com.google.protobuf.Internal.EnumLiteMap
+internalValueMap =
+  new com.google.protobuf.Internal.EnumLiteMap() 
{
+public SpaceViolationPolicy findValueByNumber(int number) {
+  return SpaceViolationPolicy.valueOf(number);
+}
+  };
+
+public final com.google.protobuf.Descriptors.EnumValueDescriptor
+getValueDescriptor() {
+  return getDescriptor().getValues().get(index);
+}
+public final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptorForType() {
+  return getDescriptor();
+}
+public static final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.getDescriptor().getEnumTypes().get(3);
+}
+
+private static final SpaceViolationPolicy[] VALUES = values();
+
+public static SpaceViolationPolicy valueOf(
+com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+  if (desc.getType() != getDescriptor()) {
+throw new java.lang.IllegalArgumentException(
+  "EnumValueDescriptor is not for this type.");
+  }
+  return VALUES[desc.getIndex()];
+}
+
+private final int index;
+private final int value;
+
+private SpaceViolationPolicy(int index, int value) {
+  this.index = index;
+  this.value = value;
+}
+
+// @@protoc_insertion_point(enum_scope:hbase.pb.SpaceViolationPolicy)
+  }
+
   public interface TimedQuotaOrBuilder
   extends com.google.protobuf.MessageOrBuilder {
 
@@ -3315,6 +3460,20 @@ public final class QuotaProtos {
 

[06/50] [abbrv] hbase git commit: HBASE-16995 Build client Java API and client protobuf messages (Josh Elser)

2017-05-19 Thread elserj
HBASE-16995 Build client Java API and client protobuf messages (Josh Elser)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0224ea1e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0224ea1e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0224ea1e

Branch: refs/heads/HBASE-16961
Commit: 0224ea1e6502ee2e02676f3ed48810cdfa8fea06
Parents: 5c728b0
Author: tedyu 
Authored: Thu Nov 17 10:19:52 2016 -0800
Committer: Josh Elser 
Committed: Fri May 19 11:56:05 2017 -0400

--
 .../hbase/quotas/QuotaSettingsFactory.java  |   47 +
 .../apache/hadoop/hbase/quotas/QuotaType.java   |1 +
 .../hadoop/hbase/quotas/SpaceLimitSettings.java |  166 ++
 .../hbase/quotas/SpaceViolationPolicy.java  |   44 +
 .../hbase/shaded/protobuf/ProtobufUtil.java |   51 +
 .../hbase/quotas/TestQuotaSettingsFactory.java  |  148 ++
 .../hbase/quotas/TestSpaceLimitSettings.java|  119 ++
 .../shaded/protobuf/generated/MasterProtos.java | 1310 +++--
 .../shaded/protobuf/generated/QuotaProtos.java  | 1739 +-
 .../src/main/protobuf/Master.proto  |2 +
 .../src/main/protobuf/Quota.proto   |   21 +
 .../hbase/protobuf/generated/QuotaProtos.java   | 1682 -
 hbase-protocol/src/main/protobuf/Quota.proto|   21 +
 13 files changed, 4654 insertions(+), 697 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0224ea1e/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
index 3622a32..8512e39 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
@@ -27,6 +27,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRe
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
 
 @InterfaceAudience.Public
 public class QuotaSettingsFactory {
@@ -89,6 +90,9 @@ public class QuotaSettingsFactory {
 if (quotas.getBypassGlobals() == true) {
   settings.add(new QuotaGlobalsSettingsBypass(userName, tableName, 
namespace, true));
 }
+if (quotas.hasSpace()) {
+  settings.add(fromSpace(tableName, namespace, quotas.getSpace()));
+}
 return settings;
   }
 
@@ -122,6 +126,18 @@ public class QuotaSettingsFactory {
 return settings;
   }
 
+  static QuotaSettings fromSpace(TableName table, String namespace, SpaceQuota 
protoQuota) {
+if ((null == table && null == namespace) || (null != table && null != 
namespace)) {
+  throw new IllegalArgumentException("Can only construct 
SpaceLimitSettings for a table or namespace.");
+}
+if (null != table) {
+  return SpaceLimitSettings.fromSpaceQuota(table, protoQuota);
+} else {
+  // namespace must be non-null
+  return SpaceLimitSettings.fromSpaceQuota(namespace, protoQuota);
+}
+  }
+
   /* ==
*  RPC Throttle
*/
@@ -278,4 +294,35 @@ public class QuotaSettingsFactory {
   public static QuotaSettings bypassGlobals(final String userName, final 
boolean bypassGlobals) {
 return new QuotaGlobalsSettingsBypass(userName, null, null, bypassGlobals);
   }
+
+  /* ==
+   *  FileSystem Space Settings
+   */
+
+  /**
+   * Creates a {@link QuotaSettings} object to limit the FileSystem space 
usage for the given table to the given size in bytes.
+   * When the space usage is exceeded by the table, the provided {@link 
SpaceViolationPolicy} is enacted on the table.
+   *
+   * @param tableName The name of the table on which the quota should be 
applied.
+   * @param sizeLimit The limit of a table's size in bytes.
+   * @param violationPolicy The action to take when the quota is exceeded.
+   * @return An {@link QuotaSettings} object.
+   */
+  public static QuotaSettings limitTableSpace(final TableName tableName, long 
sizeLimit, final SpaceViolationPolicy violationPolicy) {
+return new SpaceLimitSettings(tableName, sizeLimit, violationPolicy);
+  }
+
+  /**
+   * Creates a {@link QuotaSettings} object to limit the FileSystem space 
usage for the given namespace to the given size in 
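
A hedged end-to-end usage sketch for the new client API above: define a 50 GB table space quota and apply it through Admin.setQuota(). The connection bootstrap is the standard client pattern and the table name is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;

public class SetTableSpaceQuotaExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      QuotaSettings quota = QuotaSettingsFactory.limitTableSpace(
          TableName.valueOf("my_table"),          // table to limit (illustrative name)
          50L * 1024 * 1024 * 1024,               // 50 GB limit, in bytes
          SpaceViolationPolicy.NO_WRITES);        // reject writes once the limit is exceeded
      admin.setQuota(quota);
    }
  }
}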

[30/50] [abbrv] hbase git commit: HBASE-17428 Implement informational RPCs for space quotas

2017-05-19 Thread elserj
HBASE-17428 Implement informational RPCs for space quotas

Create some RPCs that can expose the in-memory state that the
RegionServers and Master hold to drive the space quota "state machine".
Then, create some hbase shell commands to interact with those.
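
A hedged sketch of driving the new Master RPC from the client side, mirroring the stub wiring shown in the ConnectionImplementation diff below; how the MasterService.BlockingInterface stub is obtained is left out, since that is an internal detail of the connection.

import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;

public class SpaceQuotaRegionSizesCallSketch {
  static GetSpaceQuotaRegionSizesResponse fetchRegionSizes(
      MasterService.BlockingInterface masterStub, RpcController controller)
      throws ServiceException {
    // Build the request with defaults; the response exposes the Master's in-memory
    // view of region space use that backs the new shell commands.
    GetSpaceQuotaRegionSizesRequest request = GetSpaceQuotaRegionSizesRequest.newBuilder().build();
    return masterStub.getSpaceQuotaRegionSizes(controller, request);
  }
}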


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/df670806
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/df670806
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/df670806

Branch: refs/heads/HBASE-16961
Commit: df67080697fdf8ac7f02d5b6905c25c728d26d0b
Parents: dfe7a70
Author: Josh Elser 
Authored: Tue Feb 21 15:36:39 2017 -0500
Committer: Josh Elser 
Committed: Fri May 19 12:25:19 2017 -0400

--
 .../hbase/client/ConnectionImplementation.java  |9 +
 .../hadoop/hbase/client/QuotaStatusCalls.java   |  125 +
 .../client/ShortCircuitMasterConnection.java|7 +
 .../hadoop/hbase/quotas/QuotaTableUtil.java |   77 +
 .../hbase/shaded/protobuf/RequestConverter.java |   33 +
 .../shaded/protobuf/generated/AdminProtos.java  |  650 +-
 .../shaded/protobuf/generated/MasterProtos.java |   88 +-
 .../shaded/protobuf/generated/QuotaProtos.java  | 5986 +-
 .../generated/RegionServerStatusProtos.java |   28 +-
 .../src/main/protobuf/Admin.proto   |9 +
 .../src/main/protobuf/Master.proto  |4 +
 .../src/main/protobuf/Quota.proto   |   35 +
 .../hbase/protobuf/generated/QuotaProtos.java   |6 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   60 +
 .../hbase/quotas/ActivePolicyEnforcement.java   |8 +
 .../hbase/regionserver/RSRpcServices.java   |   57 +
 .../hadoop/hbase/master/MockRegionServer.java   |   18 +
 .../hbase/quotas/TestQuotaStatusRPCs.java   |  192 +
 hbase-shell/src/main/ruby/hbase/quotas.rb   |   16 +
 hbase-shell/src/main/ruby/shell.rb  |3 +
 .../ruby/shell/commands/list_quota_snapshots.rb |   59 +
 .../shell/commands/list_quota_table_sizes.rb|   47 +
 .../shell/commands/list_quota_violations.rb |   48 +
 hbase-shell/src/test/ruby/hbase/quotas_test.rb  |   24 -
 .../test/ruby/hbase/quotas_test_no_cluster.rb   |   69 +
 25 files changed, 7212 insertions(+), 446 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/df670806/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 564cfb4..5242efc 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -92,6 +92,8 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCa
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
@@ -1750,6 +1752,13 @@ class ConnectionImplementation implements 
ClusterConnection, Closeable {
   ListReplicationPeersRequest request) throws ServiceException {
 return stub.listReplicationPeers(controller, request);
   }
+
+  @Override
+  public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes(
+  RpcController controller, GetSpaceQuotaRegionSizesRequest request)
+  throws ServiceException {
+return stub.getSpaceQuotaRegionSizes(controller, request);
+  }
 };
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/df670806/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/QuotaStatusCalls.java
new file mode 100644
index 000..f0f385d
--- 

[08/50] [abbrv] hbase git commit: HBASE-16998 Implement Master-side analysis of region space reports

2017-05-19 Thread elserj
HBASE-16998 Implement Master-side analysis of region space reports

Adds a new Chore to the Master that analyzes the reports that are
sent by RegionServers. The Master must then, for all tables with
quotas, determine the tables that are violating quotas and move
those tables into violation. Similarly, tables no longer violating
the quota can be moved out of violation.

The Chore is the "stateful" bit, managing which tables are and
are not in violation. Everything else is just performing
computation and informing the Chore on the updated state.

Added InterfaceAudience annotations and clean up the QuotaObserverChore
constructor. Cleaned up some javadoc and QuotaObserverChore. Reuse
the QuotaViolationStore impl objects.
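
A deliberately simplified sketch of the per-table decision described above; the usage/limit maps and the callback interface are stand-ins for the patch's QuotaViolationStore and SpaceQuotaViolationNotifier types, not their real APIs.

import java.util.Map;

import org.apache.hadoop.hbase.TableName;

public class QuotaObserverDecisionSketch {
  interface ViolationSink {
    void moveIntoViolation(TableName table);
    void moveOutOfViolation(TableName table);
  }

  static void observe(Map<TableName, Long> usageByTable, Map<TableName, Long> limitByTable,
      ViolationSink sink) {
    for (Map.Entry<TableName, Long> entry : usageByTable.entrySet()) {
      TableName table = entry.getKey();
      Long limit = limitByTable.get(table);
      if (limit == null) {
        continue; // no space quota defined for this table
      }
      if (entry.getValue() > limit) {
        sink.moveIntoViolation(table);   // usage exceeds the quota: enact the policy
      } else {
        sink.moveOutOfViolation(table);  // back under the limit: lift the policy
      }
    }
  }
}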


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/851fb37a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/851fb37a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/851fb37a

Branch: refs/heads/HBASE-16961
Commit: 851fb37a9805c4e17bc028d5ddfcc28045ed524e
Parents: 1ff16d4
Author: Josh Elser 
Authored: Tue Nov 8 18:55:12 2016 -0500
Committer: Josh Elser 
Committed: Fri May 19 11:56:05 2017 -0400

--
 .../hadoop/hbase/quotas/QuotaRetriever.java |  27 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  20 +
 .../hadoop/hbase/quotas/MasterQuotaManager.java |   1 +
 .../quotas/NamespaceQuotaViolationStore.java| 127 
 .../hadoop/hbase/quotas/QuotaObserverChore.java | 618 +++
 .../hbase/quotas/QuotaViolationStore.java   |  89 +++
 .../quotas/SpaceQuotaViolationNotifier.java |  44 ++
 .../SpaceQuotaViolationNotifierForTest.java |  50 ++
 .../hbase/quotas/TableQuotaViolationStore.java  | 127 
 .../TestNamespaceQuotaViolationStore.java   | 156 +
 .../hbase/quotas/TestQuotaObserverChore.java| 106 
 .../TestQuotaObserverChoreWithMiniCluster.java  | 596 ++
 .../hadoop/hbase/quotas/TestQuotaTableUtil.java |   4 -
 .../quotas/TestTableQuotaViolationStore.java| 151 +
 .../hbase/quotas/TestTablesWithQuotas.java  | 198 ++
 15 files changed, 2305 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/851fb37a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java
index 0f7baa5..4482693 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java
@@ -22,6 +22,7 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.Objects;
 import java.util.Queue;
 
 import org.apache.commons.logging.Log;
@@ -54,11 +55,23 @@ public class QuotaRetriever implements Closeable, 
Iterable {
   private Connection connection;
   private Table table;
 
-  private QuotaRetriever() {
+  /**
+   * Should QuotaRetriever manage the state of the connection, or leave it be.
+   */
+  private boolean isManagedConnection = false;
+
+  QuotaRetriever() {
   }
 
   void init(final Configuration conf, final Scan scan) throws IOException {
-this.connection = ConnectionFactory.createConnection(conf);
+// Set this before creating the connection and passing it down to make sure
+// it's cleaned up if we fail to construct the Scanner.
+this.isManagedConnection = true;
+init(ConnectionFactory.createConnection(conf), scan);
+  }
+
+  void init(final Connection conn, final Scan scan) throws IOException {
+this.connection = Objects.requireNonNull(conn);
 this.table = this.connection.getTable(QuotaTableUtil.QUOTA_TABLE_NAME);
 try {
   scanner = table.getScanner(scan);
@@ -77,10 +90,14 @@ public class QuotaRetriever implements Closeable, 
Iterable {
   this.table.close();
   this.table = null;
 }
-if (this.connection != null) {
-  this.connection.close();
-  this.connection = null;
+// Null out the connection on close() even if we didn't explicitly close it
+// to maintain typical semantics.
+if (isManagedConnection) {
+  if (this.connection != null) {
+this.connection.close();
+  }
 }
+this.connection = null;
   }
 
   public QuotaSettings next() throws IOException {
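
A hedged usage sketch for QuotaRetriever: the existing QuotaRetriever.open(conf) entry point builds and manages its own connection, while the package-private init(Connection, Scan) added above serves callers that already hold a connection (such as the new Master-side chore).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.quotas.QuotaRetriever;
import org.apache.hadoop.hbase.quotas.QuotaSettings;

public class ListQuotasExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (QuotaRetriever quotas = QuotaRetriever.open(conf)) {
      for (QuotaSettings settings : quotas) {
        // Each entry describes one configured quota (user, table, or namespace scoped).
        System.out.println(settings);
      }
    }
  }
}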

http://git-wip-us.apache.org/repos/asf/hbase/blob/851fb37a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 

[04/50] [abbrv] hbase git commit: HBASE-16995 Build client Java API and client protobuf messages (Josh Elser)

2017-05-19 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/0224ea1e/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
index 01ba8f6..e3c6bfd 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
@@ -239,12 +239,20 @@ public final class QuotaProtos {
  * THROTTLE = 1;
  */
 THROTTLE(1),
+/**
+ * SPACE = 2;
+ */
+SPACE(2),
 ;
 
 /**
  * THROTTLE = 1;
  */
 public static final int THROTTLE_VALUE = 1;
+/**
+ * SPACE = 2;
+ */
+public static final int SPACE_VALUE = 2;
 
 
 public final int getNumber() {
@@ -262,6 +270,7 @@ public final class QuotaProtos {
 public static QuotaType forNumber(int value) {
   switch (value) {
 case 1: return THROTTLE;
+case 2: return SPACE;
 default: return null;
   }
 }
@@ -311,6 +320,150 @@ public final class QuotaProtos {
 // @@protoc_insertion_point(enum_scope:hbase.pb.QuotaType)
   }
 
+  /**
+   * 
+   * Defines what action should be taken when the SpaceQuota is violated
+   * 
+   *
+   * Protobuf enum {@code hbase.pb.SpaceViolationPolicy}
+   */
+  public enum SpaceViolationPolicy
+  implements 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum {
+/**
+ * 
+ * Disable the table(s)
+ * 
+ *
+ * DISABLE = 1;
+ */
+DISABLE(1),
+/**
+ * 
+ * No writes, bulk-loads, or compactions
+ * 
+ *
+ * NO_WRITES_COMPACTIONS = 2;
+ */
+NO_WRITES_COMPACTIONS(2),
+/**
+ * 
+ * No writes or bulk-loads
+ * 
+ *
+ * NO_WRITES = 3;
+ */
+NO_WRITES(3),
+/**
+ * 
+ * No puts or bulk-loads, but deletes are allowed
+ * 
+ *
+ * NO_INSERTS = 4;
+ */
+NO_INSERTS(4),
+;
+
+/**
+ * 
+ * Disable the table(s)
+ * 
+ *
+ * DISABLE = 1;
+ */
+public static final int DISABLE_VALUE = 1;
+/**
+ * 
+ * No writes, bulk-loads, or compactions
+ * 
+ *
+ * NO_WRITES_COMPACTIONS = 2;
+ */
+public static final int NO_WRITES_COMPACTIONS_VALUE = 2;
+/**
+ * 
+ * No writes or bulk-loads
+ * 
+ *
+ * NO_WRITES = 3;
+ */
+public static final int NO_WRITES_VALUE = 3;
+/**
+ * 
+ * No puts or bulk-loads, but deletes are allowed
+ * 
+ *
+ * NO_INSERTS = 4;
+ */
+public static final int NO_INSERTS_VALUE = 4;
+
+
+public final int getNumber() {
+  return value;
+}
+
+/**
+ * @deprecated Use {@link #forNumber(int)} instead.
+ */
+@java.lang.Deprecated
+public static SpaceViolationPolicy valueOf(int value) {
+  return forNumber(value);
+}
+
+public static SpaceViolationPolicy forNumber(int value) {
+  switch (value) {
+case 1: return DISABLE;
+case 2: return NO_WRITES_COMPACTIONS;
+case 3: return NO_WRITES;
+case 4: return NO_INSERTS;
+default: return null;
+  }
+}
+
+public static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<
+SpaceViolationPolicy> internalValueMap =
+  new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap()
 {
+public SpaceViolationPolicy findValueByNumber(int number) {
+  return SpaceViolationPolicy.forNumber(number);
+}
+  };
+
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor
+getValueDescriptor() {
+  return getDescriptor().getValues().get(ordinal());
+}
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptorForType() {
+  return getDescriptor();
+}
+public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.getDescriptor().getEnumTypes().get(3);
+}
+
+private static final SpaceViolationPolicy[] VALUES = values();
+
+public static SpaceViolationPolicy valueOf(
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor
 desc) {
+  if (desc.getType() != getDescriptor()) {
+throw new 

hbase git commit: HBASE-17286 Add goal to remote-resources plugin

2017-05-19 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/master 3fe4b28bb -> 5c728b0f0


HBASE-17286 Add goal to remote-resources plugin

With Apache parent pom v12 our remote-resources-plugin execution id was
shadowing the parent declaration, and our configuration would get run to
aggregate LICENSE files correctly. When upgrading to v18, apache changed
the execution id, so our configuration no longer gets used.

Add an explicit goal to our usage of the remote-resources-plugin and
change the name to something more descriptive and less likely to
conflict (either intentionally or not).

Signed-off-by: Josh Elser 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5c728b0f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5c728b0f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5c728b0f

Branch: refs/heads/master
Commit: 5c728b0f0c84a7a30ae1618e611c5ebd836e04b9
Parents: 3fe4b28
Author: Mike Drob 
Authored: Thu May 18 15:19:21 2017 -0700
Committer: Josh Elser 
Committed: Fri May 19 11:37:49 2017 -0400

--
 hbase-assembly/pom.xml | 5 -
 hbase-shaded/pom.xml   | 5 -
 2 files changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5c728b0f/hbase-assembly/pom.xml
--
diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index 19bf5f0..79d154d 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -44,7 +44,10 @@
 1.5
 
   
-            <id>default</id>
+            <id>aggregate-licenses</id>
+            <goals>
+              <goal>process</goal>
+            </goals>
 
   
 ${build.year}

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c728b0f/hbase-shaded/pom.xml
--
diff --git a/hbase-shaded/pom.xml b/hbase-shaded/pom.xml
index 10edf42..520eb61 100644
--- a/hbase-shaded/pom.xml
+++ b/hbase-shaded/pom.xml
@@ -72,7 +72,10 @@
   1.5
   
 
-              <id>default</id>
+              <id>aggregate-licenses</id>
+              <goals>
+                <goal>process</goal>
+              </goals>
   
 
   ${build.year}



[41/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
index f9ae47a..464462c 100644
--- a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
@@ -166,10 +166,10 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase
 org.apache.hadoop.hbase.backup.BackupInfo.BackupState
-org.apache.hadoop.hbase.backup.BackupType
+org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase
 org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand
+org.apache.hadoop.hbase.backup.BackupType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/org/apache/hadoop/hbase/classification/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/classification/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/classification/package-tree.html
index 8aaf388..e86b0c6 100644
--- a/devapidocs/org/apache/hadoop/hbase/classification/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/classification/package-tree.html
@@ -88,12 +88,12 @@
 
 Annotation Type Hierarchy
 
-org.apache.hadoop.hbase.classification.InterfaceStability.Stable (implements 
java.lang.annotation.http://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Annotation.html?is-external=true;
 title="class or interface in java.lang.annotation">Annotation)
-org.apache.hadoop.hbase.classification.InterfaceAudience.Public (implements 
java.lang.annotation.http://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Annotation.html?is-external=true;
 title="class or interface in java.lang.annotation">Annotation)
+org.apache.hadoop.hbase.classification.InterfaceStability.Unstable (implements 
java.lang.annotation.http://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Annotation.html?is-external=true;
 title="class or interface in java.lang.annotation">Annotation)
+org.apache.hadoop.hbase.classification.InterfaceAudience.Private (implements 
java.lang.annotation.http://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Annotation.html?is-external=true;
 title="class or interface in java.lang.annotation">Annotation)
 org.apache.hadoop.hbase.classification.InterfaceStability.Evolving (implements 
java.lang.annotation.http://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Annotation.html?is-external=true;
 title="class or interface in java.lang.annotation">Annotation)
+org.apache.hadoop.hbase.classification.InterfaceAudience.Public (implements 
java.lang.annotation.http://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Annotation.html?is-external=true;
 title="class or interface in java.lang.annotation">Annotation)
 org.apache.hadoop.hbase.classification.InterfaceAudience.LimitedPrivate (implements 
java.lang.annotation.http://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Annotation.html?is-external=true;
 title="class or interface in java.lang.annotation">Annotation)
-org.apache.hadoop.hbase.classification.InterfaceAudience.Private (implements 
java.lang.annotation.http://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Annotation.html?is-external=true;
 title="class or interface in java.lang.annotation">Annotation)
-org.apache.hadoop.hbase.classification.InterfaceStability.Unstable (implements 
java.lang.annotation.http://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Annotation.html?is-external=true;
 title="class or interface in java.lang.annotation">Annotation)
+org.apache.hadoop.hbase.classification.InterfaceStability.Stable (implements 
java.lang.annotation.http://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Annotation.html?is-external=true;
 title="class or interface in java.lang.annotation">Annotation)
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/org/apache/hadoop/hbase/client/ConnectionConfiguration.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ConnectionConfiguration.html 
b/devapidocs/org/apache/hadoop/hbase/client/ConnectionConfiguration.html
index 00e2c2c..3350c1b 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ConnectionConfiguration.html
+++ 

[39/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
 
b/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
index 3a1d268..2b36b31 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
@@ -210,7 +210,7 @@ extends ConnectionImplementation
-abort,
 cacheLocation,
 clearCaches,
 clearRegionCache,
 clearRegionCache,
 clearRegionCache,
 close, deleteCachedRegionLocation,
 finalize,
 getAdmin,
 getAdmin,
 getAsyncProcess,
 getBackoffPolicy,
 getBufferedMutator, getBufferedMutator,
 getCachedLocation,
 getClient,
 getConfiguration,
 getConnectionConfiguration,
 getConnectionMetrics,
 getCurrentBatchPool, getCurrentMetaLookupPool,
 getCurrentNrHRS,
 getKeepAliveMasterService,
 getKeepAliveZooKeeperWatcher,
 getMaster,
 getNewRpcRetryingCallerFactory,
 getNonceGenerator, getNumberOfCachedRegionLocations,
 getRegionLocation,
 getRegionLocator,
 getRpcClient,
 getRpcControllerFactory,
 getRpcRetryingCallerFactory, getStatisticsTracker,
 getTable,
 getTableBuilder,
 getTableState,
 hasCellBlockSupport,
 injectNonceGeneratorForTesting, isAborted,
 isClosed,
 isDeadServer,
 isMasterRunning,
 isTableAvailable,
 isTableEnabled,
 locateRegion,
 locateRegion,
 locateRegion,
 locateRegion,
 locateRegions,
 locateRegions,
 releaseMaster, releaseZooKeeperWatcher,
 relocateRegion,
 relocateRegion,
 retrieveClusterId,
 toString,
 updateCachedLocation, updateCachedLocations
+abort,
 cacheLocation,
 clearCaches,
 clearRegionCache,
 clearRegionCache,
 clearRegionCache,
 close, deleteCachedRegionLocation,
 finalize,
 getAdmin,
 getAdmin,
 getAsyncProcess,
 getBackoffPolicy,
 getBufferedMutator, getBufferedMutator,
 getCachedLocation,
 getClient,
 getConfiguration,
 getConnectionConfiguration,
 getConnectionMetrics,
 getCurrentBatchPool, getCurrentMetaLookupPool,
 getCurrentNrHRS,
 getKeepAliveMasterService,
 getKeepAliveZooKeeperWatcher,
 getMaster,
 getNewRpcRetryingCallerFactory,
 getNonceGenerator, getNumberOfCachedRegionLocations,
 getRegionLocation,
 getRegionLocator,
 getRpcClient,
 getRpcControllerFactory,
 getRpcRetryingCallerFactory, getStatisticsTracker,
 getTable,
 getTableBuilder,
 getTableState,
 hasCellBlockSupport,
 injectNonceGeneratorForTesting, isAborted,
 isClosed,
 isDeadServer,
 isMasterRunning,
 isTableAvailable,
 isTableEnabled,
 locateRegion,
 locateRegion,
 locateRegion,
 locateRegion,
 locateRegions,
 locateRegions,
 releaseMaster, releaseZooKeeperWatcher,
 relocateRegion,
 relocateRegion,
 retrieveClusterId,
 setUseMetaReplicas,
 toString,
 updateCachedLocation,
 updateCachedLocations
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/org/apache/hadoop/hbase/client/CoprocessorHConnection.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/CoprocessorHConnection.html 
b/devapidocs/org/apache/hadoop/hbase/client/CoprocessorHConnection.html
index 5ca8ec3..41034fa 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/CoprocessorHConnection.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/CoprocessorHConnection.html
@@ -247,7 +247,7 @@ extends ConnectionImplementation
-abort,
 cacheLocation,
 clearCaches,
 clearRegionCache,
 clearRegionCache,
 clearRegionCache,
 close, deleteCachedRegionLocation,
 finalize,
 getAdmin,
 getAdmin,
 getAsyncProcess,
 getBackoffPolicy,
 getBufferedMutator, getBufferedMutator,
 getCachedLocation,
 getConfiguration,
 getConnectionConfiguration,
 getConnectionMetrics,
 getCurrentBatchPool,
 getCurrentMetaLookupPool, getCurrentNrHRS,
 getKeepAliveMasterService,
 getKeepAliveZooKeeperWatcher,
 getMaster,
 getNewRpcRetryingCallerFactory,
 getNumberOfCachedRegionLocations,
 getRegionLocation,
 getRegionLocator,
 getRpcClient,
 getRpcControllerFactory,
 getRpcRetryingCallerFactory,
 getStatisticsTracker,
 
 getTable, getTableBuilder,
 getTableState,
 hasCellBlockSupport,
 injectNonceGeneratorForTesting,
 isAborted,
 isClosed,
 isDeadServer,
 isMasterRunning,
 isTableAvailable,
 isTableDisabled,
 isTableEnabled,
 locateRegion,
 locateRegion, locateRegion,
 locateRegion,
 locateRegions,
 locateRegions,
 releaseMaster,
 releaseZooKeeperWatcher,
 relocateRegion,
 relocateRegion,
 retrieveClusterId,
 toString,
 updateCachedLocation,
 updateCachedLocations
+abort,
 cacheLocation,
 clearCaches,
 clearRegionCache,
 clearRegionCache,
 clearRegionCache,
 close, deleteCachedRegionLocation,
 finalize,
 getAdmin,
 getAdmin,
 getAsyncProcess,
 getBackoffPolicy,
 getBufferedMutator, getBufferedMutator,
 getCachedLocation,
 getConfiguration,
 

[42/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/org/apache/hadoop/hbase/HConstants.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/HConstants.html 
b/devapidocs/org/apache/hadoop/hbase/HConstants.html
index 21f6fcd..a99b408 100644
--- a/devapidocs/org/apache/hadoop/hbase/HConstants.html
+++ b/devapidocs/org/apache/hadoop/hbase/HConstants.html
@@ -890,6 +890,18 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT
+Parameter name for HBase client meta replica scan call 
timeout.
+
+
+
+static int
+HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT_DEFAULT
+Default HBase client meta replica scan call timeout, 1 
second
+
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 HBASE_CLIENT_META_OPERATION_TIMEOUT
 Parameter name for HBase client operation timeout.
 
@@ -3013,13 +3025,41 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
+
+
+
+
+
+HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT
+Parameter name for HBase client meta replica scan call 
timeout.
+
+See Also:
+Constant
 Field Values
+
+
+
+
+
+
+
+
+HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT_DEFAULT
+public static finalint HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT_DEFAULT
+Default HBase client meta replica scan call timeout, 1 
second
+
+See Also:
+Constant
 Field Values
+
+
+
 
 
 
 
 
 HREGION_LOGDIR_NAME
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HREGION_LOGDIR_NAME
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HREGION_LOGDIR_NAME
 Used to construct the name of the log directory for a 
region server
 
 See Also:
@@ -3033,7 +3073,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 SPLIT_LOGDIR_NAME
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String SPLIT_LOGDIR_NAME
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String SPLIT_LOGDIR_NAME
 Used to construct the name of the splitlog directory for a 
region server
 
 See Also:
@@ -3047,7 +3087,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 HREGION_OLDLOGDIR_NAME
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HREGION_OLDLOGDIR_NAME
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HREGION_OLDLOGDIR_NAME
 Like the previous, but for old logs that are about to be 
deleted
 
 See Also:
@@ -3061,7 +3101,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 BULKLOAD_STAGING_DIR_NAME
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String BULKLOAD_STAGING_DIR_NAME
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String BULKLOAD_STAGING_DIR_NAME
 Staging dir used by bulk load
 
 See Also:
@@ -3075,7 +3115,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 CORRUPT_DIR_NAME
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String CORRUPT_DIR_NAME
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String CORRUPT_DIR_NAME
 
 See Also:
 Constant
 Field Values
@@ -3088,7 +3128,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 HBCK_SIDELINEDIR_NAME
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HBCK_SIDELINEDIR_NAME
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HBCK_SIDELINEDIR_NAME
 Used by HBCK to sideline backup data
 
 See Also:
@@ -3102,7 +3142,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 

[47/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/apidocs/org/apache/hadoop/hbase/client/Increment.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/Increment.html 
b/apidocs/org/apache/hadoop/hbase/client/Increment.html
index 0fde789..28c9f90 100644
--- a/apidocs/org/apache/hadoop/hbase/client/Increment.html
+++ b/apidocs/org/apache/hadoop/hbase/client/Increment.html
@@ -491,7 +491,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 getTimeRange
-publicTimeRangegetTimeRange()
+publicTimeRangegetTimeRange()
 Gets the TimeRange used for this increment.
 
 Returns:
@@ -505,7 +505,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 setTimeRange
-publicIncrementsetTimeRange(longminStamp,
+publicIncrementsetTimeRange(longminStamp,
   longmaxStamp)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Sets the TimeRange to be used on the Get for this increment.
@@ -533,7 +533,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 setReturnResults
-publicIncrementsetReturnResults(booleanreturnResults)
+publicIncrementsetReturnResults(booleanreturnResults)
 
 Parameters:
 returnResults - True (default) if the increment operation 
should return the results. A
@@ -548,7 +548,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 isReturnResults
-publicbooleanisReturnResults()
+publicbooleanisReturnResults()
 
 Returns:
 current setting for returnResults
@@ -561,7 +561,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 numFamilies
-publicintnumFamilies()
+publicintnumFamilies()
 Method for retrieving the number of families to increment 
from
 
 Overrides:
@@ -577,7 +577,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 hasFamilies
-publicbooleanhasFamilies()
+publicbooleanhasFamilies()
 Method for checking if any families have been inserted into 
this Increment
 
 Returns:
@@ -591,7 +591,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 getFamilyMapOfLongs
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Mapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapbyte[],http://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">LonggetFamilyMapOfLongs()
+publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Mapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapbyte[],http://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">LonggetFamilyMapOfLongs()
 Before 0.95, when you called Increment#getFamilyMap(), you 
got back
  a map of families to a list of Longs.  Now, Mutation.getFamilyCellMap()
 returns
  families by list of Cells.  This method has been added so you can have the
@@ -610,7 +610,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 toString
-publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtoString()
+publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtoString()
 Description copied from 
class:Operation
 Produces a string representation of this Operation. It 
defaults to a JSON
  representation, but falls back to a string representation of the
@@ -629,7 +629,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 compareTo
-publicintcompareTo(Rowi)
+publicintcompareTo(Rowi)
 
 Specified by:
 http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true#compareTo-T-;
 title="class or interface in java.lang">compareToin 
interfacehttp://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableRow
@@ -644,7 +644,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 hashCode
-publicinthashCode()
+publicinthashCode()
 
 Overrides:
 http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--;
 title="class or interface in java.lang">hashCodein 
classhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
@@ -657,7 +657,7 @@ implements 

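The Increment hunk above only re-anchors the setter and accessor signatures (setTimeRange, setReturnResults, getFamilyMapOfLongs and friends). For orientation, a minimal client-side sketch combining those setters is included here; the table name, column family, qualifier and values are illustrative placeholders, not anything taken from the diff.

    // Sketch only: combines the Increment setters shown in the regenerated Javadoc above.
    // Table name, family, qualifier and timestamps are made-up placeholders.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Increment;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class IncrementExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("counters"))) {
          Increment inc = new Increment(Bytes.toBytes("row-1"));
          inc.addColumn(Bytes.toBytes("f"), Bytes.toBytes("hits"), 1L); // bump the counter by 1
          inc.setTimeRange(0L, Long.MAX_VALUE);   // only consider cells in this time range
          inc.setReturnResults(false);            // do not send the new value back to the client
          Result r = table.increment(inc);
          System.out.println("result returned: " + !r.isEmpty());
        }
      }
    }

setReturnResults(false) is the usual choice when the incremented value is not needed on the client, since it trims the RPC response.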
[46/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/apidocs/src-html/org/apache/hadoop/hbase/HConstants.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/HConstants.html 
b/apidocs/src-html/org/apache/hadoop/hbase/HConstants.html
index 8adbacb..ac13492 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/HConstants.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/HConstants.html
@@ -315,1063 +315,1070 @@
 307  /** Default HBase client operation 
timeout, which is tantamount to a blocking call */
 308  public static final int 
DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT = 120;
 309
-310  /** Used to construct the name of the 
log directory for a region server */
-311  public static final String 
HREGION_LOGDIR_NAME = "WALs";
-312
-313  /** Used to construct the name of the 
splitlog directory for a region server */
-314  public static final String 
SPLIT_LOGDIR_NAME = "splitWAL";
-315
-316  /** Like the previous, but for old logs 
that are about to be deleted */
-317  public static final String 
HREGION_OLDLOGDIR_NAME = "oldWALs";
-318
-319  /** Staging dir used by bulk load */
-320  public static final String 
BULKLOAD_STAGING_DIR_NAME = "staging";
-321
-322  public static final String 
CORRUPT_DIR_NAME = "corrupt";
-323
-324  /** Used by HBCK to sideline backup 
data */
-325  public static final String 
HBCK_SIDELINEDIR_NAME = ".hbck";
-326
-327  /** Any artifacts left from migration 
can be moved here */
-328  public static final String 
MIGRATION_NAME = ".migration";
-329
-330  /**
-331   * The directory from which 
co-processor/custom filter jars can be loaded
-332   * dynamically by the region servers. 
This value can be overridden by the
-333   * hbase.dynamic.jars.dir config.
-334   */
-335  public static final String LIB_DIR = 
"lib";
+310  /** Parameter name for HBase client 
meta replica scan call timeout. */
+311  public static final String 
HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT =
+312  
"hbase.client.meta.replica.scan.timeout";
+313
+314  /** Default HBase client meta replica 
scan call timeout, 1 second */
+315  public static final int 
HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT_DEFAULT = 100;
+316
+317  /** Used to construct the name of the 
log directory for a region server */
+318  public static final String 
HREGION_LOGDIR_NAME = "WALs";
+319
+320  /** Used to construct the name of the 
splitlog directory for a region server */
+321  public static final String 
SPLIT_LOGDIR_NAME = "splitWAL";
+322
+323  /** Like the previous, but for old logs 
that are about to be deleted */
+324  public static final String 
HREGION_OLDLOGDIR_NAME = "oldWALs";
+325
+326  /** Staging dir used by bulk load */
+327  public static final String 
BULKLOAD_STAGING_DIR_NAME = "staging";
+328
+329  public static final String 
CORRUPT_DIR_NAME = "corrupt";
+330
+331  /** Used by HBCK to sideline backup 
data */
+332  public static final String 
HBCK_SIDELINEDIR_NAME = ".hbck";
+333
+334  /** Any artifacts left from migration 
can be moved here */
+335  public static final String 
MIGRATION_NAME = ".migration";
 336
-337  /** Used to construct the name of the 
compaction directory during compaction */
-338  public static final String 
HREGION_COMPACTIONDIR_NAME = "compaction.dir";
-339
-340  /** Conf key for the max file size 
after which we split the region */
-341  public static final String 
HREGION_MAX_FILESIZE =
-342  "hbase.hregion.max.filesize";
+337  /**
+338   * The directory from which 
co-processor/custom filter jars can be loaded
+339   * dynamically by the region servers. 
This value can be overridden by the
+340   * hbase.dynamic.jars.dir config.
+341   */
+342  public static final String LIB_DIR = 
"lib";
 343
-344  /** Default maximum file size */
-345  public static final long 
DEFAULT_MAX_FILE_SIZE = 10 * 1024 * 1024 * 1024L;
+344  /** Used to construct the name of the 
compaction directory during compaction */
+345  public static final String 
HREGION_COMPACTIONDIR_NAME = "compaction.dir";
 346
-347  /**
-348   * Max size of single row for Get's or 
Scan's without in-row scanning flag set.
-349   */
-350  public static final String 
TABLE_MAX_ROWSIZE_KEY = "hbase.table.max.rowsize";
-351
-352  /**
-353   * Default max row size (1 Gb).
-354   */
-355  public static final long 
TABLE_MAX_ROWSIZE_DEFAULT = 1024 * 1024 * 1024L;
-356
-357  /**
-358   * The max number of threads used for 
opening and closing stores or store
-359   * files in parallel
-360   */
-361  public static final String 
HSTORE_OPEN_AND_CLOSE_THREADS_MAX =
-362
"hbase.hstore.open.and.close.threads.max";
+347  /** Conf key for the max file size 
after which we split the region */
+348  public static final String 
HREGION_MAX_FILESIZE =
+349  "hbase.hregion.max.filesize";
+350
+351  /** Default maximum file size */
+352  public static final long 
DEFAULT_MAX_FILE_SIZE = 10 * 1024 * 1024 * 1024L;
+353
+354  /**
+355   * Max 

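The hunk above adds the meta replica scan timeout constants to HConstants (the key string is "hbase.client.meta.replica.scan.timeout"; note the constant names are spelled MEAT in this snapshot of the source, so that spelling is kept here). A minimal sketch of reading that key from the client configuration, the same way other HConstants keys are read, follows; the override value and the wrapper class are illustrative assumptions.

    // Sketch: reading the meta replica scan timeout key added in the hunk above.
    // The override value (5000) and the wrapper class are illustrative assumptions.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    public class MetaReplicaScanTimeoutExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Explicit override; without it the HConstants default applies.
        conf.setInt(HConstants.HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT, 5000);
        int timeout = conf.getInt(HConstants.HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT,
            HConstants.HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT_DEFAULT);
        System.out.println("meta replica scan timeout = " + timeout);
      }
    }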
[50/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/apache_hbase_reference_guide.pdf
--
diff --git a/apache_hbase_reference_guide.pdf b/apache_hbase_reference_guide.pdf
index 9d9e0c6..768dc6c 100644
--- a/apache_hbase_reference_guide.pdf
+++ b/apache_hbase_reference_guide.pdf
@@ -5,8 +5,8 @@
 /Author (Apache HBase Team)
 /Creator (Asciidoctor PDF 1.5.0.alpha.6, based on Prawn 1.2.1)
 /Producer (Apache HBase Team)
-/CreationDate (D:20170518144822+00'00')
-/ModDate (D:20170518144822+00'00')
+/CreationDate (D:20170519144717+00'00')
+/ModDate (D:20170519144717+00'00')
 >>
 endobj
 2 0 obj
@@ -22,7 +22,7 @@ endobj
 3 0 obj
 << /Type /Pages
 /Count 673
-/Kids [7 0 R 13 0 R 15 0 R 17 0 R 19 0 R 21 0 R 23 0 R 39 0 R 43 0 R 47 0 R 55 
0 R 58 0 R 60 0 R 62 0 R 66 0 R 71 0 R 74 0 R 79 0 R 81 0 R 84 0 R 86 0 R 92 0 
R 101 0 R 106 0 R 108 0 R 120 0 R 123 0 R 130 0 R 136 0 R 145 0 R 154 0 R 166 0 
R 170 0 R 172 0 R 176 0 R 182 0 R 184 0 R 186 0 R 188 0 R 190 0 R 193 0 R 199 0 
R 202 0 R 204 0 R 206 0 R 208 0 R 210 0 R 212 0 R 214 0 R 217 0 R 220 0 R 224 0 
R 226 0 R 228 0 R 230 0 R 232 0 R 234 0 R 236 0 R 238 0 R 245 0 R 247 0 R 249 0 
R 251 0 R 255 0 R 258 0 R 263 0 R 268 0 R 271 0 R 274 0 R 290 0 R 299 0 R 305 0 
R 317 0 R 326 0 R 331 0 R 333 0 R 335 0 R 346 0 R 351 0 R 355 0 R 360 0 R 367 0 
R 378 0 R 390 0 R 404 0 R 413 0 R 415 0 R 417 0 R 423 0 R 434 0 R 445 0 R 456 0 
R 459 0 R 462 0 R 466 0 R 470 0 R 474 0 R 477 0 R 479 0 R 482 0 R 486 0 R 488 0 
R 492 0 R 497 0 R 501 0 R 507 0 R 509 0 R 515 0 R 517 0 R 521 0 R 530 0 R 532 0 
R 536 0 R 539 0 R 542 0 R 545 0 R 559 0 R 566 0 R 573 0 R 585 0 R 591 0 R 599 0 
R 608 0 R 611 0 R 615 0 R 618 0 R 629 0
  R 637 0 R 643 0 R 648 0 R 652 0 R 654 0 R 668 0 R 680 0 R 686 0 R 692 0 R 695 
0 R 704 0 R 712 0 R 716 0 R 721 0 R 726 0 R 728 0 R 730 0 R 732 0 R 740 0 R 749 
0 R 753 0 R 761 0 R 769 0 R 775 0 R 779 0 R 785 0 R 790 0 R 795 0 R 803 0 R 805 
0 R 809 0 R 814 0 R 820 0 R 823 0 R 830 0 R 840 0 R 844 0 R 846 0 R 849 0 R 853 
0 R 858 0 R 861 0 R 873 0 R 877 0 R 882 0 R 890 0 R 895 0 R 899 0 R 903 0 R 905 
0 R 908 0 R 910 0 R 914 0 R 916 0 R 920 0 R 924 0 R 928 0 R 933 0 R 938 0 R 941 
0 R 943 0 R 950 0 R 956 0 R 964 0 R 973 0 R 977 0 R 982 0 R 986 0 R 988 0 R 997 
0 R 1000 0 R 1005 0 R 1008 0 R 1017 0 R 1020 0 R 1026 0 R 1033 0 R 1036 0 R 
1038 0 R 1047 0 R 1049 0 R 1051 0 R 1054 0 R 1056 0 R 1058 0 R 1060 0 R 1062 0 
R 1064 0 R 1068 0 R 1072 0 R 1077 0 R 1079 0 R 1081 0 R 1083 0 R 1085 0 R 1090 
0 R 1099 0 R 1102 0 R 1104 0 R 1106 0 R  0 R 1113 0 R 1116 0 R 1118 0 R 
1120 0 R 1122 0 R 1125 0 R 1130 0 R 1135 0 R 1145 0 R 1150 0 R 1164 0 R 1177 0 
R 1190 0 R 1199 0 R 1213 0 R 1217 0 R 1227 0 R 12
 40 0 R 1243 0 R 1255 0 R 1264 0 R 1271 0 R 1275 0 R 1285 0 R 1290 0 R 1294 0 R 
1300 0 R 1306 0 R 1313 0 R 1321 0 R 1323 0 R 1335 0 R 1337 0 R 1342 0 R 1346 0 
R 1351 0 R 1361 0 R 1367 0 R 1373 0 R 1375 0 R 1377 0 R 1390 0 R 1396 0 R 1404 
0 R 1409 0 R 1421 0 R 1428 0 R 1433 0 R 1443 0 R 1451 0 R 1454 0 R 1460 0 R 
1464 0 R 1467 0 R 1472 0 R 1475 0 R 1479 0 R 1485 0 R 1489 0 R 1494 0 R 1500 0 
R 1504 0 R 1507 0 R 1509 0 R 1517 0 R 1525 0 R 1531 0 R 1536 0 R 1540 0 R 1543 
0 R 1549 0 R 1555 0 R 1560 0 R 1562 0 R 1564 0 R 1567 0 R 1569 0 R 1577 0 R 
1580 0 R 1586 0 R 1594 0 R 1598 0 R 1603 0 R 1609 0 R 1612 0 R 1614 0 R 1616 0 
R 1618 0 R 1625 0 R 1635 0 R 1637 0 R 1639 0 R 1641 0 R 1643 0 R 1646 0 R 1648 
0 R 1650 0 R 1652 0 R 1655 0 R 1657 0 R 1659 0 R 1661 0 R 1665 0 R 1669 0 R 
1678 0 R 1680 0 R 1682 0 R 1684 0 R 1686 0 R 1693 0 R 1695 0 R 1700 0 R 1702 0 
R 1704 0 R 1711 0 R 1716 0 R 1722 0 R 1726 0 R 1729 0 R 1732 0 R 1736 0 R 1738 
0 R 1741 0 R 1743 0 R 1745 0 R 1747 0 R 1751 0 R 1753 0 R 
 1756 0 R 1758 0 R 1760 0 R 1762 0 R 1764 0 R 1772 0 R 1775 0 R 1780 0 R 1782 0 
R 1784 0 R 1786 0 R 1788 0 R 1796 0 R 1807 0 R 1810 0 R 1824 0 R 1836 0 R 1840 
0 R 1846 0 R 1851 0 R 1854 0 R 1859 0 R 1861 0 R 1866 0 R 1868 0 R 1871 0 R 
1873 0 R 1875 0 R 1877 0 R 1879 0 R 1883 0 R 1885 0 R 1889 0 R 1893 0 R 1900 0 
R 1907 0 R 1918 0 R 1932 0 R 1944 0 R 1961 0 R 1965 0 R 1967 0 R 1971 0 R 1988 
0 R 1996 0 R 2003 0 R 2012 0 R 2018 0 R 2028 0 R 2039 0 R 2045 0 R 2054 0 R 
2066 0 R 2083 0 R 2094 0 R 2097 0 R 2106 0 R 2121 0 R 2128 0 R 2131 0 R 2136 0 
R 2141 0 R 2151 0 R 2159 0 R 2162 0 R 2164 0 R 2168 0 R 2183 0 R 2192 0 R 2197 
0 R 2201 0 R 2204 0 R 2206 0 R 2208 0 R 2210 0 R 2212 0 R 2217 0 R 2219 0 R 
2229 0 R 2239 0 R 2246 0 R 2258 0 R 2263 0 R 2267 0 R 2280 0 R 2287 0 R 2293 0 
R 2295 0 R 2305 0 R 2312 0 R 2323 0 R 2327 0 R 2338 0 R 2344 0 R 2354 0 R 2363 
0 R 2371 0 R 2377 0 R 2382 0 R 2386 0 R 2390 0 R 2392 0 R 2398 0 R 2402 0 R 
2406 0 R 2412 0 R 2419 0 R 2424 0 R 2428 0 R 2437 0 R 2442 0 
 R 2447 0 R 2460 0 R 2467 0 R 2470 0 R 2476 0 R 2482 0 R 2486 0 R 2490 0 R 2498 
0 R 2504 0 R 2506 0 R 2512 0 R 2517 0 R 2520 0 R 2530 0 R 2536 0 R 2545 0 R 
2549 0 R 2558 0 R 2563 0 R 2566 0 R 2576 0 R 2580 0 R 2585 0 

[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html 
b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
index 45e2b7a..7bc8650 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
@@ -393,40 +393,22 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 privateHMaster m_master
 
 
-
-
-
-
-
-m_catalogJanitorEnabled
-privateboolean m_catalogJanitorEnabled
-
-
-
-
-
-
-
-m_catalogJanitorEnabled__IsNotDefault
-privateboolean m_catalogJanitorEnabled__IsNotDefault
-
-
-
+
 
 
 
 
-m_frags
-privatehttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integer m_frags
+m_filter
+privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String m_filter
 
 
-
+
 
 
 
 
-m_frags__IsNotDefault
-privateboolean m_frags__IsNotDefault
+m_filter__IsNotDefault
+privateboolean m_filter__IsNotDefault
 
 
 
@@ -435,7 +417,7 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 
 
 m_servers
-privatehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName m_servers
+privatehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName m_servers
 
 
 
@@ -444,7 +426,7 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 
 
 m_servers__IsNotDefault
-privateboolean m_servers__IsNotDefault
+privateboolean m_servers__IsNotDefault
 
 
 
@@ -453,7 +435,7 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 
 
 m_assignmentManager
-privateAssignmentManager m_assignmentManager
+privateAssignmentManager m_assignmentManager
 
 
 
@@ -462,43 +444,43 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 
 
 m_assignmentManager__IsNotDefault
-privateboolean m_assignmentManager__IsNotDefault
+privateboolean m_assignmentManager__IsNotDefault
 
 
-
+
 
 
 
 
-m_format
-privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String m_format
+m_deadServers
+privatehttp://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetServerName m_deadServers
 
 
-
+
 
 
 
 
-m_format__IsNotDefault
-privateboolean m_format__IsNotDefault
+m_deadServers__IsNotDefault
+privateboolean m_deadServers__IsNotDefault
 
 
-
+
 
 
 
 
-m_deadServers
-privatehttp://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetServerName m_deadServers
+m_catalogJanitorEnabled
+privateboolean m_catalogJanitorEnabled
 
 
-
+
 
 
 
 
-m_deadServers__IsNotDefault
-privateboolean m_deadServers__IsNotDefault
+m_catalogJanitorEnabled__IsNotDefault
+privateboolean m_catalogJanitorEnabled__IsNotDefault
 
 
 
@@ -507,7 +489,7 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 
 
 m_metaLocation
-privateServerName m_metaLocation
+privateServerName m_metaLocation
 
 
 
@@ -516,7 +498,7 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 
 
 m_metaLocation__IsNotDefault
-privateboolean m_metaLocation__IsNotDefault
+privateboolean m_metaLocation__IsNotDefault
 
 
 
@@ -525,7 +507,7 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 
 
 m_serverManager
-privateServerManager m_serverManager
+privateServerManager m_serverManager
 
 
 
@@ -534,25 +516,43 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 
 
 m_serverManager__IsNotDefault
-privateboolean m_serverManager__IsNotDefault
+privateboolean m_serverManager__IsNotDefault
 
 
-
+
 
 
 
 
-m_filter
-privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String m_filter
+m_format
+privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String m_format
 
 
-
+
+
+
+
+
+m_format__IsNotDefault
+privateboolean m_format__IsNotDefault
+
+
+
+
+
+
+
+m_frags
+privatehttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 

[43/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/org/apache/hadoop/hbase/HConstants.Modify.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/HConstants.Modify.html 
b/devapidocs/org/apache/hadoop/hbase/HConstants.Modify.html
index b4e2dbb..efe1e57 100644
--- a/devapidocs/org/apache/hadoop/hbase/HConstants.Modify.html
+++ b/devapidocs/org/apache/hadoop/hbase/HConstants.Modify.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public static enum HConstants.Modify
+public static enum HConstants.Modify
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumHConstants.Modify
 modifyTable op for replacing the table descriptor
 
@@ -224,7 +224,7 @@ the order they are declared.
 
 
 CLOSE_REGION
-public static finalHConstants.Modify CLOSE_REGION
+public static finalHConstants.Modify CLOSE_REGION
 
 
 
@@ -233,7 +233,7 @@ the order they are declared.
 
 
 TABLE_COMPACT
-public static finalHConstants.Modify TABLE_COMPACT
+public static finalHConstants.Modify TABLE_COMPACT
 
 
 
@@ -242,7 +242,7 @@ the order they are declared.
 
 
 TABLE_FLUSH
-public static finalHConstants.Modify TABLE_FLUSH
+public static finalHConstants.Modify TABLE_FLUSH
 
 
 
@@ -251,7 +251,7 @@ the order they are declared.
 
 
 TABLE_MAJOR_COMPACT
-public static finalHConstants.Modify TABLE_MAJOR_COMPACT
+public static finalHConstants.Modify TABLE_MAJOR_COMPACT
 
 
 
@@ -260,7 +260,7 @@ the order they are declared.
 
 
 TABLE_SET_HTD
-public static finalHConstants.Modify TABLE_SET_HTD
+public static finalHConstants.Modify TABLE_SET_HTD
 
 
 
@@ -269,7 +269,7 @@ the order they are declared.
 
 
 TABLE_SPLIT
-public static finalHConstants.Modify TABLE_SPLIT
+public static finalHConstants.Modify TABLE_SPLIT
 
 
 
@@ -286,7 +286,7 @@ the order they are declared.
 
 
 values
-public staticHConstants.Modify[]values()
+public staticHConstants.Modify[]values()
 Returns an array containing the constants of this enum 
type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -306,7 +306,7 @@ for (HConstants.Modify c : HConstants.Modify.values())
 
 
 valueOf
-public staticHConstants.ModifyvalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname)
+public staticHConstants.ModifyvalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname)
 Returns the enum constant of this type with the specified 
name.
 The string must match exactly an identifier used to declare an
 enum constant in this type.  (Extraneous whitespace characters are 



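The values() Javadoc above ends with the standard iteration hint, "for (HConstants.Modify c : HConstants.Modify.values())". Spelled out, and with valueOf() added for symmetry, a self-contained sketch looks like this (the printed output format is illustrative):

    // Sketch: iterating HConstants.Modify and resolving a constant by name,
    // as the generated values()/valueOf() Javadoc above describes.
    import org.apache.hadoop.hbase.HConstants;

    public class ModifyEnumExample {
      public static void main(String[] args) {
        for (HConstants.Modify c : HConstants.Modify.values()) {
          System.out.println(c.name());   // e.g. CLOSE_REGION, TABLE_COMPACT, TABLE_SPLIT, ...
        }
        // valueOf() requires an exact match of the declared identifier.
        HConstants.Modify m = HConstants.Modify.valueOf("TABLE_SPLIT");
        System.out.println("resolved: " + m);
      }
    }

As the page header above shows, HConstants.Modify is annotated @InterfaceAudience.Private, so this loop only illustrates the generated documentation and is not a suggestion to use the enum from application code.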
[48/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/apidocs/org/apache/hadoop/hbase/HConstants.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/HConstants.html 
b/apidocs/org/apache/hadoop/hbase/HConstants.html
index 30ba255..de97f75 100644
--- a/apidocs/org/apache/hadoop/hbase/HConstants.html
+++ b/apidocs/org/apache/hadoop/hbase/HConstants.html
@@ -863,6 +863,18 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT
+Parameter name for HBase client meta replica scan call 
timeout.
+
+
+
+static int
+HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT_DEFAULT
+Default HBase client meta replica scan call timeout, 1 
second
+
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 HBASE_CLIENT_META_OPERATION_TIMEOUT
 Parameter name for HBase client operation timeout.
 
@@ -2967,13 +2979,41 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
+
+
+
+
+
+HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT
+Parameter name for HBase client meta replica scan call 
timeout.
+
+See Also:
+Constant
 Field Values
+
+
+
+
+
+
+
+
+HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT_DEFAULT
+public static finalint HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT_DEFAULT
+Default HBase client meta replica scan call timeout, 1 
second
+
+See Also:
+Constant
 Field Values
+
+
+
 
 
 
 
 
 HREGION_LOGDIR_NAME
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HREGION_LOGDIR_NAME
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HREGION_LOGDIR_NAME
 Used to construct the name of the log directory for a 
region server
 
 See Also:
@@ -2987,7 +3027,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 SPLIT_LOGDIR_NAME
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String SPLIT_LOGDIR_NAME
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String SPLIT_LOGDIR_NAME
 Used to construct the name of the splitlog directory for a 
region server
 
 See Also:
@@ -3001,7 +3041,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 HREGION_OLDLOGDIR_NAME
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HREGION_OLDLOGDIR_NAME
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HREGION_OLDLOGDIR_NAME
 Like the previous, but for old logs that are about to be 
deleted
 
 See Also:
@@ -3015,7 +3055,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 BULKLOAD_STAGING_DIR_NAME
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String BULKLOAD_STAGING_DIR_NAME
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String BULKLOAD_STAGING_DIR_NAME
 Staging dir used by bulk load
 
 See Also:
@@ -3029,7 +3069,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 CORRUPT_DIR_NAME
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String CORRUPT_DIR_NAME
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String CORRUPT_DIR_NAME
 
 See Also:
 Constant
 Field Values
@@ -3042,7 +3082,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 HBCK_SIDELINEDIR_NAME
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HBCK_SIDELINEDIR_NAME
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HBCK_SIDELINEDIR_NAME
 Used by HBCK to sideline backup data
 
 See Also:
@@ -3056,7 +3096,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 MIGRATION_NAME
-public 

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
index 8adbacb..ac13492 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
@@ -315,1063 +315,1070 @@
 307  /** Default HBase client operation 
timeout, which is tantamount to a blocking call */
 308  public static final int 
DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT = 120;
 309
-310  /** Used to construct the name of the 
log directory for a region server */
-311  public static final String 
HREGION_LOGDIR_NAME = "WALs";
-312
-313  /** Used to construct the name of the 
splitlog directory for a region server */
-314  public static final String 
SPLIT_LOGDIR_NAME = "splitWAL";
-315
-316  /** Like the previous, but for old logs 
that are about to be deleted */
-317  public static final String 
HREGION_OLDLOGDIR_NAME = "oldWALs";
-318
-319  /** Staging dir used by bulk load */
-320  public static final String 
BULKLOAD_STAGING_DIR_NAME = "staging";
-321
-322  public static final String 
CORRUPT_DIR_NAME = "corrupt";
-323
-324  /** Used by HBCK to sideline backup 
data */
-325  public static final String 
HBCK_SIDELINEDIR_NAME = ".hbck";
-326
-327  /** Any artifacts left from migration 
can be moved here */
-328  public static final String 
MIGRATION_NAME = ".migration";
-329
-330  /**
-331   * The directory from which 
co-processor/custom filter jars can be loaded
-332   * dynamically by the region servers. 
This value can be overridden by the
-333   * hbase.dynamic.jars.dir config.
-334   */
-335  public static final String LIB_DIR = 
"lib";
+310  /** Parameter name for HBase client 
meta replica scan call timeout. */
+311  public static final String 
HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT =
+312  
"hbase.client.meta.replica.scan.timeout";
+313
+314  /** Default HBase client meta replica 
scan call timeout, 1 second */
+315  public static final int 
HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT_DEFAULT = 100;
+316
+317  /** Used to construct the name of the 
log directory for a region server */
+318  public static final String 
HREGION_LOGDIR_NAME = "WALs";
+319
+320  /** Used to construct the name of the 
splitlog directory for a region server */
+321  public static final String 
SPLIT_LOGDIR_NAME = "splitWAL";
+322
+323  /** Like the previous, but for old logs 
that are about to be deleted */
+324  public static final String 
HREGION_OLDLOGDIR_NAME = "oldWALs";
+325
+326  /** Staging dir used by bulk load */
+327  public static final String 
BULKLOAD_STAGING_DIR_NAME = "staging";
+328
+329  public static final String 
CORRUPT_DIR_NAME = "corrupt";
+330
+331  /** Used by HBCK to sideline backup 
data */
+332  public static final String 
HBCK_SIDELINEDIR_NAME = ".hbck";
+333
+334  /** Any artifacts left from migration 
can be moved here */
+335  public static final String 
MIGRATION_NAME = ".migration";
 336
-337  /** Used to construct the name of the 
compaction directory during compaction */
-338  public static final String 
HREGION_COMPACTIONDIR_NAME = "compaction.dir";
-339
-340  /** Conf key for the max file size 
after which we split the region */
-341  public static final String 
HREGION_MAX_FILESIZE =
-342  "hbase.hregion.max.filesize";
+337  /**
+338   * The directory from which 
co-processor/custom filter jars can be loaded
+339   * dynamically by the region servers. 
This value can be overridden by the
+340   * hbase.dynamic.jars.dir config.
+341   */
+342  public static final String LIB_DIR = 
"lib";
 343
-344  /** Default maximum file size */
-345  public static final long 
DEFAULT_MAX_FILE_SIZE = 10 * 1024 * 1024 * 1024L;
+344  /** Used to construct the name of the 
compaction directory during compaction */
+345  public static final String 
HREGION_COMPACTIONDIR_NAME = "compaction.dir";
 346
-347  /**
-348   * Max size of single row for Get's or 
Scan's without in-row scanning flag set.
-349   */
-350  public static final String 
TABLE_MAX_ROWSIZE_KEY = "hbase.table.max.rowsize";
-351
-352  /**
-353   * Default max row size (1 Gb).
-354   */
-355  public static final long 
TABLE_MAX_ROWSIZE_DEFAULT = 1024 * 1024 * 1024L;
-356
-357  /**
-358   * The max number of threads used for 
opening and closing stores or store
-359   * files in parallel
-360   */
-361  public static final String 
HSTORE_OPEN_AND_CLOSE_THREADS_MAX =
-362
"hbase.hstore.open.and.close.threads.max";
+347  /** Conf key for the max file size 
after which we split the region */
+348  public static final String 
HREGION_MAX_FILESIZE =
+349  "hbase.hregion.max.filesize";
+350
+351  /** Default maximum 

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
index be7f8e5..37574d7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
@@ -147,1885 +147,1897 @@
 139  private final boolean 
hostnamesCanChange;
 140  private final long pause;
 141  private final long pauseForCQTBE;// 
pause for CallQueueTooBigException, if specified
-142  private final boolean 
useMetaReplicas;
-143  private final int numTries;
-144  final int rpcTimeout;
-145
-146  /**
-147   * Global nonceGenerator shared per 
client.Currently there's no reason to limit its scope.
-148   * Once it's set under 
nonceGeneratorCreateLock, it is never unset or changed.
-149   */
-150  private static volatile NonceGenerator 
nonceGenerator = null;
-151  /** The nonce generator lock. Only 
taken when creating Connection, which gets a private copy. */
-152  private static final Object 
nonceGeneratorCreateLock = new Object();
-153
-154  private final AsyncProcess 
asyncProcess;
-155  // single tracker per connection
-156  private final ServerStatisticTracker 
stats;
-157
-158  private volatile boolean closed;
-159  private volatile boolean aborted;
-160
-161  // package protected for the tests
-162  ClusterStatusListener 
clusterStatusListener;
-163
-164  private final Object metaRegionLock = 
new Object();
-165
-166  // We have a single lock for master & zk to prevent deadlocks. Having
-167  //  one lock for ZK and one lock for 
master is not possible:
-168  //  When creating a connection to 
master, we need a connection to ZK to get
-169  //  its address. But another thread 
could have taken the ZK lock, and could
-170  //  be waiting for the master lock => deadlock.
-171  private final Object masterAndZKLock = 
new Object();
-172
-173  // thread executor shared by all Table 
instances created
-174  // by this connection
-175  private volatile ExecutorService 
batchPool = null;
-176  // meta thread executor shared by all 
Table instances created
-177  // by this connection
-178  private volatile ExecutorService 
metaLookupPool = null;
-179  private volatile boolean cleanupPool = 
false;
-180
-181  private final Configuration conf;
-182
-183  // cache the configuration value for 
tables so that we can avoid calling
-184  // the expensive Configuration to fetch 
the value multiple times.
-185  private final ConnectionConfiguration 
connectionConfig;
-186
-187  // Client rpc instance.
-188  private final RpcClient rpcClient;
-189
-190  private final MetaCache metaCache;
-191  private final MetricsConnection 
metrics;
-192
-193  protected User user;
-194
-195  private final RpcRetryingCallerFactory 
rpcCallerFactory;
-196
-197  private final RpcControllerFactory 
rpcControllerFactory;
-198
-199  private final RetryingCallerInterceptor 
interceptor;
-200
-201  /**
-202   * Cluster registry of basic info such 
as clusterid and meta region location.
-203   */
-204  Registry registry;
-205
-206  private final ClientBackoffPolicy 
backoffPolicy;
-207
-208  /**
-209   * Allow setting an alternate 
BufferedMutator implementation via
-210   * config. If null, use default.
-211   */
-212  private final String 
alternateBufferedMutatorClassName;
-213
-214  /**
-215   * constructor
-216   * @param conf Configuration object
-217   */
-218  ConnectionImplementation(Configuration 
conf,
-219   
ExecutorService pool, User user) throws IOException {
-220this.conf = conf;
-221this.user = user;
-222this.batchPool = pool;
-223this.connectionConfig = new 
ConnectionConfiguration(conf);
-224this.closed = false;
-225this.pause = 
conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
-226
HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
-227long configuredPauseForCQTBE = 
conf.getLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, pause);
-228if (configuredPauseForCQTBE < pause) {
-229  LOG.warn("The " + 
HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE + " setting: "
-230  + configuredPauseForCQTBE + " 
is smaller than " + HConstants.HBASE_CLIENT_PAUSE
-231  + ", will use " + pause + " 
instead.");
-232  this.pauseForCQTBE = pause;
-233} else {
-234  this.pauseForCQTBE = 
configuredPauseForCQTBE;
-235}
-236this.useMetaReplicas = 
conf.getBoolean(HConstants.USE_META_REPLICAS,
-237  
HConstants.DEFAULT_USE_META_REPLICAS);
-238// how many times to try, one more 
than max *retry* time
-239this.numTries = 

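The removed constructor lines above (source lines 227-235) show the pause/pauseForCQTBE guard: when the CallQueueTooBigException-specific pause is configured smaller than the base client pause, the configured value is rejected with a warning and the base pause is used instead. A stripped-down restatement of that guard is sketched below; the wrapper class and method are illustrative assumptions, only the comparison mirrors the excerpt.

    // Sketch of the pause/pauseForCQTBE guard shown in the ConnectionImplementation excerpt above.
    // Only the comparison logic is taken from the excerpt; the wrapper class is an assumption.
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    public class PauseForCqtbeExample {
      private static final Log LOG = LogFactory.getLog(PauseForCqtbeExample.class);

      static long resolvePauseForCQTBE(Configuration conf) {
        long pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
            HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
        long configured = conf.getLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, pause);
        if (configured < pause) {
          // Same behaviour as the constructor: warn and fall back to the base pause.
          LOG.warn(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE + " setting " + configured
              + " is smaller than " + HConstants.HBASE_CLIENT_PAUSE + ", using " + pause);
          return pause;
        }
        return configured;
      }

      public static void main(String[] args) {
        System.out.println(resolvePauseForCQTBE(HBaseConfiguration.create()));
      }
    }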
[51/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/c635e71b
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/c635e71b
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/c635e71b

Branch: refs/heads/asf-site
Commit: c635e71bac0379189f47ec304101c69bd6a62760
Parents: b78b944
Author: jenkins 
Authored: Fri May 19 14:58:29 2017 +
Committer: jenkins 
Committed: Fri May 19 14:58:29 2017 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 14386 +
 apache_hbase_reference_guide.pdfmarks   | 4 +-
 apidocs/constant-values.html|14 +
 apidocs/index-all.html  | 8 +
 apidocs/org/apache/hadoop/hbase/HConstants.html |   558 +-
 .../apache/hadoop/hbase/client/Increment.html   |42 +-
 .../org/apache/hadoop/hbase/HConstants.html |  1981 +--
 .../apache/hadoop/hbase/client/Increment.html   |   427 +-
 book.html   |15 +
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   |   562 +-
 checkstyle.rss  | 4 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html |20 +-
 devapidocs/index-all.html   |20 +-
 .../apache/hadoop/hbase/HConstants.Modify.html  |18 +-
 .../org/apache/hadoop/hbase/HConstants.html |   560 +-
 .../hadoop/hbase/backup/package-tree.html   | 4 +-
 .../hbase/classification/package-tree.html  | 8 +-
 .../hbase/client/ConnectionConfiguration.html   |   112 +-
 ...ectionImplementation.MasterServiceState.html |18 +-
 ...onImplementation.MasterServiceStubMaker.html |10 +-
 ...ntation.ServerErrorTracker.ServerErrors.html |10 +-
 ...ectionImplementation.ServerErrorTracker.html |20 +-
 .../hbase/client/ConnectionImplementation.html  |   292 +-
 .../ConnectionUtils.MasterlessConnection.html   | 2 +-
 .../hbase/client/CoprocessorHConnection.html| 2 +-
 .../org/apache/hadoop/hbase/client/HTable.html  |66 +-
 .../apache/hadoop/hbase/client/Increment.html   |42 +-
 .../hadoop/hbase/client/package-tree.html   |26 +-
 .../hadoop/hbase/executor/package-tree.html | 2 +-
 .../hadoop/hbase/filter/package-tree.html   | 8 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 6 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   | 2 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 2 +-
 .../hadoop/hbase/master/package-tree.html   | 6 +-
 .../hbase/master/procedure/package-tree.html| 2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |12 +-
 .../hadoop/hbase/procedure2/package-tree.html   | 6 +-
 .../hadoop/hbase/quotas/package-tree.html   | 4 +-
 .../hadoop/hbase/regionserver/package-tree.html |20 +-
 .../regionserver/querymatcher/package-tree.html | 2 +-
 .../hbase/regionserver/wal/package-tree.html| 2 +-
 .../hadoop/hbase/rest/model/package-tree.html   | 2 +-
 .../hbase/security/access/package-tree.html | 2 +-
 .../hadoop/hbase/thrift/package-tree.html   | 2 +-
 .../tmpl/master/MasterStatusTmpl.ImplData.html  |   250 +-
 .../hbase/tmpl/master/MasterStatusTmpl.html |   100 +-
 .../hbase/tmpl/master/MasterStatusTmplImpl.html |50 +-
 .../regionserver/RSStatusTmpl.ImplData.html |60 +-
 .../hbase/tmpl/regionserver/RSStatusTmpl.html   |24 +-
 .../tmpl/regionserver/RSStatusTmplImpl.html |12 +-
 .../apache/hadoop/hbase/util/package-tree.html  | 8 +-
 .../apache/hadoop/hbase/wal/package-tree.html   | 2 +-
 .../hbase/zookeeper/RecoverableZooKeeper.html   |16 +-
 .../zookeeper/ZKUtil.JaasConfiguration.html |30 +-
 .../hbase/zookeeper/ZKUtil.NodeAndData.html |16 +-
 .../ZKUtil.ZKUtilOp.CreateAndFailSilent.html|12 +-
 .../ZKUtil.ZKUtilOp.DeleteNodeFailSilent.html   | 8 +-
 .../zookeeper/ZKUtil.ZKUtilOp.SetData.html  |12 +-
 .../hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.html |14 +-
 .../apache/hadoop/hbase/zookeeper/ZKUtil.html   |   138 +-
 .../apache/hadoop/hbase/HConstants.Modify.html  |  1981 +--
 .../hbase/HConstants.OperationStatusCode.html   |  1981 +--
 .../org/apache/hadoop/hbase/HConstants.html |  1981 +--
 .../org/apache/hadoop/hbase/Version.html| 6 

[44/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 963d6e6..2cdf326 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -289,7 +289,7 @@
 2167
 0
 0
-14405
+14407
 
 Files
 
@@ -907,7 +907,7 @@
 org/apache/hadoop/hbase/client/ConnectionImplementation.java
 0
 0
-4
+6
 
 org/apache/hadoop/hbase/client/CoprocessorHConnection.java
 0
@@ -7196,12 +7196,12 @@
 http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation;>JavadocTagContinuationIndentation
 
 offset: 2
-780
+785
 Error
 
 
 http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription;>NonEmptyAtclauseDescription
-3269
+3265
 Error
 
 misc
@@ -7219,7 +7219,7 @@
 
 max: 100
 ignorePattern: ^package.*|^import.*|a 
href|href|http://|https://|ftp://|org.apache.thrift.|com.google.protobuf.|hbase.protobuf.generated
-800
+801
 Error
 
 
@@ -8892,7 +8892,7 @@
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-585
+592
 
 org/apache/hadoop/hbase/HRegionInfo.java
 
@@ -12895,7 +12895,7 @@
 
 Error
 javadoc
-NonEmptyAtclauseDescription
+JavadocTagContinuationIndentation
 Javadoc comment at column 43 has parse error. Missed HTML close tag 
'TableName'. Sometimes it means that close tag missed for one of previous 
tags.
 123
 
@@ -15525,22 +15525,34 @@
 121
 
 Error
+javadoc
+NonEmptyAtclauseDescription
+At-clause should have a non-empty description.
+313
+
+Error
+sizes
+LineLength
+Line is longer than 100 characters (found 102).
+835
+
+Error
 sizes
 LineLength
 Line is longer than 100 characters (found 101).
-1015
+1027
 
 Error
 sizes
 MethodLength
 Method length is 492 lines (max allowed is 150).
-1249
+1261
 
 Error
 coding
 NoFinalizer
 Avoid using finalizer method.
-1972
+1984
 
 org/apache/hadoop/hbase/client/CoprocessorHConnection.java
 
@@ -15973,7 +15985,7 @@
 
 Error
 javadoc
-NonEmptyAtclauseDescription
+JavadocTagContinuationIndentation
 Javadoc comment at column 64 has parse error. Missed HTML close tag 
'code'. Sometimes it means that close tag missed for one of previous tags.
 1957
 
@@ -16263,121 +16275,121 @@
 sizes
 LineLength
 Line is longer than 100 characters (found 113).
-752
+750
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-914
+912
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-915
+913
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-943
+941
 
 Error
 annotation
 MissingDeprecated
 Must include both @java.lang.Deprecated annotation and @deprecated Javadoc 
tag with description.
-1041
+1039
 
 Error
 annotation
 MissingDeprecated
 Must include both @java.lang.Deprecated annotation and @deprecated Javadoc 
tag with description.
-1145
+1143
 
 Error
 annotation
 MissingDeprecated
 Must include both @java.lang.Deprecated annotation and @deprecated Javadoc 
tag with description.
-1159
+1157
 
 Error
 annotation
 MissingDeprecated
 Must include both @java.lang.Deprecated annotation and @deprecated Javadoc 
tag with description.
-1165
+1163
 
 Error
 annotation
 MissingDeprecated
 Must include both @java.lang.Deprecated annotation and @deprecated Javadoc 
tag with description.
-1177
+1175
 
 Error
 blocks
 LeftCurly
 '{' at column 34 should have line break after.
-1187
+1185
 
 Error
 annotation
 MissingDeprecated
 Must include both @java.lang.Deprecated annotation and @deprecated Javadoc 
tag with description.
-1189
+1187
 
 Error
 indentation
 Indentation
 'method def modifier' have incorrect indentation level 6, expected level 
should be one of the following: 8, 10.
-1208
+1206
 
 Error
 indentation
 Indentation
 'if' have incorrect indentation level 8, expected level should be one of 
the following: 10, 12.
-1210
+1208
 
 Error
 indentation
 Indentation
 'if' child have incorrect indentation level 10, expected level should be 
one of the following: 12, 14.
-1211
+1209
 
 Error
 indentation
 Indentation
 'if rcurly' have incorrect indentation level 8, expected level should be 
one of the following: 10, 12.
-1212
+1210
 
 Error
 indentation
 Indentation
 'method def rcurly' have incorrect indentation level 6, expected level 
should be one of the following: 8, 10.
-1213
+1211
 
 Error
 indentation
 Indentation
 'object def rcurly' have incorrect indentation level 4, expected level 
should be one of the following: 6, 8.
-1214
+1212
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 103).
-1247
+1245
 
 Error
 indentation
 Indentation
 '=' have incorrect indentation level 4, expected level should be 6.
-1270
+1268
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 111).
-1290
+1288
 
 

[28/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
index be7f8e5..37574d7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
@@ -147,1885 +147,1897 @@
 139  private final boolean 
hostnamesCanChange;
 140  private final long pause;
 141  private final long pauseForCQTBE;// 
pause for CallQueueTooBigException, if specified
-142  private final boolean 
useMetaReplicas;
-143  private final int numTries;
-144  final int rpcTimeout;
-145
-146  /**
-147   * Global nonceGenerator shared per 
client.Currently there's no reason to limit its scope.
-148   * Once it's set under 
nonceGeneratorCreateLock, it is never unset or changed.
-149   */
-150  private static volatile NonceGenerator 
nonceGenerator = null;
-151  /** The nonce generator lock. Only 
taken when creating Connection, which gets a private copy. */
-152  private static final Object 
nonceGeneratorCreateLock = new Object();
-153
-154  private final AsyncProcess 
asyncProcess;
-155  // single tracker per connection
-156  private final ServerStatisticTracker 
stats;
-157
-158  private volatile boolean closed;
-159  private volatile boolean aborted;
-160
-161  // package protected for the tests
-162  ClusterStatusListener 
clusterStatusListener;
-163
-164  private final Object metaRegionLock = 
new Object();
-165
-166  // We have a single lock for master 
 zk to prevent deadlocks. Having
-167  //  one lock for ZK and one lock for 
master is not possible:
-168  //  When creating a connection to 
master, we need a connection to ZK to get
-169  //  its address. But another thread 
could have taken the ZK lock, and could
-170  //  be waiting for the master lock 
= deadlock.
-171  private final Object masterAndZKLock = 
new Object();
-172
-173  // thread executor shared by all Table 
instances created
-174  // by this connection
-175  private volatile ExecutorService 
batchPool = null;
-176  // meta thread executor shared by all 
Table instances created
-177  // by this connection
-178  private volatile ExecutorService 
metaLookupPool = null;
-179  private volatile boolean cleanupPool = 
false;
-180
-181  private final Configuration conf;
-182
-183  // cache the configuration value for 
tables so that we can avoid calling
-184  // the expensive Configuration to fetch 
the value multiple times.
-185  private final ConnectionConfiguration 
connectionConfig;
-186
-187  // Client rpc instance.
-188  private final RpcClient rpcClient;
-189
-190  private final MetaCache metaCache;
-191  private final MetricsConnection 
metrics;
-192
-193  protected User user;
-194
-195  private final RpcRetryingCallerFactory 
rpcCallerFactory;
-196
-197  private final RpcControllerFactory 
rpcControllerFactory;
-198
-199  private final RetryingCallerInterceptor 
interceptor;
-200
-201  /**
-202   * Cluster registry of basic info such 
as clusterid and meta region location.
-203   */
-204  Registry registry;
-205
-206  private final ClientBackoffPolicy 
backoffPolicy;
-207
-208  /**
-209   * Allow setting an alternate 
BufferedMutator implementation via
-210   * config. If null, use default.
-211   */
-212  private final String 
alternateBufferedMutatorClassName;
-213
-214  /**
-215   * constructor
-216   * @param conf Configuration object
-217   */
-218  ConnectionImplementation(Configuration 
conf,
-219   
ExecutorService pool, User user) throws IOException {
-220this.conf = conf;
-221this.user = user;
-222this.batchPool = pool;
-223this.connectionConfig = new 
ConnectionConfiguration(conf);
-224this.closed = false;
-225this.pause = 
conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
-226
HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
-227long configuredPauseForCQTBE = 
conf.getLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, pause);
-228if (configuredPauseForCQTBE  
pause) {
-229  LOG.warn("The " + 
HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE + " setting: "
-230  + configuredPauseForCQTBE + " 
is smaller than " + HConstants.HBASE_CLIENT_PAUSE
-231  + ", will use " + pause + " 
instead.");
-232  this.pauseForCQTBE = pause;
-233} else {
-234  this.pauseForCQTBE = 
configuredPauseForCQTBE;
-235}
-236this.useMetaReplicas = 
conf.getBoolean(HConstants.USE_META_REPLICAS,
-237  
HConstants.DEFAULT_USE_META_REPLICAS);
-238// how many times to try, one more 
than max *retry* 
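
The constructor hunk above reads HConstants.HBASE_CLIENT_PAUSE and its CallQueueTooBigException
variant, falling back to the base pause whenever the CQTBE value is configured smaller. A minimal
standalone sketch of that selection logic, using only the HConstants keys named in the diff; the
class name is invented for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

// Standalone sketch of the pause / pauseForCQTBE selection shown in the constructor above.
public class PauseConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
    long configuredPauseForCQTBE = conf.getLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, pause);
    // Same guard as in the diff: never use a CQTBE pause smaller than the base pause.
    long pauseForCQTBE = configuredPauseForCQTBE < pause ? pause : configuredPauseForCQTBE;
    System.out.println("pause=" + pause + "ms, pauseForCQTBE=" + pauseForCQTBE + "ms");
  }
}
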

[38/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/org/apache/hadoop/hbase/client/Increment.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Increment.html 
b/devapidocs/org/apache/hadoop/hbase/client/Increment.html
index beedcc1..fa6d2e0 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Increment.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Increment.html
@@ -532,7 +532,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 getTimeRange
-publicTimeRangegetTimeRange()
+publicTimeRangegetTimeRange()
 Gets the TimeRange used for this increment.
 
 Returns:
@@ -546,7 +546,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 setTimeRange
-publicIncrementsetTimeRange(longminStamp,
+publicIncrementsetTimeRange(longminStamp,
   longmaxStamp)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Sets the TimeRange to be used on the Get for this increment.
@@ -574,7 +574,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 setReturnResults
-publicIncrementsetReturnResults(booleanreturnResults)
+publicIncrementsetReturnResults(booleanreturnResults)
 
 Overrides:
 setReturnResultsin
 classMutation
@@ -591,7 +591,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 isReturnResults
-publicbooleanisReturnResults()
+publicbooleanisReturnResults()
 
 Overrides:
 isReturnResultsin
 classMutation
@@ -606,7 +606,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 numFamilies
-publicintnumFamilies()
+publicintnumFamilies()
 Method for retrieving the number of families to increment 
from
 
 Overrides:
@@ -622,7 +622,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 hasFamilies
-publicbooleanhasFamilies()
+publicbooleanhasFamilies()
 Method for checking if any families have been inserted into 
this Increment
 
 Returns:
@@ -636,7 +636,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 getFamilyMapOfLongs
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Mapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapbyte[],http://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">LonggetFamilyMapOfLongs()
+publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Mapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapbyte[],http://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">LonggetFamilyMapOfLongs()
 Before 0.95, when you called Increment#getFamilyMap(), you 
got back
  a map of families to a list of Longs.  Now, Mutation.getFamilyCellMap()
 returns
  families by list of Cells.  This method has been added so you can have the
@@ -655,7 +655,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 toString
-publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtoString()
+publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtoString()
 Description copied from 
class:Operation
 Produces a string representation of this Operation. It 
defaults to a JSON
  representation, but falls back to a string representation of the
@@ -674,7 +674,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 compareTo
-publicintcompareTo(Rowi)
+publicintcompareTo(Rowi)
 
 Specified by:
 http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true#compareTo-T-;
 title="class or interface in java.lang">compareToin 
interfacehttp://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableRow
@@ -689,7 +689,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 hashCode
-publicinthashCode()
+publicinthashCode()
 
 Overrides:
 http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--;
 title="class or interface in java.lang">hashCodein 
classhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
@@ -702,7 +702,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 equals

hbase-site git commit: INFRA-10751 Empty commit

2017-05-19 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site c635e71ba -> b2ec5b9cb


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/b2ec5b9c
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/b2ec5b9c
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/b2ec5b9c

Branch: refs/heads/asf-site
Commit: b2ec5b9cb05354431142406be1e7693bc85361f8
Parents: c635e71
Author: jenkins 
Authored: Fri May 19 14:58:50 2017 +
Committer: jenkins 
Committed: Fri May 19 14:58:50 2017 +

--

--




[36/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.html 
b/devapidocs/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.html
index 44d74bd..78064cf 100644
--- a/devapidocs/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.html
+++ b/devapidocs/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.html
@@ -225,18 +225,20 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Constructor and Description
 
 
-RecoverableZooKeeper(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringquorumServers,
+RecoverableZooKeeper(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringquorumServers,
 intsessionTimeout,
 org.apache.zookeeper.Watcherwatcher,
 intmaxRetries,
-intretryIntervalMillis)
+intretryIntervalMillis,
+intmaxSleepTime)
 
 
-RecoverableZooKeeper(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringquorumServers,
+RecoverableZooKeeper(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringquorumServers,
 intsessionTimeout,
 org.apache.zookeeper.Watcherwatcher,
 intmaxRetries,
 intretryIntervalMillis,
+intmaxSleepTime,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringidentifier)
 
 
@@ -602,7 +604,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 Constructor Detail
-
+
 
 
 
@@ -612,7 +614,8 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 intsessionTimeout,
 org.apache.zookeeper.Watcherwatcher,
 intmaxRetries,
-intretryIntervalMillis)
+intretryIntervalMillis,
+intmaxSleepTime)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -620,7 +623,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
-
+
 
 
 
@@ -631,6 +634,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 org.apache.zookeeper.Watcherwatcher,
 intmaxRetries,
 intretryIntervalMillis,
+intmaxSleepTime,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringidentifier)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.JaasConfiguration.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.JaasConfiguration.html 
b/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.JaasConfiguration.html
index 39fd006..87fd177 100644
--- a/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.JaasConfiguration.html
+++ b/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.JaasConfiguration.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class ZKUtil.JaasConfiguration
+private static class ZKUtil.JaasConfiguration
 extends http://docs.oracle.com/javase/8/docs/api/javax/security/auth/login/Configuration.html?is-external=true;
 title="class or interface in javax.security.auth.login">Configuration
 A JAAS configuration that defines the login modules that we 
want to use for login.
 
@@ -280,7 +280,7 @@ extends http://docs.oracle.com/javase/8/docs/api/javax/security/auth/lo
 
 
 SERVER_KEYTAB_KERBEROS_CONFIG_NAME
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String SERVER_KEYTAB_KERBEROS_CONFIG_NAME
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String SERVER_KEYTAB_KERBEROS_CONFIG_NAME
 
 See Also:
 Constant
 Field Values
@@ -293,7 +293,7 @@ extends 
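
The constructor summary above shows RecoverableZooKeeper gaining a maxSleepTime argument alongside
maxRetries and retryIntervalMillis, i.e. an upper bound on the retry sleep. A hypothetical,
standalone sketch of a capped backoff in that spirit; it is not the HBase implementation, and the
class name, helper name and constants are invented for illustration:

import java.util.concurrent.ThreadLocalRandom;

// Hypothetical sketch: retry sleep grows with the attempt count but never exceeds maxSleepTimeMs.
public class BoundedBackoffSketch {
  /** Sleep time in ms for the given attempt, capped at maxSleepTimeMs. */
  static long getSleepTime(long baseSleepMs, int attempt, long maxSleepTimeMs) {
    long sleep = baseSleepMs * (1L << Math.min(attempt, 30));    // exponential growth, shift kept in range
    sleep += ThreadLocalRandom.current().nextLong(baseSleepMs);  // a little jitter
    return Math.min(sleep, maxSleepTimeMs);                      // the upper limit the new parameter supplies
  }

  public static void main(String[] args) {
    for (int attempt = 0; attempt < 10; attempt++) {
      System.out.println("attempt " + attempt + " -> sleep " + getSleepTime(100, attempt, 60_000) + " ms");
    }
  }
}
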

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
index c7fc597..5732c92 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
@@ -69,15 +69,15 @@
 061  requiredArguments = {
 062@org.jamon.annotations.Argument(name 
= "master", type = "HMaster")},
 063  optionalArguments = {
-064@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
-065@org.jamon.annotations.Argument(name 
= "frags", type = "MapString,Integer"),
-066@org.jamon.annotations.Argument(name 
= "servers", type = "ListServerName"),
-067@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
-068@org.jamon.annotations.Argument(name 
= "format", type = "String"),
-069@org.jamon.annotations.Argument(name 
= "deadServers", type = "SetServerName"),
-070@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName"),
-071@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
-072@org.jamon.annotations.Argument(name 
= "filter", type = "String")})
+064@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
+065@org.jamon.annotations.Argument(name 
= "servers", type = "ListServerName"),
+066@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
+067@org.jamon.annotations.Argument(name 
= "deadServers", type = "SetServerName"),
+068@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
+069@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName"),
+070@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
+071@org.jamon.annotations.Argument(name 
= "format", type = "String"),
+072@org.jamon.annotations.Argument(name 
= "frags", type = "MapString,Integer")})
 073public class MasterStatusTmpl
 074  extends 
org.jamon.AbstractTemplateProxy
 075{
@@ -118,159 +118,159 @@
 110  return m_master;
 111}
 112private HMaster m_master;
-113// 25, 1
-114public void 
setCatalogJanitorEnabled(boolean catalogJanitorEnabled)
+113// 26, 1
+114public void setFilter(String 
filter)
 115{
-116  // 25, 1
-117  m_catalogJanitorEnabled = 
catalogJanitorEnabled;
-118  
m_catalogJanitorEnabled__IsNotDefault = true;
+116  // 26, 1
+117  m_filter = filter;
+118  m_filter__IsNotDefault = true;
 119}
-120public boolean 
getCatalogJanitorEnabled()
+120public String getFilter()
 121{
-122  return m_catalogJanitorEnabled;
+122  return m_filter;
 123}
-124private boolean 
m_catalogJanitorEnabled;
-125public boolean 
getCatalogJanitorEnabled__IsNotDefault()
+124private String m_filter;
+125public boolean 
getFilter__IsNotDefault()
 126{
-127  return 
m_catalogJanitorEnabled__IsNotDefault;
+127  return m_filter__IsNotDefault;
 128}
-129private boolean 
m_catalogJanitorEnabled__IsNotDefault;
-130// 21, 1
-131public void 
setFrags(MapString,Integer frags)
+129private boolean 
m_filter__IsNotDefault;
+130// 23, 1
+131public void 
setServers(ListServerName servers)
 132{
-133  // 21, 1
-134  m_frags = frags;
-135  m_frags__IsNotDefault = true;
+133  // 23, 1
+134  m_servers = servers;
+135  m_servers__IsNotDefault = true;
 136}
-137public MapString,Integer 
getFrags()
+137public ListServerName 
getServers()
 138{
-139  return m_frags;
+139  return m_servers;
 140}
-141private MapString,Integer 
m_frags;
-142public boolean 
getFrags__IsNotDefault()
+141private ListServerName 
m_servers;
+142public boolean 
getServers__IsNotDefault()
 143{
-144  return m_frags__IsNotDefault;
+144  return m_servers__IsNotDefault;
 145}
-146private boolean 
m_frags__IsNotDefault;
-147// 23, 1
-148public void 
setServers(ListServerName servers)
+146private boolean 
m_servers__IsNotDefault;
+147// 29, 1
+148public void 
setAssignmentManager(AssignmentManager assignmentManager)
 149{
-150  // 23, 1
-151  m_servers = servers;
-152  m_servers__IsNotDefault = true;
+150  // 29, 1
+151  m_assignmentManager = 
assignmentManager;
+152  m_assignmentManager__IsNotDefault = 
true;
 153}
-154public ListServerName 
getServers()
+154public AssignmentManager 
getAssignmentManager()
 155{
-156  return m_servers;
+156  return m_assignmentManager;
 157}
-158 

[35/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.html 
b/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.html
index 0ea756b..bb52f77 100644
--- a/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.html
+++ b/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.html
@@ -870,7 +870,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 loginServer
-public staticvoidloginServer(org.apache.hadoop.conf.Configurationconf,
+public staticvoidloginServer(org.apache.hadoop.conf.Configurationconf,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringkeytabFileKey,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringuserNameKey,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringhostname)
@@ -898,7 +898,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 loginClient
-public staticvoidloginClient(org.apache.hadoop.conf.Configurationconf,
+public staticvoidloginClient(org.apache.hadoop.conf.Configurationconf,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringkeytabFileKey,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringuserNameKey,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringhostname)
@@ -926,7 +926,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 login
-private staticvoidlogin(org.apache.hadoop.conf.Configurationconf,
+private staticvoidlogin(org.apache.hadoop.conf.Configurationconf,
   http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringkeytabFileKey,
   http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringuserNameKey,
   http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringhostname,
@@ -958,7 +958,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 joinZNode
-public statichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringjoinZNode(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringprefix,
+public statichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringjoinZNode(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringprefix,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringsuffix)
 Join the prefix znode name with the suffix znode name to 
generate a proper
  full znode name.
@@ -979,7 +979,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getParent
-public statichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetParent(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringnode)
+public statichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetParent(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringnode)
 Returns the full path of the immediate parent of the 
specified node.
 
 Parameters:
@@ -995,7 +995,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getNodeName
-public statichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetNodeName(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringpath)
+public statichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
index 8adbacb..ac13492 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
@@ -315,1063 +315,1070 @@
 307  /** Default HBase client operation 
timeout, which is tantamount to a blocking call */
 308  public static final int 
DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT = 120;
 309
-310  /** Used to construct the name of the 
log directory for a region server */
-311  public static final String 
HREGION_LOGDIR_NAME = "WALs";
-312
-313  /** Used to construct the name of the 
splitlog directory for a region server */
-314  public static final String 
SPLIT_LOGDIR_NAME = "splitWAL";
-315
-316  /** Like the previous, but for old logs 
that are about to be deleted */
-317  public static final String 
HREGION_OLDLOGDIR_NAME = "oldWALs";
-318
-319  /** Staging dir used by bulk load */
-320  public static final String 
BULKLOAD_STAGING_DIR_NAME = "staging";
-321
-322  public static final String 
CORRUPT_DIR_NAME = "corrupt";
-323
-324  /** Used by HBCK to sideline backup 
data */
-325  public static final String 
HBCK_SIDELINEDIR_NAME = ".hbck";
-326
-327  /** Any artifacts left from migration 
can be moved here */
-328  public static final String 
MIGRATION_NAME = ".migration";
-329
-330  /**
-331   * The directory from which 
co-processor/custom filter jars can be loaded
-332   * dynamically by the region servers. 
This value can be overridden by the
-333   * hbase.dynamic.jars.dir config.
-334   */
-335  public static final String LIB_DIR = 
"lib";
+310  /** Parameter name for HBase client 
meta replica scan call timeout. */
+311  public static final String 
HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT =
+312  
"hbase.client.meta.replica.scan.timeout";
+313
+314  /** Default HBase client meta replica 
scan call timeout, 1 second */
+315  public static final int 
HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT_DEFAULT = 100;
+316
+317  /** Used to construct the name of the 
log directory for a region server */
+318  public static final String 
HREGION_LOGDIR_NAME = "WALs";
+319
+320  /** Used to construct the name of the 
splitlog directory for a region server */
+321  public static final String 
SPLIT_LOGDIR_NAME = "splitWAL";
+322
+323  /** Like the previous, but for old logs 
that are about to be deleted */
+324  public static final String 
HREGION_OLDLOGDIR_NAME = "oldWALs";
+325
+326  /** Staging dir used by bulk load */
+327  public static final String 
BULKLOAD_STAGING_DIR_NAME = "staging";
+328
+329  public static final String 
CORRUPT_DIR_NAME = "corrupt";
+330
+331  /** Used by HBCK to sideline backup 
data */
+332  public static final String 
HBCK_SIDELINEDIR_NAME = ".hbck";
+333
+334  /** Any artifacts left from migration 
can be moved here */
+335  public static final String 
MIGRATION_NAME = ".migration";
 336
-337  /** Used to construct the name of the 
compaction directory during compaction */
-338  public static final String 
HREGION_COMPACTIONDIR_NAME = "compaction.dir";
-339
-340  /** Conf key for the max file size 
after which we split the region */
-341  public static final String 
HREGION_MAX_FILESIZE =
-342  "hbase.hregion.max.filesize";
+337  /**
+338   * The directory from which 
co-processor/custom filter jars can be loaded
+339   * dynamically by the region servers. 
This value can be overridden by the
+340   * hbase.dynamic.jars.dir config.
+341   */
+342  public static final String LIB_DIR = 
"lib";
 343
-344  /** Default maximum file size */
-345  public static final long 
DEFAULT_MAX_FILE_SIZE = 10 * 1024 * 1024 * 1024L;
+344  /** Used to construct the name of the 
compaction directory during compaction */
+345  public static final String 
HREGION_COMPACTIONDIR_NAME = "compaction.dir";
 346
-347  /**
-348   * Max size of single row for Get's or 
Scan's without in-row scanning flag set.
-349   */
-350  public static final String 
TABLE_MAX_ROWSIZE_KEY = "hbase.table.max.rowsize";
-351
-352  /**
-353   * Default max row size (1 Gb).
-354   */
-355  public static final long 
TABLE_MAX_ROWSIZE_DEFAULT = 1024 * 1024 * 1024L;
-356
-357  /**
-358   * The max number of threads used for 
opening and closing stores or store
-359   * files in parallel
-360   */
-361  public static final String 
HSTORE_OPEN_AND_CLOSE_THREADS_MAX =
-362
"hbase.hstore.open.and.close.threads.max";
+347  /** Conf key for the max file size 
after which we split the region */
+348  public static final String 
HREGION_MAX_FILESIZE =
+349  "hbase.hregion.max.filesize";
+350
+351  /** Default maximum file size */
+352  public static final long 
DEFAULT_MAX_FILE_SIZE = 10 * 1024 * 1024 * 1024L;
+353
+354  

[40/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html 
b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
index 93079d7..a6ce1bc 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":9,"i48":10,"i49":10,"i50":10,"i51":10,"i52":42,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":9,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":9,"i48":10,"i49":10,"i50":10,"i51":10,"i52":42,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":9,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -262,76 +262,80 @@ implements metaRegionLock
 
 
+private int
+metaReplicaCallTimeoutScanInMicroSecond
+
+
 private MetricsConnection
 metrics
 
-
+
 private static NonceGenerator
 nonceGenerator
 Global nonceGenerator shared per client.Currently there's 
no reason to limit its scope.
 
 
-
+
 private static http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 nonceGeneratorCreateLock
 The nonce generator lock.
 
 
-
+
 private int
 numTries
 
-
+
 private long
 pause
 
-
+
 private long
 pauseForCQTBE
 
-
+
 (package private) Registry
 registry
 Cluster registry of basic info such as clusterid and meta 
region location.
 
 
-
+
 private static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 RESOLVE_HOSTNAME_ON_FAIL_KEY
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 RETRIES_BY_SERVER_KEY
 
-
+
 private RpcRetryingCallerFactory
 rpcCallerFactory
 
-
+
 private RpcClient
 rpcClient
 
-
+
 private RpcControllerFactory
 rpcControllerFactory
 
-
+
 (package private) int
 rpcTimeout
 
-
+
 private ServerStatisticTracker
 stats
 
-
+
 private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentMap.html?is-external=true;
 title="class or interface in java.util.concurrent">ConcurrentMaphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 stubs
 
-
+
 private boolean
 useMetaReplicas
 
-
+
 protected User
 user
 
@@ -783,27 +787,31 @@ implements setupRegistry()
 
 
+(package private) void
+setUseMetaReplicas(booleanuseMetaReplicas)
+
+
 private void
 shutdownBatchPool(http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ExecutorServicepool)
 
-
+
 private void
 shutdownPools()
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 toString()
 An identifier that will remain the same for a given 
connection.
 
 
-
+
 (package private) void
 updateCachedLocation(HRegionInfohri,
 ServerNamesource,
 ServerNameserverName,
 longseqNum)
 
-
+
 void
 updateCachedLocations(TableNametableName,
  byte[]regionName,
@@ -911,7 +919,16 @@ implements 
 
 useMetaReplicas
-private finalboolean useMetaReplicas
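
The member and method summaries above show ConnectionImplementation picking up a
metaReplicaCallTimeoutScanInMicroSecond field and a package-private setUseMetaReplicas(boolean)
setter next to the existing useMetaReplicas flag. From application code, the opt-in path remains
the configuration key read in the constructor; a hedged sketch, with connection handling kept to a
bare minimum:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: opt a client connection into meta replica reads via configuration.
public class UseMetaReplicasSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(HConstants.USE_META_REPLICAS, true);  // "hbase.meta.replicas.use"
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      System.out.println("connection created with meta replicas enabled: " + conn);
    }
  }
}
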

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
index be7f8e5..37574d7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
@@ -147,1885 +147,1897 @@
 139  private final boolean 
hostnamesCanChange;
 140  private final long pause;
 141  private final long pauseForCQTBE;// 
pause for CallQueueTooBigException, if specified
-142  private final boolean 
useMetaReplicas;
-143  private final int numTries;
-144  final int rpcTimeout;
-145
-146  /**
-147   * Global nonceGenerator shared per 
client.Currently there's no reason to limit its scope.
-148   * Once it's set under 
nonceGeneratorCreateLock, it is never unset or changed.
-149   */
-150  private static volatile NonceGenerator 
nonceGenerator = null;
-151  /** The nonce generator lock. Only 
taken when creating Connection, which gets a private copy. */
-152  private static final Object 
nonceGeneratorCreateLock = new Object();
-153
-154  private final AsyncProcess 
asyncProcess;
-155  // single tracker per connection
-156  private final ServerStatisticTracker 
stats;
-157
-158  private volatile boolean closed;
-159  private volatile boolean aborted;
-160
-161  // package protected for the tests
-162  ClusterStatusListener 
clusterStatusListener;
-163
-164  private final Object metaRegionLock = 
new Object();
-165
-166  // We have a single lock for master 
 zk to prevent deadlocks. Having
-167  //  one lock for ZK and one lock for 
master is not possible:
-168  //  When creating a connection to 
master, we need a connection to ZK to get
-169  //  its address. But another thread 
could have taken the ZK lock, and could
-170  //  be waiting for the master lock 
= deadlock.
-171  private final Object masterAndZKLock = 
new Object();
-172
-173  // thread executor shared by all Table 
instances created
-174  // by this connection
-175  private volatile ExecutorService 
batchPool = null;
-176  // meta thread executor shared by all 
Table instances created
-177  // by this connection
-178  private volatile ExecutorService 
metaLookupPool = null;
-179  private volatile boolean cleanupPool = 
false;
-180
-181  private final Configuration conf;
-182
-183  // cache the configuration value for 
tables so that we can avoid calling
-184  // the expensive Configuration to fetch 
the value multiple times.
-185  private final ConnectionConfiguration 
connectionConfig;
-186
-187  // Client rpc instance.
-188  private final RpcClient rpcClient;
-189
-190  private final MetaCache metaCache;
-191  private final MetricsConnection 
metrics;
-192
-193  protected User user;
-194
-195  private final RpcRetryingCallerFactory 
rpcCallerFactory;
-196
-197  private final RpcControllerFactory 
rpcControllerFactory;
-198
-199  private final RetryingCallerInterceptor 
interceptor;
-200
-201  /**
-202   * Cluster registry of basic info such 
as clusterid and meta region location.
-203   */
-204  Registry registry;
-205
-206  private final ClientBackoffPolicy 
backoffPolicy;
-207
-208  /**
-209   * Allow setting an alternate 
BufferedMutator implementation via
-210   * config. If null, use default.
-211   */
-212  private final String 
alternateBufferedMutatorClassName;
-213
-214  /**
-215   * constructor
-216   * @param conf Configuration object
-217   */
-218  ConnectionImplementation(Configuration 
conf,
-219   
ExecutorService pool, User user) throws IOException {
-220this.conf = conf;
-221this.user = user;
-222this.batchPool = pool;
-223this.connectionConfig = new 
ConnectionConfiguration(conf);
-224this.closed = false;
-225this.pause = 
conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
-226
HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
-227long configuredPauseForCQTBE = 
conf.getLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, pause);
-228if (configuredPauseForCQTBE  
pause) {
-229  LOG.warn("The " + 
HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE + " setting: "
-230  + configuredPauseForCQTBE + " 
is smaller than " + HConstants.HBASE_CLIENT_PAUSE
-231  + ", will use " + pause + " 
instead.");
-232  this.pauseForCQTBE = pause;
-233} else {
-234  this.pauseForCQTBE = 
configuredPauseForCQTBE;
-235}
-236this.useMetaReplicas = 
conf.getBoolean(HConstants.USE_META_REPLICAS,
-237  
HConstants.DEFAULT_USE_META_REPLICAS);
-238// how many times to try, one more 
than max *retry* time
-239this.numTries = 

[45/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/apidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Increment.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
index 7b99d74..366fcc2 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
@@ -127,222 +127,219 @@
 119if (family == null) {
 120  throw new 
IllegalArgumentException("family cannot be null");
 121}
-122if (qualifier == null) {
-123  throw new 
IllegalArgumentException("qualifier cannot be null");
-124}
-125ListCell list = 
getCellList(family);
-126KeyValue kv = 
createPutKeyValue(family, qualifier, ts, Bytes.toBytes(amount));
-127list.add(kv);
-128
familyMap.put(CellUtil.cloneFamily(kv), list);
-129return this;
-130  }
-131
-132  /**
-133   * Gets the TimeRange used for this 
increment.
-134   * @return TimeRange
-135   */
-136  public TimeRange getTimeRange() {
-137return this.tr;
-138  }
-139
-140  /**
-141   * Sets the TimeRange to be used on the 
Get for this increment.
-142   * p
-143   * This is useful for when you have 
counters that only last for specific
-144   * periods of time (ie. counters that 
are partitioned by time).  By setting
-145   * the range of valid times for this 
increment, you can potentially gain
-146   * some performance with a more optimal 
Get operation.
-147   * p
-148   * This range is used as [minStamp, 
maxStamp).
-149   * @param minStamp minimum timestamp 
value, inclusive
-150   * @param maxStamp maximum timestamp 
value, exclusive
-151   * @throws IOException if invalid time 
range
-152   * @return this
-153   */
-154  public Increment setTimeRange(long 
minStamp, long maxStamp)
-155  throws IOException {
-156tr = new TimeRange(minStamp, 
maxStamp);
-157return this;
-158  }
-159  
-160  /**
-161   * @param returnResults True (default) 
if the increment operation should return the results. A
-162   *  client that is not 
interested in the result can save network bandwidth setting this
-163   *  to false.
-164   */
-165  public Increment 
setReturnResults(boolean returnResults) {
-166
super.setReturnResults(returnResults);
-167return this;
-168  }
-169
-170  /**
-171   * @return current setting for 
returnResults
-172   */
-173  // This method makes public the 
superclasses's protected method.
-174  public boolean isReturnResults() {
-175return super.isReturnResults();
-176  }
-177
-178  /**
-179   * Method for retrieving the number of 
families to increment from
-180   * @return number of families
-181   */
-182  @Override
-183  public int numFamilies() {
-184return this.familyMap.size();
-185  }
-186
-187  /**
-188   * Method for checking if any families 
have been inserted into this Increment
-189   * @return true if familyMap is non 
empty false otherwise
-190   */
-191  public boolean hasFamilies() {
-192return !this.familyMap.isEmpty();
-193  }
-194
-195  /**
-196   * Before 0.95, when you called 
Increment#getFamilyMap(), you got back
-197   * a map of families to a list of 
Longs.  Now, {@link #getFamilyCellMap()} returns
-198   * families by list of Cells.  This 
method has been added so you can have the
-199   * old behavior.
-200   * @return Map of families to a Map of 
qualifiers and their Long increments.
-201   * @since 0.95.0
-202   */
-203  public Mapbyte[], 
NavigableMapbyte [], Long getFamilyMapOfLongs() {
-204NavigableMapbyte[], 
ListCell map = super.getFamilyCellMap();
-205Mapbyte [], 
NavigableMapbyte[], Long results = new 
TreeMap(Bytes.BYTES_COMPARATOR);
-206for (Map.Entrybyte [], 
ListCell entry: map.entrySet()) {
-207  NavigableMapbyte [], Long 
longs = new TreeMap(Bytes.BYTES_COMPARATOR);
-208  for (Cell cell: entry.getValue()) 
{
-209
longs.put(CellUtil.cloneQualifier(cell),
-210
Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), 
cell.getValueLength()));
-211  }
-212  results.put(entry.getKey(), 
longs);
-213}
-214return results;
-215  }
-216
-217  /**
-218   * @return String
-219   */
-220  @Override
-221  public String toString() {
-222StringBuilder sb = new 
StringBuilder();
-223sb.append("row=");
-224
sb.append(Bytes.toStringBinary(this.row));
-225if(this.familyMap.isEmpty()) {
-226  sb.append(", no columns set to be 
incremented");
-227  return sb.toString();
-228}
-229sb.append(", families=");
-230boolean moreThanOne = false;
-231for(Map.Entrybyte [], 
ListCell entry: this.familyMap.entrySet()) {
-232  if(moreThanOne) {
-233sb.append("), ");
-234  } else {
-235moreThanOne = true;
-236sb.append("{");
-237  }
-238  sb.append("(family=");
-239  

[49/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/apache_hbase_reference_guide.pdfmarks
--
diff --git a/apache_hbase_reference_guide.pdfmarks 
b/apache_hbase_reference_guide.pdfmarks
index c4634a1..f5a0e1b 100644
--- a/apache_hbase_reference_guide.pdfmarks
+++ b/apache_hbase_reference_guide.pdfmarks
@@ -2,8 +2,8 @@
   /Author (Apache HBase Team)
   /Subject ()
   /Keywords ()
-  /ModDate (D:20170518144944)
-  /CreationDate (D:20170518144944)
+  /ModDate (D:20170519144842)
+  /CreationDate (D:20170519144842)
   /Creator (Asciidoctor PDF 1.5.0.alpha.6, based on Prawn 1.2.1)
   /Producer ()
   /DOCINFO pdfmark

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/apidocs/constant-values.html
--
diff --git a/apidocs/constant-values.html b/apidocs/constant-values.html
index ea1cadd..58b34e8 100644
--- a/apidocs/constant-values.html
+++ b/apidocs/constant-values.html
@@ -1340,6 +1340,20 @@
 "hbase.client.max.total.tasks"
 
 
+
+
+publicstaticfinalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT
+"hbase.client.meta.replica.scan.timeout"
+
+
+
+
+publicstaticfinalint
+HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT_DEFAULT
+100
+
+
 
 
 publicstaticfinalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/apidocs/index-all.html
--
diff --git a/apidocs/index-all.html b/apidocs/index-all.html
index b268fc6..c19c537 100644
--- a/apidocs/index-all.html
+++ b/apidocs/index-all.html
@@ -7415,6 +7415,14 @@
 
 The maximum number of concurrent connections the client 
will maintain.
 
+HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT
 - Static variable in class org.apache.hadoop.hbase.HConstants
+
+Parameter name for HBase client meta replica scan call 
timeout.
+
+HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT_DEFAULT
 - Static variable in class org.apache.hadoop.hbase.HConstants
+
+Default HBase client meta replica scan call timeout, 1 
second
+
 HBASE_CLIENT_META_OPERATION_TIMEOUT
 - Static variable in class org.apache.hadoop.hbase.HConstants
 
 Parameter name for HBase client operation timeout.
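
The constant-values and index entries above add HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT (the "MEAT"
spelling is verbatim from the source) and its default to HConstants. A hedged sketch of reading the
new key the same way other client timeouts are read; the units and default are whatever HConstants
defines, and the class name is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

// Sketch: resolve the new meta-replica scan timeout from configuration, falling back to the default.
public class MetaReplicaTimeoutSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    int timeout = conf.getInt(HConstants.HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT,
        HConstants.HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT_DEFAULT);
    System.out.println("meta replica scan timeout = " + timeout);
  }
}
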



[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.Modify.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.Modify.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.Modify.html
index 8adbacb..ac13492 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.Modify.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.Modify.html
@@ -315,1063 +315,1070 @@
 307  /** Default HBase client operation 
timeout, which is tantamount to a blocking call */
 308  public static final int 
DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT = 120;
 309
-310  /** Used to construct the name of the 
log directory for a region server */
-311  public static final String 
HREGION_LOGDIR_NAME = "WALs";
-312
-313  /** Used to construct the name of the 
splitlog directory for a region server */
-314  public static final String 
SPLIT_LOGDIR_NAME = "splitWAL";
-315
-316  /** Like the previous, but for old logs 
that are about to be deleted */
-317  public static final String 
HREGION_OLDLOGDIR_NAME = "oldWALs";
-318
-319  /** Staging dir used by bulk load */
-320  public static final String 
BULKLOAD_STAGING_DIR_NAME = "staging";
-321
-322  public static final String 
CORRUPT_DIR_NAME = "corrupt";
-323
-324  /** Used by HBCK to sideline backup 
data */
-325  public static final String 
HBCK_SIDELINEDIR_NAME = ".hbck";
-326
-327  /** Any artifacts left from migration 
can be moved here */
-328  public static final String 
MIGRATION_NAME = ".migration";
-329
-330  /**
-331   * The directory from which 
co-processor/custom filter jars can be loaded
-332   * dynamically by the region servers. 
This value can be overridden by the
-333   * hbase.dynamic.jars.dir config.
-334   */
-335  public static final String LIB_DIR = 
"lib";
+310  /** Parameter name for HBase client 
meta replica scan call timeout. */
+311  public static final String 
HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT =
+312  
"hbase.client.meta.replica.scan.timeout";
+313
+314  /** Default HBase client meta replica 
scan call timeout, 1 second */
+315  public static final int 
HBASE_CLIENT_MEAT_REPLICA_SCAN_TIMEOUT_DEFAULT = 100;
+316
+317  /** Used to construct the name of the 
log directory for a region server */
+318  public static final String 
HREGION_LOGDIR_NAME = "WALs";
+319
+320  /** Used to construct the name of the 
splitlog directory for a region server */
+321  public static final String 
SPLIT_LOGDIR_NAME = "splitWAL";
+322
+323  /** Like the previous, but for old logs 
that are about to be deleted */
+324  public static final String 
HREGION_OLDLOGDIR_NAME = "oldWALs";
+325
+326  /** Staging dir used by bulk load */
+327  public static final String 
BULKLOAD_STAGING_DIR_NAME = "staging";
+328
+329  public static final String 
CORRUPT_DIR_NAME = "corrupt";
+330
+331  /** Used by HBCK to sideline backup 
data */
+332  public static final String 
HBCK_SIDELINEDIR_NAME = ".hbck";
+333
+334  /** Any artifacts left from migration 
can be moved here */
+335  public static final String 
MIGRATION_NAME = ".migration";
 336
-337  /** Used to construct the name of the 
compaction directory during compaction */
-338  public static final String 
HREGION_COMPACTIONDIR_NAME = "compaction.dir";
-339
-340  /** Conf key for the max file size 
after which we split the region */
-341  public static final String 
HREGION_MAX_FILESIZE =
-342  "hbase.hregion.max.filesize";
+337  /**
+338   * The directory from which 
co-processor/custom filter jars can be loaded
+339   * dynamically by the region servers. 
This value can be overridden by the
+340   * hbase.dynamic.jars.dir config.
+341   */
+342  public static final String LIB_DIR = 
"lib";
 343
-344  /** Default maximum file size */
-345  public static final long 
DEFAULT_MAX_FILE_SIZE = 10 * 1024 * 1024 * 1024L;
+344  /** Used to construct the name of the 
compaction directory during compaction */
+345  public static final String 
HREGION_COMPACTIONDIR_NAME = "compaction.dir";
 346
-347  /**
-348   * Max size of single row for Get's or 
Scan's without in-row scanning flag set.
-349   */
-350  public static final String 
TABLE_MAX_ROWSIZE_KEY = "hbase.table.max.rowsize";
-351
-352  /**
-353   * Default max row size (1 Gb).
-354   */
-355  public static final long 
TABLE_MAX_ROWSIZE_DEFAULT = 1024 * 1024 * 1024L;
-356
-357  /**
-358   * The max number of threads used for 
opening and closing stores or store
-359   * files in parallel
-360   */
-361  public static final String 
HSTORE_OPEN_AND_CLOSE_THREADS_MAX =
-362
"hbase.hstore.open.and.close.threads.max";
+347  /** Conf key for the max file size 
after which we split the region */
+348  public static final String 
HREGION_MAX_FILESIZE =
+349  "hbase.hregion.max.filesize";
+350
+351  /** Default maximum file size */
+352  public static final long 
DEFAULT_MAX_FILE_SIZE = 10 

[29/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
index be7f8e5..37574d7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
@@ -147,1885 +147,1897 @@
 139  private final boolean 
hostnamesCanChange;
 140  private final long pause;
 141  private final long pauseForCQTBE;// 
pause for CallQueueTooBigException, if specified
-142  private final boolean 
useMetaReplicas;
-143  private final int numTries;
-144  final int rpcTimeout;
-145
-146  /**
-147   * Global nonceGenerator shared per 
client.Currently there's no reason to limit its scope.
-148   * Once it's set under 
nonceGeneratorCreateLock, it is never unset or changed.
-149   */
-150  private static volatile NonceGenerator 
nonceGenerator = null;
-151  /** The nonce generator lock. Only 
taken when creating Connection, which gets a private copy. */
-152  private static final Object 
nonceGeneratorCreateLock = new Object();
-153
-154  private final AsyncProcess 
asyncProcess;
-155  // single tracker per connection
-156  private final ServerStatisticTracker 
stats;
-157
-158  private volatile boolean closed;
-159  private volatile boolean aborted;
-160
-161  // package protected for the tests
-162  ClusterStatusListener 
clusterStatusListener;
-163
-164  private final Object metaRegionLock = 
new Object();
-165
-166  // We have a single lock for master 
 zk to prevent deadlocks. Having
-167  //  one lock for ZK and one lock for 
master is not possible:
-168  //  When creating a connection to 
master, we need a connection to ZK to get
-169  //  its address. But another thread 
could have taken the ZK lock, and could
-170  //  be waiting for the master lock 
= deadlock.
-171  private final Object masterAndZKLock = 
new Object();
-172
-173  // thread executor shared by all Table 
instances created
-174  // by this connection
-175  private volatile ExecutorService 
batchPool = null;
-176  // meta thread executor shared by all 
Table instances created
-177  // by this connection
-178  private volatile ExecutorService 
metaLookupPool = null;
-179  private volatile boolean cleanupPool = 
false;
-180
-181  private final Configuration conf;
-182
-183  // cache the configuration value for 
tables so that we can avoid calling
-184  // the expensive Configuration to fetch 
the value multiple times.
-185  private final ConnectionConfiguration 
connectionConfig;
-186
-187  // Client rpc instance.
-188  private final RpcClient rpcClient;
-189
-190  private final MetaCache metaCache;
-191  private final MetricsConnection 
metrics;
-192
-193  protected User user;
-194
-195  private final RpcRetryingCallerFactory 
rpcCallerFactory;
-196
-197  private final RpcControllerFactory 
rpcControllerFactory;
-198
-199  private final RetryingCallerInterceptor 
interceptor;
-200
-201  /**
-202   * Cluster registry of basic info such 
as clusterid and meta region location.
-203   */
-204  Registry registry;
-205
-206  private final ClientBackoffPolicy 
backoffPolicy;
-207
-208  /**
-209   * Allow setting an alternate 
BufferedMutator implementation via
-210   * config. If null, use default.
-211   */
-212  private final String 
alternateBufferedMutatorClassName;
-213
-214  /**
-215   * constructor
-216   * @param conf Configuration object
-217   */
-218  ConnectionImplementation(Configuration 
conf,
-219   
ExecutorService pool, User user) throws IOException {
-220this.conf = conf;
-221this.user = user;
-222this.batchPool = pool;
-223this.connectionConfig = new 
ConnectionConfiguration(conf);
-224this.closed = false;
-225this.pause = 
conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
-226
HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
-227long configuredPauseForCQTBE = 
conf.getLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, pause);
-228if (configuredPauseForCQTBE < 
pause) {
-229  LOG.warn("The " + 
HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE + " setting: "
-230  + configuredPauseForCQTBE + " 
is smaller than " + HConstants.HBASE_CLIENT_PAUSE
-231  + ", will use " + pause + " 
instead.");
-232  this.pauseForCQTBE = pause;
-233} else {
-234  this.pauseForCQTBE = 
configuredPauseForCQTBE;
-235}
-236this.useMetaReplicas = 
conf.getBoolean(HConstants.USE_META_REPLICAS,
-237  
HConstants.DEFAULT_USE_META_REPLICAS);
-238// how many times to try, one more 
than max *retry* time
-239this.numTries = 
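For orientation, a minimal standalone sketch of the pause fallback encoded in the hunk above; it uses only the HConstants keys visible in the diff, and the class and variable names are illustrative, not part of the patch:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HConstants;

  public class PauseForCqtbeSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      long pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
          HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
      long configured = conf.getLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, pause);
      // Never retry CallQueueTooBigException faster than the plain client pause.
      long pauseForCQTBE = configured < pause ? pause : configured;
      System.out.println("pause=" + pause + ", pauseForCQTBE=" + pauseForCQTBE);
    }
  }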

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html
index 15c41e8..d1b87db 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html
@@ -724,616 +724,614 @@
 716  npe = new NullPointerException("row 
is null");
 717} else if (family == null) {
 718  npe = new 
NullPointerException("family is null");
-719} else if (qualifier == null) {
-720  npe = new 
NullPointerException("qualifier is null");
-721}
-722if (npe != null) {
-723  throw new IOException(
-724  "Invalid arguments to 
incrementColumnValue", npe);
-725}
-726
-727
NoncedRegionServerCallable<Long> callable =
-728new 
NoncedRegionServerCallable<Long>(this.connection, getName(), row,
-729
this.rpcControllerFactory.newController()) {
-730  @Override
-731  protected Long rpcCall() throws 
Exception {
-732MutateRequest request = 
RequestConverter.buildIncrementRequest(
-733  
getLocation().getRegionInfo().getRegionName(), row, family,
-734  qualifier, amount, durability, 
getNonceGroup(), getNonce());
-735MutateResponse response = 
doMutate(request);
-736Result result = 
ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner());
-737return 
Long.valueOf(Bytes.toLong(result.getValue(family, qualifier)));
-738  }
-739};
-740return rpcCallerFactory.<Long> 
newCaller(this.writeRpcTimeout).
-741callWithRetries(callable, 
this.operationTimeout);
-742  }
-743
-744  /**
-745   * {@inheritDoc}
-746   */
-747  @Override
-748  public boolean checkAndPut(final byte 
[] row,
-749  final byte [] family, final byte [] 
qualifier, final byte [] value,
-750  final Put put)
-751  throws IOException {
-752ClientServiceCallable<Boolean> 
callable = new ClientServiceCallable<Boolean>(this.connection, getName(), 
row,
-753
this.rpcControllerFactory.newController()) {
-754  @Override
-755  protected Boolean rpcCall() throws 
Exception {
-756MutateRequest request = 
RequestConverter.buildMutateRequest(
-757  
getLocation().getRegionInfo().getRegionName(), row, family, qualifier,
-758  new BinaryComparator(value), 
CompareType.EQUAL, put);
-759MutateResponse response = 
doMutate(request);
-760return 
Boolean.valueOf(response.getProcessed());
-761  }
-762};
-763return 
rpcCallerFactory.<Boolean> newCaller(this.writeRpcTimeout).
-764callWithRetries(callable, 
this.operationTimeout);
-765  }
-766
-767  /**
-768   * {@inheritDoc}
-769   */
-770  @Override
-771  public boolean checkAndPut(final byte 
[] row, final byte [] family,
-772  final byte [] qualifier, final 
CompareOp compareOp, final byte [] value,
-773  final Put put)
-774  throws IOException {
-775ClientServiceCallable<Boolean> 
callable =
-776new 
ClientServiceCallable<Boolean>(this.connection, getName(), row,
-777
this.rpcControllerFactory.newController()) {
-778  @Override
-779  protected Boolean rpcCall() throws 
Exception {
-780CompareType compareType = 
CompareType.valueOf(compareOp.name());
-781MutateRequest request = 
RequestConverter.buildMutateRequest(
-782  
getLocation().getRegionInfo().getRegionName(), row, family, qualifier,
-783  new BinaryComparator(value), 
compareType, put);
-784MutateResponse response = 
doMutate(request);
-785return 
Boolean.valueOf(response.getProcessed());
-786  }
-787};
-788return 
rpcCallerFactory.<Boolean> newCaller(this.writeRpcTimeout).
-789callWithRetries(callable, 
this.operationTimeout);
-790  }
-791
-792  /**
-793   * {@inheritDoc}
-794   */
-795  @Override
-796  public boolean checkAndDelete(final 
byte [] row, final byte [] family, final byte [] qualifier,
-797  final byte [] value, final Delete 
delete) throws IOException {
-798return checkAndDelete(row, family, 
qualifier, CompareOp.EQUAL, value, delete);
-799  }
-800
-801  /**
-802   * {@inheritDoc}
-803   */
-804  @Override
-805  public boolean checkAndDelete(final 
byte [] row, final byte [] family,
-806  final byte [] qualifier, final 
CompareOp compareOp, final byte [] value,
-807  final Delete delete)
-808  throws IOException {
-809
CancellableRegionServerCallable<SingleResponse> callable =
-810new 
CancellableRegionServerCallable<SingleResponse>(
-811this.connection, getName(), 
row, this.rpcControllerFactory.newController(),
-812writeRpcTimeout, new 
RetryingTimeTracker().start()) {
-813  @Override
-814  protected SingleResponse rpcCall() 
throws Exception 
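The HTable hunks above route incrementColumnValue/checkAndPut/checkAndDelete through retrying callables; for context, a hedged client-side usage sketch of the same checkAndPut contract (table, family and qualifier names are invented):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CheckAndPutSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Table table = conn.getTable(TableName.valueOf("demo"))) {
        Put put = new Put(Bytes.toBytes("row1"));
        put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("new"));
        // Apply the Put only if f:q currently equals "old"; the call goes through
        // the retrying caller shown in the diff above.
        boolean applied = table.checkAndPut(Bytes.toBytes("row1"), Bytes.toBytes("f"),
            Bytes.toBytes("q"), Bytes.toBytes("old"), put);
        System.out.println("applied=" + applied);
      }
    }
  }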

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.html
index 5f53569..fea5523 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.html
@@ -233,271 +233,256 @@
 225} catch (IOException iox) {
 226  // success
 227}
-228try {
-229  // try null qualifier
-230  ht.incrementColumnValue(ROW, 
FAMILY, null, 5);
-231  fail("Should have thrown 
IOException");
-232} catch (IOException iox) {
-233  // success
-234}
-235// try null row
-236try {
-237  Increment incNoRow = new 
Increment((byte [])null);
-238  incNoRow.addColumn(FAMILY, COLUMN, 
5);
-239  fail("Should have thrown 
IllegalArgumentException");
-240} catch (IllegalArgumentException 
iax) {
-241  // success
-242} catch (NullPointerException npe) 
{
-243  // success
-244}
-245// try null family
-246try {
-247  Increment incNoFamily = new 
Increment(ROW);
-248  incNoFamily.addColumn(null, COLUMN, 
5);
-249  fail("Should have thrown 
IllegalArgumentException");
-250} catch (IllegalArgumentException 
iax) {
-251  // success
-252}
-253// try null qualifier
-254try {
-255  Increment incNoQualifier = new 
Increment(ROW);
-256  incNoQualifier.addColumn(FAMILY, 
null, 5);
-257  fail("Should have thrown 
IllegalArgumentException");
-258} catch (IllegalArgumentException 
iax) {
-259  // success
-260}
-261  }
-262
-263  @Test
-264  public void testIncrementOutOfOrder() 
throws Exception {
-265LOG.info("Starting " + 
this.name.getMethodName());
-266final TableName TABLENAME =
-267
TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
-268Table ht = 
TEST_UTIL.createTable(TABLENAME, FAMILY);
-269
-270byte [][] QUALIFIERS = new byte [][] 
{
-271  Bytes.toBytes("B"), 
Bytes.toBytes("A"), Bytes.toBytes("C")
-272};
+228// try null row
+229try {
+230  Increment incNoRow = new 
Increment((byte [])null);
+231  incNoRow.addColumn(FAMILY, COLUMN, 
5);
+232  fail("Should have thrown 
IllegalArgumentException");
+233} catch (IllegalArgumentException 
iax) {
+234  // success
+235} catch (NullPointerException npe) 
{
+236  // success
+237}
+238// try null family
+239try {
+240  Increment incNoFamily = new 
Increment(ROW);
+241  incNoFamily.addColumn(null, COLUMN, 
5);
+242  fail("Should have thrown 
IllegalArgumentException");
+243} catch (IllegalArgumentException 
iax) {
+244  // success
+245}
+246  }
+247
+248  @Test
+249  public void testIncrementOutOfOrder() 
throws Exception {
+250LOG.info("Starting " + 
this.name.getMethodName());
+251final TableName TABLENAME =
+252
TableName.valueOf(filterStringSoTableNameSafe(this.name.getMethodName()));
+253Table ht = 
TEST_UTIL.createTable(TABLENAME, FAMILY);
+254
+255byte [][] QUALIFIERS = new byte [][] 
{
+256  Bytes.toBytes("B"), 
Bytes.toBytes("A"), Bytes.toBytes("C")
+257};
+258
+259Increment inc = new Increment(ROW);
+260for (int i=0; i<QUALIFIERS.length; 
i++) {
+261  inc.addColumn(FAMILY, 
QUALIFIERS[i], 1);
+262}
+263ht.increment(inc);
+264
+265// Verify expected results
+266Get get = new Get(ROW);
+267Result r = ht.get(get);
+268Cell [] kvs = r.rawCells();
+269assertEquals(3, kvs.length);
+270assertIncrementKey(kvs[0], ROW, 
FAMILY, QUALIFIERS[1], 1);
+271assertIncrementKey(kvs[1], ROW, 
FAMILY, QUALIFIERS[0], 1);
+272assertIncrementKey(kvs[2], ROW, 
FAMILY, QUALIFIERS[2], 1);
 273
-274Increment inc = new Increment(ROW);
-275for (int i=0; i<QUALIFIERS.length; 
i++) {
-276  inc.addColumn(FAMILY, 
QUALIFIERS[i], 1);
-277}
-278ht.increment(inc);
-279
-280// Verify expected results
-281Get get = new Get(ROW);
-282Result r = ht.get(get);
-283Cell [] kvs = r.rawCells();
+274// Now try multiple columns again
+275inc = new Increment(ROW);
+276for (int i=0; i<QUALIFIERS.length; 
i++) {
+277  inc.addColumn(FAMILY, 
QUALIFIERS[i], 1);
+278}
+279ht.increment(inc);
+280
+281// Verify
+282r = ht.get(get);
+283kvs = r.rawCells();
 284assertEquals(3, kvs.length);
-285assertIncrementKey(kvs[0], ROW, 
FAMILY, QUALIFIERS[1], 1);
-286assertIncrementKey(kvs[1], ROW, 
FAMILY, QUALIFIERS[0], 1);
-287assertIncrementKey(kvs[2], ROW, 
FAMILY, QUALIFIERS[2], 1);
-288
-289// Now try multiple columns again
-290inc = new 
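A condensed, hedged sketch of the pattern the test above exercises: one Increment carrying several qualifiers, with the returned cells coming back ordered by qualifier regardless of insertion order (the running cluster, table handle and column family are assumed from the test fixture):

  import static org.junit.Assert.assertEquals;
  import org.apache.hadoop.hbase.Cell;
  import org.apache.hadoop.hbase.CellUtil;
  import org.apache.hadoop.hbase.client.Get;
  import org.apache.hadoop.hbase.client.Increment;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  class IncrementOrderSketch {
    static void incrementOutOfOrder(Table ht, byte[] family) throws Exception {
      byte[] row = Bytes.toBytes("sketchRow");
      byte[][] qualifiers = { Bytes.toBytes("B"), Bytes.toBytes("A"), Bytes.toBytes("C") };
      Increment inc = new Increment(row);
      for (byte[] q : qualifiers) {
        inc.addColumn(family, q, 1);                   // one counter per qualifier
      }
      ht.increment(inc);
      Cell[] cells = ht.get(new Get(row)).rawCells();  // returned sorted by qualifier: A, B, C
      assertEquals(3, cells.length);
      assertEquals("A", Bytes.toString(CellUtil.cloneQualifier(cells[0])));
      assertEquals("B", Bytes.toString(CellUtil.cloneQualifier(cells[1])));
      assertEquals("C", Bytes.toString(CellUtil.cloneQualifier(cells[2])));
    }
  }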

[19/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.CreateAndFailSilent.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.CreateAndFailSilent.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.CreateAndFailSilent.html
index bc14b2e..547dec5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.CreateAndFailSilent.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.CreateAndFailSilent.html
@@ -139,2012 +139,2013 @@
 131int retry = 
conf.getInt("zookeeper.recovery.retry", 3);
 132int retryIntervalMillis =
 133  
conf.getInt("zookeeper.recovery.retry.intervalmill", 1000);
-134zkDumpConnectionTimeOut = 
conf.getInt("zookeeper.dump.connection.timeout",
-1351000);
-136return new 
RecoverableZooKeeper(ensemble, timeout, watcher,
-137retry, retryIntervalMillis, 
identifier);
-138  }
-139
-140  /**
-141   * Log in the current zookeeper server 
process using the given configuration
-142   * keys for the credential file and 
login principal.
-143   *
-144   * <p><strong>This is only 
applicable when running on secure hbase</strong>
-145   * On regular HBase (without security 
features), this will safely be ignored.
-146   * </p>
-147   *
-148   * @param conf The configuration data 
to use
-149   * @param keytabFileKey Property key 
used to configure the path to the credential file
-150   * @param userNameKey Property key used 
to configure the login principal
-151   * @param hostname Current hostname to 
use in any credentials
-152   * @throws IOException underlying 
exception from SecurityUtil.login() call
-153   */
-154  public static void 
loginServer(Configuration conf, String keytabFileKey,
-155  String userNameKey, String 
hostname) throws IOException {
-156login(conf, keytabFileKey, 
userNameKey, hostname,
-157  
ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY,
-158  
JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME);
-159  }
-160
-161  /**
-162   * Log in the current zookeeper client 
using the given configuration
-163   * keys for the credential file and 
login principal.
-164   *
-165   * <p><strong>This is only 
applicable when running on secure hbase</strong>
-166   * On regular HBase (without security 
features), this will safely be ignored.
-167   * </p>
-168   *
-169   * @param conf The configuration data 
to use
-170   * @param keytabFileKey Property key 
used to configure the path to the credential file
-171   * @param userNameKey Property key used 
to configure the login principal
-172   * @param hostname Current hostname to 
use in any credentials
-173   * @throws IOException underlying 
exception from SecurityUtil.login() call
-174   */
-175  public static void 
loginClient(Configuration conf, String keytabFileKey,
-176  String userNameKey, String 
hostname) throws IOException {
-177login(conf, keytabFileKey, 
userNameKey, hostname,
-178  
ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY,
-179  
JaasConfiguration.CLIENT_KEYTAB_KERBEROS_CONFIG_NAME);
-180  }
-181
-182  /**
-183   * Log in the current process using the 
given configuration keys for the
-184   * credential file and login 
principal.
-185   *
-186   * <p><strong>This is only 
applicable when running on secure hbase</strong>
-187   * On regular HBase (without security 
features), this will safely be ignored.
-188   * </p>
-189   *
-190   * @param conf The configuration data 
to use
-191   * @param keytabFileKey Property key 
used to configure the path to the credential file
-192   * @param userNameKey Property key used 
to configure the login principal
-193   * @param hostname Current hostname to 
use in any credentials
-194   * @param loginContextProperty property 
name to expose the entry name
-195   * @param loginContextName jaas entry 
name
-196   * @throws IOException underlying 
exception from SecurityUtil.login() call
-197   */
-198  private static void login(Configuration 
conf, String keytabFileKey,
-199  String userNameKey, String 
hostname,
-200  String loginContextProperty, String 
loginContextName)
-201  throws IOException {
-202if (!isSecureZooKeeper(conf))
-203  return;
-204
-205// User has specified a jaas.conf, 
keep this one as the good one.
-206// 
HBASE_OPTS="-Djava.security.auth.login.config=jaas.conf"
-207if 
(System.getProperty("java.security.auth.login.config") != null)
-208  return;
-209
-210// No keytab specified, no auth
-211String keytabFilename = 
conf.get(keytabFileKey);
-212if (keytabFilename == null) {
-213  LOG.warn("no keytab specified for: 
" + keytabFileKey);
-214  return;
-215}
-216
-217String principalConfig = 
conf.get(userNameKey, System.getProperty("user.name"));
-218String principalName = 
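A hedged usage sketch of the loginClient contract documented above; only the method signature comes from the diff, while the two configuration keys, the keytab path and the principal are placeholders:

  import java.net.InetAddress;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.zookeeper.ZKUtil;

  public class ZkClientLoginSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // loginClient takes the *names* of the keys, not the keytab path or principal.
      conf.set("my.client.keytab.file", "/path/to/client.keytab");
      conf.set("my.client.kerberos.principal", "client/_HOST@EXAMPLE.COM");
      ZKUtil.loginClient(conf, "my.client.keytab.file", "my.client.kerberos.principal",
          InetAddress.getLocalHost().getHostName());
      // On a non-secure cluster this is a no-op, as the javadoc above notes.
    }
  }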

[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.JaasConfiguration.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.JaasConfiguration.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.JaasConfiguration.html
index bc14b2e..547dec5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.JaasConfiguration.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.JaasConfiguration.html
@@ -139,2012 +139,2013 @@
 131int retry = 
conf.getInt("zookeeper.recovery.retry", 3);
 132int retryIntervalMillis =
 133  
conf.getInt("zookeeper.recovery.retry.intervalmill", 1000);
-134zkDumpConnectionTimeOut = 
conf.getInt("zookeeper.dump.connection.timeout",
-1351000);
-136return new 
RecoverableZooKeeper(ensemble, timeout, watcher,
-137retry, retryIntervalMillis, 
identifier);
-138  }
-139
-140  /**
-141   * Log in the current zookeeper server 
process using the given configuration
-142   * keys for the credential file and 
login principal.
-143   *
-144   * <p><strong>This is only 
applicable when running on secure hbase</strong>
-145   * On regular HBase (without security 
features), this will safely be ignored.
-146   * </p>
-147   *
-148   * @param conf The configuration data 
to use
-149   * @param keytabFileKey Property key 
used to configure the path to the credential file
-150   * @param userNameKey Property key used 
to configure the login principal
-151   * @param hostname Current hostname to 
use in any credentials
-152   * @throws IOException underlying 
exception from SecurityUtil.login() call
-153   */
-154  public static void 
loginServer(Configuration conf, String keytabFileKey,
-155  String userNameKey, String 
hostname) throws IOException {
-156login(conf, keytabFileKey, 
userNameKey, hostname,
-157  
ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY,
-158  
JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME);
-159  }
-160
-161  /**
-162   * Log in the current zookeeper client 
using the given configuration
-163   * keys for the credential file and 
login principal.
-164   *
-165   * <p><strong>This is only 
applicable when running on secure hbase</strong>
-166   * On regular HBase (without security 
features), this will safely be ignored.
-167   * </p>
-168   *
-169   * @param conf The configuration data 
to use
-170   * @param keytabFileKey Property key 
used to configure the path to the credential file
-171   * @param userNameKey Property key used 
to configure the login principal
-172   * @param hostname Current hostname to 
use in any credentials
-173   * @throws IOException underlying 
exception from SecurityUtil.login() call
-174   */
-175  public static void 
loginClient(Configuration conf, String keytabFileKey,
-176  String userNameKey, String 
hostname) throws IOException {
-177login(conf, keytabFileKey, 
userNameKey, hostname,
-178  
ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY,
-179  
JaasConfiguration.CLIENT_KEYTAB_KERBEROS_CONFIG_NAME);
-180  }
-181
-182  /**
-183   * Log in the current process using the 
given configuration keys for the
-184   * credential file and login 
principal.
-185   *
-186   * <p><strong>This is only 
applicable when running on secure hbase</strong>
-187   * On regular HBase (without security 
features), this will safely be ignored.
-188   * </p>
-189   *
-190   * @param conf The configuration data 
to use
-191   * @param keytabFileKey Property key 
used to configure the path to the credential file
-192   * @param userNameKey Property key used 
to configure the login principal
-193   * @param hostname Current hostname to 
use in any credentials
-194   * @param loginContextProperty property 
name to expose the entry name
-195   * @param loginContextName jaas entry 
name
-196   * @throws IOException underlying 
exception from SecurityUtil.login() call
-197   */
-198  private static void login(Configuration 
conf, String keytabFileKey,
-199  String userNameKey, String 
hostname,
-200  String loginContextProperty, String 
loginContextName)
-201  throws IOException {
-202if (!isSecureZooKeeper(conf))
-203  return;
-204
-205// User has specified a jaas.conf, 
keep this one as the good one.
-206// 
HBASE_OPTS="-Djava.security.auth.login.config=jaas.conf"
-207if 
(System.getProperty("java.security.auth.login.config") != null)
-208  return;
-209
-210// No keytab specified, no auth
-211String keytabFilename = 
conf.get(keytabFileKey);
-212if (keytabFilename == null) {
-213  LOG.warn("no keytab specified for: 
" + keytabFileKey);
-214  return;
-215}
-216
-217String principalConfig = 
conf.get(userNameKey, System.getProperty("user.name"));
-218String principalName = 
SecurityUtil.getServerPrincipal(principalConfig, hostname);
-219
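The login(...) body above short-circuits in three situations; a compact paraphrase of that guard sequence follows (a fragment, with the isSecureZooKeeper stand-in being an assumption rather than the real check):

  // Paraphrased guard logic only; not the actual ZKUtil implementation.
  static boolean shouldAttemptJaasLogin(org.apache.hadoop.conf.Configuration conf,
      String keytabFileKey) {
    // 1. Skip entirely on non-secure clusters (stands in for isSecureZooKeeper(conf)).
    if (!"kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication", ""))) {
      return false;
    }
    // 2. An explicit -Djava.security.auth.login.config=jaas.conf wins; keep it.
    if (System.getProperty("java.security.auth.login.config") != null) {
      return false;
    }
    // 3. With no keytab configured there is nothing to log in with.
    return conf.get(keytabFileKey) != null;
  }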

[04/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro.html
new file mode 100644
index 000..ebf0532
--- /dev/null
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro.html
@@ -0,0 +1,719 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/**
+002 *
+003 * Licensed to the Apache Software 
Foundation (ASF) under one
+004 * or more contributor license 
agreements.  See the NOTICE file
+005 * distributed with this work for 
additional information
+006 * regarding copyright ownership.  The 
ASF licenses this file
+007 * to you under the Apache License, 
Version 2.0 (the
+008 * "License"); you may not use this file 
except in compliance
+009 * with the License.  You may obtain a 
copy of the License at
+010 *
+011 * 
http://www.apache.org/licenses/LICENSE-2.0
+012 *
+013 * Unless required by applicable law or 
agreed to in writing, software
+014 * distributed under the License is 
distributed on an "AS IS" BASIS,
+015 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+016 * See the License for the specific 
language governing permissions and
+017 * limitations under the License.
+018 */
+019
+020
+021package org.apache.hadoop.hbase.client;
+022
+023import java.io.IOException;
+024import java.util.ArrayList;
+025import java.util.Arrays;
+026import java.util.List;
+027import 
java.util.concurrent.CountDownLatch;
+028import java.util.concurrent.TimeUnit;
+029import 
java.util.concurrent.atomic.AtomicLong;
+030import 
java.util.concurrent.atomic.AtomicReference;
+031
+032import org.apache.commons.logging.Log;
+033import 
org.apache.commons.logging.LogFactory;
+034import 
org.apache.hadoop.conf.Configuration;
+035import org.apache.hadoop.fs.Path;
+036import org.apache.hadoop.hbase.Cell;
+037import 
org.apache.hadoop.hbase.HBaseConfiguration;
+038import 
org.apache.hadoop.hbase.HBaseTestingUtility;
+039import 
org.apache.hadoop.hbase.HColumnDescriptor;
+040import 
org.apache.hadoop.hbase.HConstants;
+041import 
org.apache.hadoop.hbase.HTableDescriptor;
+042import 
org.apache.hadoop.hbase.RegionLocations;
+043import org.apache.hadoop.hbase.Waiter;
+044
+045import 
org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
+046import 
org.apache.hadoop.hbase.coprocessor.ObserverContext;
+047import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+048import 
org.apache.hadoop.hbase.coprocessor.RegionObserver;
+049import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+050import 
org.apache.hadoop.hbase.regionserver.RegionScanner;
+051import 
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
+052import 
org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
+053import 
org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
+054import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+055import 
org.apache.hadoop.hbase.testclassification.ClientTests;
+056import 
org.apache.hadoop.hbase.testclassification.MediumTests;
+057import 
org.apache.hadoop.hbase.util.Bytes;
+058import 
org.apache.hadoop.hbase.util.Pair;
+059import 
org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
+060import org.junit.AfterClass;
+061import org.junit.Assert;
+062import org.junit.BeforeClass;
+063import org.junit.Test;
+064import 
org.junit.experimental.categories.Category;
+065
+066@Category({MediumTests.class, 
ClientTests.class})
+067public class TestReplicaWithCluster {
+068  private static final Log LOG = 
LogFactory.getLog(TestReplicaWithCluster.class);
+069
+070  private static final int NB_SERVERS = 
3;
+071  private static final byte[] row = 
TestReplicaWithCluster.class.getName().getBytes();
+072  private static final 
HBaseTestingUtility HTU = new HBaseTestingUtility();
+073
+074  // second minicluster used in testing 
of replication
+075  private static HBaseTestingUtility 
HTU2;
+076  private static final byte[] f = 
HConstants.CATALOG_FAMILY;
+077
+078  private final static int REFRESH_PERIOD 
= 1000;
+079  private final static int 
META_SCAN_TIMEOUT_IN_MILLISEC = 200;
+080
+081  /**
+082   * This copro is used to synchronize 
the tests.
+083   */
+084  public static class SlowMeCopro 
implements RegionObserver {
+085static final AtomicLong sleepTime = 
new AtomicLong(0);
+086static final 
AtomicReference<CountDownLatch> cdl = new AtomicReference<>(new 
CountDownLatch(0));
+087
+088public SlowMeCopro() {
+089}
+090
+091@Override
+092public void 
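For context on how a test typically drives the SlowMeCopro latch defined above, a hedged fragment (the table/row variables and the TIMELINE read are assumptions drawn from the surrounding replica tests, not verified against this exact revision):

  SlowMeCopro.cdl.set(new CountDownLatch(1));   // stall the primary replica's preGetOp
  try {
    Get get = new Get(row);
    get.setConsistency(Consistency.TIMELINE);   // allow the read to go to a secondary
    Result result = table.get(get);             // expected to be served by a replica
    Assert.assertTrue(result.isStale());        // stale flag marks a replica read
  } finally {
    SlowMeCopro.cdl.get().countDown();          // unblock the primary again
  }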

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/export_control.html
--
diff --git a/export_control.html b/export_control.html
index 31adc62..5ca6a7f 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Export Control
@@ -336,7 +336,7 @@ for more details.
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-18
+  Last Published: 
2017-05-19
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/hbase-annotations/checkstyle.html
--
diff --git a/hbase-annotations/checkstyle.html 
b/hbase-annotations/checkstyle.html
index ef4fba9..8ad631b 100644
--- a/hbase-annotations/checkstyle.html
+++ b/hbase-annotations/checkstyle.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-18
+Last Published: 2017-05-19
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/hbase-annotations/dependencies.html
--
diff --git a/hbase-annotations/dependencies.html 
b/hbase-annotations/dependencies.html
index d0561ac..1f6ea53 100644
--- a/hbase-annotations/dependencies.html
+++ b/hbase-annotations/dependencies.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-18
+Last Published: 2017-05-19
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/hbase-annotations/dependency-convergence.html
--
diff --git a/hbase-annotations/dependency-convergence.html 
b/hbase-annotations/dependency-convergence.html
index 2537239..0c3a246 100644
--- a/hbase-annotations/dependency-convergence.html
+++ b/hbase-annotations/dependency-convergence.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-18
+Last Published: 2017-05-19
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/hbase-annotations/dependency-info.html
--
diff --git a/hbase-annotations/dependency-info.html 
b/hbase-annotations/dependency-info.html
index 8d3812a..1a1cdc5 100644
--- a/hbase-annotations/dependency-info.html
+++ b/hbase-annotations/dependency-info.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-18
+Last Published: 2017-05-19
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/hbase-annotations/dependency-management.html
--
diff --git a/hbase-annotations/dependency-management.html 
b/hbase-annotations/dependency-management.html
index 0ecd14c..01305b4 100644
--- a/hbase-annotations/dependency-management.html
+++ b/hbase-annotations/dependency-management.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
   

[11/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
index 726be4f..81f3cf1 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":9,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":9,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":9,"i47":10,"i48":10,"i49":10,"i50":10,"i51":9,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
 
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10};
+var methods = 
{"i0":10,"i1":10,"i2":9,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":9,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":9,"i49":10,"i50":10,"i51":10,"i52":10,"i53":9,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
 
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -299,6 +299,14 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 private void
+assertSingleResult(org.apache.hadoop.hbase.client.Resultresult,
+  byte[]row,
+  byte[]family,
+  byte[]qualifier,
+  longvalue)
+
+
+private void
 assertSingleResult(org.apache.hadoop.hbase.client.Resultresult,
   byte[]row,
   byte[]family,
@@ -306,38 +314,38 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
   longts,
   byte[]value)
 
-
+
 private 
org.apache.hadoop.hbase.client.ResultScanner
 buildScanner(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringkeyPrefix,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringvalue,
 org.apache.hadoop.hbase.client.Tableht)
 
-
+
 private 
org.apache.hadoop.hbase.client.Scan
 createScanWithRowFilter(byte[]key)
 
-
+
 private 
org.apache.hadoop.hbase.client.Scan
 createScanWithRowFilter(byte[]key,
byte[]startRow,

org.apache.hadoop.hbase.filter.CompareFilter.CompareOpop)
 
-
+
 private void
 deleteColumns(org.apache.hadoop.hbase.client.Tableht,
  http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringvalue,
  

[12/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/plugin-management.html
--
diff --git a/plugin-management.html b/plugin-management.html
index d47bb7f..4bb0df2 100644
--- a/plugin-management.html
+++ b/plugin-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Plugin Management
 
@@ -441,7 +441,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-18
+  Last Published: 
2017-05-19
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/plugins.html
--
diff --git a/plugins.html b/plugins.html
index 3fb73c1..2d9746d 100644
--- a/plugins.html
+++ b/plugins.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Build Plugins
 
@@ -376,7 +376,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-18
+  Last Published: 
2017-05-19
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/poweredbyhbase.html
--
diff --git a/poweredbyhbase.html b/poweredbyhbase.html
index f21e728..45ff061 100644
--- a/poweredbyhbase.html
+++ b/poweredbyhbase.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Powered By Apache HBase™
 
@@ -774,7 +774,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-18
+  Last Published: 
2017-05-19
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/project-info.html
--
diff --git a/project-info.html b/project-info.html
index 9554481..ee9c691 100644
--- a/project-info.html
+++ b/project-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Information
 
@@ -340,7 +340,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-18
+  Last Published: 
2017-05-19
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/project-reports.html
--
diff --git a/project-reports.html b/project-reports.html
index 27ac405..e88988f 100644
--- a/project-reports.html
+++ b/project-reports.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Generated Reports
 
@@ -310,7 +310,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-18
+  Last Published: 
2017-05-19
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/project-summary.html
--
diff --git a/project-summary.html b/project-summary.html
index 9a66e83..5729d8a 100644
--- a/project-summary.html
+++ b/project-summary.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Summary
 
@@ -336,7 +336,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-18
+  Last Published: 
2017-05-19
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/pseudo-distributed.html
--
diff --git a/pseudo-distributed.html b/pseudo-distributed.html
index e518ee7..db84de8 100644
--- a/pseudo-distributed.html
+++ b/pseudo-distributed.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase   
 Running Apache HBase (TM) in pseudo-distributed mode
@@ -313,7 +313,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-18
+  Last Published: 
2017-05-19
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/replication.html

[02/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.SlowMeCopro.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.SlowMeCopro.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.SlowMeCopro.html
index a519d7c..ebf0532 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.SlowMeCopro.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.SlowMeCopro.html
@@ -47,538 +47,612 @@
 039import 
org.apache.hadoop.hbase.HColumnDescriptor;
 040import 
org.apache.hadoop.hbase.HConstants;
 041import 
org.apache.hadoop.hbase.HTableDescriptor;
-042import org.apache.hadoop.hbase.Waiter;
-043
-044import 
org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
-045import 
org.apache.hadoop.hbase.coprocessor.ObserverContext;
-046import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-047import 
org.apache.hadoop.hbase.coprocessor.RegionObserver;
-048import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-049import 
org.apache.hadoop.hbase.regionserver.RegionScanner;
-050import 
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-051import 
org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
-052import 
org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
-053import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-054import 
org.apache.hadoop.hbase.testclassification.ClientTests;
-055import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-056import 
org.apache.hadoop.hbase.util.Bytes;
-057import 
org.apache.hadoop.hbase.util.Pair;
-058import 
org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
-059import org.junit.AfterClass;
-060import org.junit.Assert;
-061import org.junit.BeforeClass;
-062import org.junit.Test;
-063import 
org.junit.experimental.categories.Category;
-064
-065@Category({MediumTests.class, 
ClientTests.class})
-066public class TestReplicaWithCluster {
-067  private static final Log LOG = 
LogFactory.getLog(TestReplicaWithCluster.class);
-068
-069  private static final int NB_SERVERS = 
3;
-070  private static final byte[] row = 
TestReplicaWithCluster.class.getName().getBytes();
-071  private static final 
HBaseTestingUtility HTU = new HBaseTestingUtility();
-072
-073  // second minicluster used in testing 
of replication
-074  private static HBaseTestingUtility 
HTU2;
-075  private static final byte[] f = 
HConstants.CATALOG_FAMILY;
-076
-077  private final static int REFRESH_PERIOD 
= 1000;
-078
-079  /**
-080   * This copro is used to synchronize 
the tests.
-081   */
-082  public static class SlowMeCopro 
implements RegionObserver {
-083static final AtomicLong sleepTime = 
new AtomicLong(0);
-084static final 
AtomicReference<CountDownLatch> cdl = new AtomicReference<>(new 
CountDownLatch(0));
-085
-086public SlowMeCopro() {
-087}
-088
-089@Override
-090public void preGetOp(final 
ObserverContext<RegionCoprocessorEnvironment> e,
-091 final Get get, 
final List<Cell> results) throws IOException {
-092
-093  if 
(e.getEnvironment().getRegion().getRegionInfo().getReplicaId() == 0) {
-094CountDownLatch latch = 
cdl.get();
-095try {
-096  if (sleepTime.get() > 0) {
-097LOG.info("Sleeping for " + 
sleepTime.get() + " ms");
-098
Thread.sleep(sleepTime.get());
-099  } else if (latch.getCount() 
> 0) {
-100LOG.info("Waiting for the 
counterCountDownLatch");
-101latch.await(2, 
TimeUnit.MINUTES); // To help the tests to finish.
-102if (latch.getCount() > 0) 
{
-103  throw new 
RuntimeException("Can't wait more");
-104}
-105  }
-106} catch (InterruptedException e1) 
{
-107  LOG.error(e1);
-108}
-109  } else {
-110LOG.info("We're not the primary 
replicas.");
-111  }
-112}
-113  }
-114
-115  /**
-116   * This copro is used to simulate 
region server down exception for Get and Scan
-117   */
-118  public static class 
RegionServerStoppedCopro implements RegionObserver {
-119
-120public RegionServerStoppedCopro() {
-121}
-122
-123@Override
-124public void preGetOp(final 
ObserverContext<RegionCoprocessorEnvironment> e,
-125final Get get, final 
List<Cell> results) throws IOException {
-126
-127  int replicaId = 
e.getEnvironment().getRegion().getRegionInfo().getReplicaId();
+042import 
org.apache.hadoop.hbase.RegionLocations;
+043import org.apache.hadoop.hbase.Waiter;
+044
+045import 
org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
+046import 
org.apache.hadoop.hbase.coprocessor.ObserverContext;
+047import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
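The RegionServerStoppedCopro referenced above simulates a dead region server for Get and Scan; a hedged sketch of that failure-injection idea as a RegionObserver hook (imports as in the test file, and the thrown exception mirrors the class javadoc rather than this exact revision):

  // Sketch only: fail the primary so the client falls back to a replica.
  public static class FailPrimaryGetCopro implements RegionObserver {
    @Override
    public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c,
        Get get, List<Cell> results) throws IOException {
      if (c.getEnvironment().getRegion().getRegionInfo().getReplicaId() == 0) {
        throw new RegionServerStoppedException("injected failure on primary replica");
      }
    }
  }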

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html
index a519d7c..ebf0532 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html
@@ -47,538 +47,612 @@
 039import 
org.apache.hadoop.hbase.HColumnDescriptor;
 040import 
org.apache.hadoop.hbase.HConstants;
 041import 
org.apache.hadoop.hbase.HTableDescriptor;
-042import org.apache.hadoop.hbase.Waiter;
-043
-044import 
org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
-045import 
org.apache.hadoop.hbase.coprocessor.ObserverContext;
-046import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-047import 
org.apache.hadoop.hbase.coprocessor.RegionObserver;
-048import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-049import 
org.apache.hadoop.hbase.regionserver.RegionScanner;
-050import 
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-051import 
org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
-052import 
org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
-053import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-054import 
org.apache.hadoop.hbase.testclassification.ClientTests;
-055import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-056import 
org.apache.hadoop.hbase.util.Bytes;
-057import 
org.apache.hadoop.hbase.util.Pair;
-058import 
org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
-059import org.junit.AfterClass;
-060import org.junit.Assert;
-061import org.junit.BeforeClass;
-062import org.junit.Test;
-063import 
org.junit.experimental.categories.Category;
-064
-065@Category({MediumTests.class, 
ClientTests.class})
-066public class TestReplicaWithCluster {
-067  private static final Log LOG = 
LogFactory.getLog(TestReplicaWithCluster.class);
-068
-069  private static final int NB_SERVERS = 
3;
-070  private static final byte[] row = 
TestReplicaWithCluster.class.getName().getBytes();
-071  private static final 
HBaseTestingUtility HTU = new HBaseTestingUtility();
-072
-073  // second minicluster used in testing 
of replication
-074  private static HBaseTestingUtility 
HTU2;
-075  private static final byte[] f = 
HConstants.CATALOG_FAMILY;
-076
-077  private final static int REFRESH_PERIOD 
= 1000;
-078
-079  /**
-080   * This copro is used to synchronize 
the tests.
-081   */
-082  public static class SlowMeCopro 
implements RegionObserver {
-083static final AtomicLong sleepTime = 
new AtomicLong(0);
-084static final 
AtomicReference<CountDownLatch> cdl = new AtomicReference<>(new 
CountDownLatch(0));
-085
-086public SlowMeCopro() {
-087}
-088
-089@Override
-090public void preGetOp(final 
ObserverContext<RegionCoprocessorEnvironment> e,
-091 final Get get, 
final List<Cell> results) throws IOException {
-092
-093  if 
(e.getEnvironment().getRegion().getRegionInfo().getReplicaId() == 0) {
-094CountDownLatch latch = 
cdl.get();
-095try {
-096  if (sleepTime.get() > 0) {
-097LOG.info("Sleeping for " + 
sleepTime.get() + " ms");
-098
Thread.sleep(sleepTime.get());
-099  } else if (latch.getCount() 
> 0) {
-100LOG.info("Waiting for the 
counterCountDownLatch");
-101latch.await(2, 
TimeUnit.MINUTES); // To help the tests to finish.
-102if (latch.getCount() > 0) 
{
-103  throw new 
RuntimeException("Can't wait more");
-104}
-105  }
-106} catch (InterruptedException e1) 
{
-107  LOG.error(e1);
-108}
-109  } else {
-110LOG.info("We're not the primary 
replicas.");
-111  }
-112}
-113  }
-114
-115  /**
-116   * This copro is used to simulate 
region server down exception for Get and Scan
-117   */
-118  public static class 
RegionServerStoppedCopro implements RegionObserver {
-119
-120public RegionServerStoppedCopro() {
-121}
-122
-123@Override
-124public void preGetOp(final 
ObserverContext<RegionCoprocessorEnvironment> e,
-125final Get get, final 
List<Cell> results) throws IOException {
-126
-127  int replicaId = 
e.getEnvironment().getRegion().getRegionInfo().getReplicaId();
+042import 
org.apache.hadoop.hbase.RegionLocations;
+043import org.apache.hadoop.hbase.Waiter;
+044
+045import 
org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
+046import 
org.apache.hadoop.hbase.coprocessor.ObserverContext;
+047import 

[13/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/hbase-archetypes/hbase-client-project/source-repository.html
--
diff --git a/hbase-archetypes/hbase-client-project/source-repository.html 
b/hbase-archetypes/hbase-client-project/source-repository.html
index 20b5af7..20b0359 100644
--- a/hbase-archetypes/hbase-client-project/source-repository.html
+++ b/hbase-archetypes/hbase-client-project/source-repository.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-18
+Last Published: 2017-05-19
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Exemplar for 
hbase-client archetype

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/hbase-archetypes/hbase-client-project/team-list.html
--
diff --git a/hbase-archetypes/hbase-client-project/team-list.html 
b/hbase-archetypes/hbase-client-project/team-list.html
index 8b8516c..737fb6a 100644
--- a/hbase-archetypes/hbase-client-project/team-list.html
+++ b/hbase-archetypes/hbase-client-project/team-list.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-18
+Last Published: 2017-05-19
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Exemplar for 
hbase-client archetype

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
--
diff --git a/hbase-archetypes/hbase-shaded-client-project/checkstyle.html 
b/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
index a750201..596c02d 100644
--- a/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
+++ b/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-18
+Last Published: 2017-05-19
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Exemplar for 
hbase-shaded-client archetype

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/hbase-archetypes/hbase-shaded-client-project/dependencies.html
--
diff --git a/hbase-archetypes/hbase-shaded-client-project/dependencies.html 
b/hbase-archetypes/hbase-shaded-client-project/dependencies.html
index 73f694a..9bcb976 100644
--- a/hbase-archetypes/hbase-shaded-client-project/dependencies.html
+++ b/hbase-archetypes/hbase-shaded-client-project/dependencies.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-18
+Last Published: 2017-05-19
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Exemplar for 
hbase-shaded-client archetype
@@ -3599,7 +3599,7 @@ These include: bzip2, gzip, pack200, xz and ar, cpio, 
jar, tar, zip, dump.
 -
 
 hbase-server-2.0.0-SNAPSHOT-tests.jar
-7.69 MB
+7.70 MB
 -
 -
 -

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
--
diff --git 
a/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html 
b/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
index 821ed93..24f41e5 100644
--- a/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
+++ b/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 

[26/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
index be7f8e5..37574d7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
@@ -147,1885 +147,1897 @@
 139  private final boolean 
hostnamesCanChange;
 140  private final long pause;
 141  private final long pauseForCQTBE;// 
pause for CallQueueTooBigException, if specified
-142  private final boolean 
useMetaReplicas;
-143  private final int numTries;
-144  final int rpcTimeout;
-145
-146  /**
-147   * Global nonceGenerator shared per 
client.Currently there's no reason to limit its scope.
-148   * Once it's set under 
nonceGeneratorCreateLock, it is never unset or changed.
-149   */
-150  private static volatile NonceGenerator 
nonceGenerator = null;
-151  /** The nonce generator lock. Only 
taken when creating Connection, which gets a private copy. */
-152  private static final Object 
nonceGeneratorCreateLock = new Object();
-153
-154  private final AsyncProcess 
asyncProcess;
-155  // single tracker per connection
-156  private final ServerStatisticTracker 
stats;
-157
-158  private volatile boolean closed;
-159  private volatile boolean aborted;
-160
-161  // package protected for the tests
-162  ClusterStatusListener 
clusterStatusListener;
-163
-164  private final Object metaRegionLock = 
new Object();
-165
-166  // We have a single lock for master & 
zk to prevent deadlocks. Having
-167  //  one lock for ZK and one lock for 
master is not possible:
-168  //  When creating a connection to 
master, we need a connection to ZK to get
-169  //  its address. But another thread 
could have taken the ZK lock, and could
-170  //  be waiting for the master lock 
=> deadlock.
-171  private final Object masterAndZKLock = 
new Object();
-172
-173  // thread executor shared by all Table 
instances created
-174  // by this connection
-175  private volatile ExecutorService 
batchPool = null;
-176  // meta thread executor shared by all 
Table instances created
-177  // by this connection
-178  private volatile ExecutorService 
metaLookupPool = null;
-179  private volatile boolean cleanupPool = 
false;
-180
-181  private final Configuration conf;
-182
-183  // cache the configuration value for 
tables so that we can avoid calling
-184  // the expensive Configuration to fetch 
the value multiple times.
-185  private final ConnectionConfiguration 
connectionConfig;
-186
-187  // Client rpc instance.
-188  private final RpcClient rpcClient;
-189
-190  private final MetaCache metaCache;
-191  private final MetricsConnection 
metrics;
-192
-193  protected User user;
-194
-195  private final RpcRetryingCallerFactory 
rpcCallerFactory;
-196
-197  private final RpcControllerFactory 
rpcControllerFactory;
-198
-199  private final RetryingCallerInterceptor 
interceptor;
-200
-201  /**
-202   * Cluster registry of basic info such 
as clusterid and meta region location.
-203   */
-204  Registry registry;
-205
-206  private final ClientBackoffPolicy 
backoffPolicy;
-207
-208  /**
-209   * Allow setting an alternate 
BufferedMutator implementation via
-210   * config. If null, use default.
-211   */
-212  private final String 
alternateBufferedMutatorClassName;
-213
-214  /**
-215   * constructor
-216   * @param conf Configuration object
-217   */
-218  ConnectionImplementation(Configuration 
conf,
-219   
ExecutorService pool, User user) throws IOException {
-220this.conf = conf;
-221this.user = user;
-222this.batchPool = pool;
-223this.connectionConfig = new 
ConnectionConfiguration(conf);
-224this.closed = false;
-225this.pause = 
conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
-226
HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
-227long configuredPauseForCQTBE = 
conf.getLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, pause);
-228if (configuredPauseForCQTBE < 
pause) {
-229  LOG.warn("The " + 
HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE + " setting: "
-230  + configuredPauseForCQTBE + " 
is smaller than " + HConstants.HBASE_CLIENT_PAUSE
-231  + ", will use " + pause + " 
instead.");
-232  this.pauseForCQTBE = pause;
-233} else {
-234  this.pauseForCQTBE = 
configuredPauseForCQTBE;
-235}
-236this.useMetaReplicas = 
conf.getBoolean(HConstants.USE_META_REPLICAS,
-237  
HConstants.DEFAULT_USE_META_REPLICAS);
-238// how many times to try, one more 
than max *retry* time
-239this.numTries = 
retries2Attempts(connectionConfig.getRetriesNumber());
-240this.rpcTimeout = conf.getInt(
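The comment above notes that the attempt count is one more than the configured retry count; a tiny hedged sketch of that relationship (the real retries2Attempts helper may handle bounds differently):

  // Illustrative only: one initial try plus `retries` retries, guarding against overflow.
  static int retries2Attempts(int retries) {
    return retries == Integer.MAX_VALUE ? Integer.MAX_VALUE : Math.max(1, retries + 1);
  }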
-241

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.NodeAndData.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.NodeAndData.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.NodeAndData.html
index bc14b2e..547dec5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.NodeAndData.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.NodeAndData.html
@@ -139,2012 +139,2013 @@
 131    int retry = conf.getInt("zookeeper.recovery.retry", 3);
 132    int retryIntervalMillis =
 133      conf.getInt("zookeeper.recovery.retry.intervalmill", 1000);
-134    zkDumpConnectionTimeOut = conf.getInt("zookeeper.dump.connection.timeout",
-135        1000);
-136    return new RecoverableZooKeeper(ensemble, timeout, watcher,
-137        retry, retryIntervalMillis, identifier);
-138  }
-139
-140  /**
-141   * Log in the current zookeeper server process using the given configuration
-142   * keys for the credential file and login principal.
-143   *
-144   * <p><strong>This is only applicable when running on secure hbase</strong>
-145   * On regular HBase (without security features), this will safely be ignored.
-146   * </p>
-147   *
-148   * @param conf The configuration data to use
-149   * @param keytabFileKey Property key used to configure the path to the credential file
-150   * @param userNameKey Property key used to configure the login principal
-151   * @param hostname Current hostname to use in any credentials
-152   * @throws IOException underlying exception from SecurityUtil.login() call
-153   */
-154  public static void loginServer(Configuration conf, String keytabFileKey,
-155      String userNameKey, String hostname) throws IOException {
-156    login(conf, keytabFileKey, userNameKey, hostname,
-157      ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY,
-158      JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME);
-159  }
-160
-161  /**
-162   * Log in the current zookeeper client using the given configuration
-163   * keys for the credential file and login principal.
-164   *
-165   * <p><strong>This is only applicable when running on secure hbase</strong>
-166   * On regular HBase (without security features), this will safely be ignored.
-167   * </p>
-168   *
-169   * @param conf The configuration data to use
-170   * @param keytabFileKey Property key used to configure the path to the credential file
-171   * @param userNameKey Property key used to configure the login principal
-172   * @param hostname Current hostname to use in any credentials
-173   * @throws IOException underlying exception from SecurityUtil.login() call
-174   */
-175  public static void loginClient(Configuration conf, String keytabFileKey,
-176      String userNameKey, String hostname) throws IOException {
-177    login(conf, keytabFileKey, userNameKey, hostname,
-178      ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY,
-179      JaasConfiguration.CLIENT_KEYTAB_KERBEROS_CONFIG_NAME);
-180  }
-181
-182  /**
-183   * Log in the current process using the given configuration keys for the
-184   * credential file and login principal.
-185   *
-186   * <p><strong>This is only applicable when running on secure hbase</strong>
-187   * On regular HBase (without security features), this will safely be ignored.
-188   * </p>
-189   *
-190   * @param conf The configuration data to use
-191   * @param keytabFileKey Property key used to configure the path to the credential file
-192   * @param userNameKey Property key used to configure the login principal
-193   * @param hostname Current hostname to use in any credentials
-194   * @param loginContextProperty property name to expose the entry name
-195   * @param loginContextName jaas entry name
-196   * @throws IOException underlying exception from SecurityUtil.login() call
-197   */
-198  private static void login(Configuration conf, String keytabFileKey,
-199      String userNameKey, String hostname,
-200      String loginContextProperty, String loginContextName)
-201      throws IOException {
-202    if (!isSecureZooKeeper(conf))
-203      return;
-204
-205    // User has specified a jaas.conf, keep this one as the good one.
-206    // HBASE_OPTS="-Djava.security.auth.login.config=jaas.conf"
-207    if (System.getProperty("java.security.auth.login.config") != null)
-208      return;
-209
-210    // No keytab specified, no auth
-211    String keytabFilename = conf.get(keytabFileKey);
-212    if (keytabFilename == null) {
-213      LOG.warn("no keytab specified for: " + keytabFileKey);
-214      return;
-215    }
-216
-217    String principalConfig = conf.get(userNameKey, System.getProperty("user.name"));
-218    String principalName = SecurityUtil.getServerPrincipal(principalConfig, hostname);
-219
-220    // Initialize the 
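
A rough usage sketch of the loginServer/loginClient helpers shown in the hunk above; the property names and hostname are hypothetical placeholders, and on a non-secure cluster the call is effectively a no-op because isSecureZooKeeper(conf) is false:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;

public class ZkLoginSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical property names; nothing happens unless the keytab key is actually set.
    ZKUtil.loginClient(conf, "example.zookeeper.client.keytab.file",
        "example.zookeeper.client.kerberos.principal", "host.example.com");
  }
}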

[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
index 7a0715f..b947231 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
@@ -33,10 +33,10 @@
 025  requiredArguments = {
 026    @org.jamon.annotations.Argument(name = "regionServer", type = "HRegionServer")},
 027  optionalArguments = {
-028    @org.jamon.annotations.Argument(name = "bcv", type = "String"),
+028    @org.jamon.annotations.Argument(name = "format", type = "String"),
 029    @org.jamon.annotations.Argument(name = "bcn", type = "String"),
 030    @org.jamon.annotations.Argument(name = "filter", type = "String"),
-031    @org.jamon.annotations.Argument(name = "format", type = "String")})
+031    @org.jamon.annotations.Argument(name = "bcv", type = "String")})
 032public class RSStatusTmpl
 033  extends org.jamon.AbstractTemplateProxy
 034{
@@ -77,23 +77,23 @@
 069      return m_regionServer;
 070    }
 071    private HRegionServer m_regionServer;
-072    // 24, 1
-073    public void setBcv(String bcv)
+072    // 22, 1
+073    public void setFormat(String format)
 074    {
-075      // 24, 1
-076      m_bcv = bcv;
-077      m_bcv__IsNotDefault = true;
+075      // 22, 1
+076      m_format = format;
+077      m_format__IsNotDefault = true;
 078    }
-079    public String getBcv()
+079    public String getFormat()
 080    {
-081      return m_bcv;
+081      return m_format;
 082    }
-083    private String m_bcv;
-084    public boolean getBcv__IsNotDefault()
+083    private String m_format;
+084    public boolean getFormat__IsNotDefault()
 085    {
-086      return m_bcv__IsNotDefault;
+086      return m_format__IsNotDefault;
 087    }
-088    private boolean m_bcv__IsNotDefault;
+088    private boolean m_format__IsNotDefault;
 089    // 23, 1
 090    public void setBcn(String bcn)
 091    {
@@ -128,23 +128,23 @@
 120      return m_filter__IsNotDefault;
 121    }
 122    private boolean m_filter__IsNotDefault;
-123    // 22, 1
-124    public void setFormat(String format)
+123    // 24, 1
+124    public void setBcv(String bcv)
 125    {
-126      // 22, 1
-127      m_format = format;
-128      m_format__IsNotDefault = true;
+126      // 24, 1
+127      m_bcv = bcv;
+128      m_bcv__IsNotDefault = true;
 129    }
-130    public String getFormat()
+130    public String getBcv()
 131    {
-132      return m_format;
+132      return m_bcv;
 133    }
-134    private String m_format;
-135    public boolean getFormat__IsNotDefault()
+134    private String m_bcv;
+135    public boolean getBcv__IsNotDefault()
 136    {
-137      return m_format__IsNotDefault;
+137      return m_bcv__IsNotDefault;
 138    }
-139    private boolean m_format__IsNotDefault;
+139    private boolean m_bcv__IsNotDefault;
 140  }
 141  @Override
 142  protected org.jamon.AbstractTemplateProxy.ImplData makeImplData()
@@ -156,10 +156,10 @@
 148    return (ImplData) super.getImplData();
 149  }
 150  
-151  protected String bcv;
-152  public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setBcv(String p_bcv)
+151  protected String format;
+152  public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFormat(String p_format)
 153  {
-154    (getImplData()).setBcv(p_bcv);
+154    (getImplData()).setFormat(p_format);
 155    return this;
 156  }
 157  
@@ -177,10 +177,10 @@
 169    return this;
 170  }
 171  
-172  protected String format;
-173  public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFormat(String p_format)
+172  protected String bcv;
+173  public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setBcv(String p_bcv)
 174  {
-175    (getImplData()).setFormat(p_format);
+175    (getImplData()).setBcv(p_bcv);
 176    return this;
 177  }
 178  
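
The reordering above only affects Jamon's generated bookkeeping for optional arguments; callers keep using the same fluent setters. A small sketch of that call pattern, assuming the proxy's no-argument constructor and leaving rendering out; the argument values are purely illustrative:

import org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl;

public class RSStatusTmplSketch {
  static RSStatusTmpl configure() {
    // setFormat/setBcn/setBcv are the generated setters visible in the hunk above.
    return new RSStatusTmpl()
        .setFormat("json")
        .setBcn("exampleCacheName")
        .setBcv("exampleCacheVariant");
  }
}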

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html
index 7a0715f..b947231 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html
@@ -33,10 +33,10 @@
 025  requiredArguments = {
 026    @org.jamon.annotations.Argument(name = "regionServer", type = 

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index 26763d8..7ab70ee 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 008@InterfaceAudience.Private
 009public class Version {
 010  public static final String version = "2.0.0-SNAPSHOT";
-011  public static final String revision = "6dc4190c07a6e3039f6c32bdc9a8aeb5483ea192";
+011  public static final String revision = "3fe4b28bb07466e6ba245d8c40da425db70f706c";
 012  public static final String user = "jenkins";
-013  public static final String date = "Thu May 18 14:39:15 UTC 2017";
+013  public static final String date = "Fri May 19 14:38:50 UTC 2017";
 014  public static final String url = "git://asf920.gq1.ygridcore.net/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015  public static final String srcChecksum = "735e5b81eac121a4b4bd8279902e0c0d";
+015  public static final String srcChecksum = "280f277e2bca9ab8a702cba79685bbea";
 016}
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionConfiguration.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionConfiguration.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionConfiguration.html
index b6cbb52..1612d60 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionConfiguration.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionConfiguration.html
@@ -48,24 +48,24 @@
 040  private final long scannerMaxResultSize;
 041  private final int primaryCallTimeoutMicroSecond;
 042  private final int replicaCallTimeoutMicroSecondScan;
-043  private final int retries;
-044  private final int maxKeyValueSize;
-045  private final int rpcTimeout;
-046  private final int readRpcTimeout;
-047  private final int writeRpcTimeout;
-048  // toggle for async/sync prefetch
-049  private final boolean clientScannerAsyncPrefetch;
-050
-051  /**
-052   * Constructor
-053   * @param conf Configuration object
-054   */
-055  ConnectionConfiguration(Configuration conf) {
-056    this.writeBufferSize = conf.getLong(WRITE_BUFFER_SIZE_KEY, WRITE_BUFFER_SIZE_DEFAULT);
-057
-058    this.metaOperationTimeout = conf.getInt(
-059      HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT,
-060      HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
+043  private final int metaReplicaCallTimeoutMicroSecondScan;
+044  private final int retries;
+045  private final int maxKeyValueSize;
+046  private final int rpcTimeout;
+047  private final int readRpcTimeout;
+048  private final int writeRpcTimeout;
+049  // toggle for async/sync prefetch
+050  private final boolean clientScannerAsyncPrefetch;
+051
+052  /**
+053   * Constructor
+054   * @param conf Configuration object
+055   */
+056  ConnectionConfiguration(Configuration conf) {
+057    this.writeBufferSize = conf.getLong(WRITE_BUFFER_SIZE_KEY, WRITE_BUFFER_SIZE_DEFAULT);
+058
+059    this.metaOperationTimeout = conf.getInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT,
+060        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
 061
 062    this.operationTimeout = conf.getInt(
 063      HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
@@ -75,7 +75,7 @@
 067
 068    this.scannerMaxResultSize =
 069        conf.getLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY,
-070          HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE);
+070        HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE);
 071
 072    this.primaryCallTimeoutMicroSecond =
 073        conf.getInt("hbase.client.primaryCallTimeout.get", 10000); // 10ms
@@ -83,99 +83,109 @@
 075    this.replicaCallTimeoutMicroSecondScan =
 076        conf.getInt("hbase.client.replicaCallTimeout.scan", 1000000); // 1000 ms
 077
-078    this.retries = conf.getInt(
-079       HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-080
-081    this.clientScannerAsyncPrefetch = conf.getBoolean(
-082       Scan.HBASE_CLIENT_SCANNER_ASYNC_PREFETCH, Scan.DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH);
-083
-084    this.maxKeyValueSize = conf.getInt(MAX_KEYVALUE_SIZE_KEY, MAX_KEYVALUE_SIZE_DEFAULT);
-085
-086    this.rpcTimeout =
-087        conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
-088
-089    this.readRpcTimeout = conf.getInt(HConstants.HBASE_RPC_READ_TIMEOUT_KEY,
-090        conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 
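
A small sketch of the timeout fallback visible at the end of the hunk above, where the read RPC timeout defaults to the generic RPC timeout; the HConstants names come from the hunk, while the wrapper class and printout are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class RpcTimeoutSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    int rpcTimeout = conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
        HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
    // The read timeout falls back to the generic RPC timeout when it is not set explicitly.
    int readRpcTimeout = conf.getInt(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, rpcTimeout);
    System.out.println("rpc=" + rpcTimeout + "ms read=" + readRpcTimeout + "ms");
  }
}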

[17/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.SetData.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.SetData.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.SetData.html
index bc14b2e..547dec5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.SetData.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.SetData.html

[06/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide.html
index 7d32ed2..293c833 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide.html
@@ -1435,4908 +1435,5004 @@
 1427  }
 1428
 1429  @Test
-1430  public void testVersions() throws Exception {
+1430  public void testNullQualifier() throws Exception {
 1431    final TableName tableName = TableName.valueOf(name.getMethodName());
-1432
-1433    long [] STAMPS = makeStamps(20);
-1434    byte [][] VALUES = makeNAscii(VALUE, 20);
-1435
-1436    Table ht = TEST_UTIL.createTable(tableName, FAMILY, 10);
-1437
-1438    // Insert 4 versions of same column
-1439    Put put = new Put(ROW);
-1440    put.addColumn(FAMILY, QUALIFIER, STAMPS[1], VALUES[1]);
-1441    put.addColumn(FAMILY, QUALIFIER, STAMPS[2], VALUES[2]);
-1442    put.addColumn(FAMILY, QUALIFIER, STAMPS[4], VALUES[4]);
-1443    put.addColumn(FAMILY, QUALIFIER, STAMPS[5], VALUES[5]);
-1444    ht.put(put);
-1445
-1446    // Verify we can get each one properly
-1447    getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[1], VALUES[1]);
-1448    getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]);
-1449    getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[4]);
-1450    getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[5], VALUES[5]);
-1451    scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[1], VALUES[1]);
-1452    scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]);
-1453    scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[4]);
-1454    scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[5], VALUES[5]);
-1455
-1456    // Verify we don't accidentally get others
-1457    getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]);
-1458    getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[3]);
-1459    getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[6]);
-1460    scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]);
-1461    scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[3]);
-1462    scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[6]);
-1463
-1464    // Ensure maxVersions in query is respected
-1465    Get get = new Get(ROW);
-1466    get.addColumn(FAMILY, QUALIFIER);
-1467    get.setMaxVersions(2);
-1468    Result result = ht.get(get);
-1469    assertNResult(result, ROW, FAMILY, QUALIFIER,
-1470        new long [] {STAMPS[4], STAMPS[5]},
-1471        new byte[][] {VALUES[4], VALUES[5]},
-1472        0, 1);
-1473
-1474    Scan scan = new Scan(ROW);
-1475    scan.addColumn(FAMILY, QUALIFIER);
-1476    scan.setMaxVersions(2);
-1477    result = getSingleScanResult(ht, scan);
-1478    assertNResult(result, ROW, FAMILY, QUALIFIER,
-1479        new long [] {STAMPS[4], STAMPS[5]},
-1480        new byte[][] {VALUES[4], VALUES[5]},
-1481        0, 1);
-1482
-1483    // Flush and redo
+1432    Table table = TEST_UTIL.createTable(tableName, FAMILY);
+1433
+1434    // Work for Put
+1435    Put put = new Put(ROW);
+1436    put.addColumn(FAMILY, null, VALUE);
+1437    table.put(put);
+1438
+1439    // Work for Get, Scan
+1440    getTestNull(table, ROW, FAMILY, VALUE);
+1441    scanTestNull(table, ROW, FAMILY, VALUE);
+1442
+1443    // Work for Delete
+1444    Delete delete = new Delete(ROW);
+1445    delete.addColumns(FAMILY, null);
+1446    table.delete(delete);
+1447
+1448    Get get = new Get(ROW);
+1449    Result result = table.get(get);
+1450    assertEmptyResult(result);
+1451
+1452    // Work for Increment/Append
+1453    Increment increment = new Increment(ROW);
+1454    increment.addColumn(FAMILY, null, 1L);
+1455    table.increment(increment);
+1456    getTestNull(table, ROW, FAMILY, 1L);
+1457
+1458    table.incrementColumnValue(ROW, FAMILY, null, 1L);
+1459    getTestNull(table, ROW, FAMILY, 2L);
+1460
+1461    delete = new Delete(ROW);
+1462    delete.addColumns(FAMILY, null);
+1463    table.delete(delete);
+1464
+1465    Append append = new Append(ROW);
+1466    append.add(FAMILY, null, VALUE);
+1467    table.append(append);
+1468    getTestNull(table, ROW, FAMILY, VALUE);
+1469
+1470    // Work for checkAndMutate, checkAndPut, checkAndDelete
+1471    put = new Put(ROW);
+1472    put.addColumn(FAMILY, null, Bytes.toBytes("checkAndPut"));
+1473    table.put(put);
+1474    table.checkAndPut(ROW, FAMILY, null, VALUE, put);
+1475
+1476    RowMutations mutate = new RowMutations(ROW);
+1477    mutate.add(new Put(ROW).addColumn(FAMILY, null, 
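
A compact, self-contained sketch of the behaviour the new testNullQualifier exercises: a null qualifier addresses the family's empty qualifier for writes, reads and deletes alike. The Connection is assumed to come from elsewhere, and the table, family and row names are illustrative:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class NullQualifierSketch {
  private static final byte[] ROW = Bytes.toBytes("row");
  private static final byte[] FAMILY = Bytes.toBytes("f");

  static void roundTrip(Connection connection) throws IOException {
    try (Table table = connection.getTable(TableName.valueOf("exampleTable"))) {
      table.put(new Put(ROW).addColumn(FAMILY, null, Bytes.toBytes("v")));   // write
      Result result = table.get(new Get(ROW).addColumn(FAMILY, null));       // read it back
      System.out.println(Bytes.toString(result.getValue(FAMILY, null)));
      table.delete(new Delete(ROW).addColumns(FAMILY, null));                // clean up
    }
  }
}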

[15/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.html
index bc14b2e..547dec5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.html

[18/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.DeleteNodeFailSilent.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.DeleteNodeFailSilent.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.DeleteNodeFailSilent.html
index bc14b2e..547dec5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.DeleteNodeFailSilent.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.DeleteNodeFailSilent.html

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide.ExceptionInReseekRegionObserver.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide.ExceptionInReseekRegionObserver.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide.ExceptionInReseekRegionObserver.html
index 7d32ed2..293c833 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide.ExceptionInReseekRegionObserver.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide.ExceptionInReseekRegionObserver.html

[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.html
index bc14b2e..547dec5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.html

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
index 7b99d74..366fcc2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
@@ -127,222 +127,219 @@
 119    if (family == null) {
 120      throw new IllegalArgumentException("family cannot be null");
 121    }
-122    if (qualifier == null) {
-123      throw new IllegalArgumentException("qualifier cannot be null");
-124    }
-125    List<Cell> list = getCellList(family);
-126    KeyValue kv = createPutKeyValue(family, qualifier, ts, Bytes.toBytes(amount));
-127    list.add(kv);
-128    familyMap.put(CellUtil.cloneFamily(kv), list);
-129    return this;
-130  }
-131
-132  /**
-133   * Gets the TimeRange used for this increment.
-134   * @return TimeRange
-135   */
-136  public TimeRange getTimeRange() {
-137    return this.tr;
-138  }
-139
-140  /**
-141   * Sets the TimeRange to be used on the Get for this increment.
-142   * <p>
-143   * This is useful for when you have counters that only last for specific
-144   * periods of time (ie. counters that are partitioned by time).  By setting
-145   * the range of valid times for this increment, you can potentially gain
-146   * some performance with a more optimal Get operation.
-147   * <p>
-148   * This range is used as [minStamp, maxStamp).
-149   * @param minStamp minimum timestamp value, inclusive
-150   * @param maxStamp maximum timestamp value, exclusive
-151   * @throws IOException if invalid time range
-152   * @return this
-153   */
-154  public Increment setTimeRange(long minStamp, long maxStamp)
-155  throws IOException {
-156    tr = new TimeRange(minStamp, maxStamp);
-157    return this;
-158  }
-159
-160  /**
-161   * @param returnResults True (default) if the increment operation should return the results. A
-162   *  client that is not interested in the result can save network bandwidth setting this
-163   *  to false.
-164   */
-165  public Increment setReturnResults(boolean returnResults) {
-166    super.setReturnResults(returnResults);
-167    return this;
-168  }
-169
-170  /**
-171   * @return current setting for returnResults
-172   */
-173  // This method makes public the superclasses's protected method.
-174  public boolean isReturnResults() {
-175    return super.isReturnResults();
-176  }
-177
-178  /**
-179   * Method for retrieving the number of families to increment from
-180   * @return number of families
-181   */
-182  @Override
-183  public int numFamilies() {
-184    return this.familyMap.size();
-185  }
-186
-187  /**
-188   * Method for checking if any families have been inserted into this Increment
-189   * @return true if familyMap is non empty false otherwise
-190   */
-191  public boolean hasFamilies() {
-192    return !this.familyMap.isEmpty();
-193  }
-194
-195  /**
-196   * Before 0.95, when you called Increment#getFamilyMap(), you got back
-197   * a map of families to a list of Longs.  Now, {@link #getFamilyCellMap()} returns
-198   * families by list of Cells.  This method has been added so you can have the
-199   * old behavior.
-200   * @return Map of families to a Map of qualifiers and their Long increments.
-201   * @since 0.95.0
-202   */
-203  public Map<byte[], NavigableMap<byte [], Long>> getFamilyMapOfLongs() {
-204    NavigableMap<byte[], List<Cell>> map = super.getFamilyCellMap();
-205    Map<byte [], NavigableMap<byte[], Long>> results = new TreeMap(Bytes.BYTES_COMPARATOR);
-206    for (Map.Entry<byte [], List<Cell>> entry: map.entrySet()) {
-207      NavigableMap<byte [], Long> longs = new TreeMap(Bytes.BYTES_COMPARATOR);
-208      for (Cell cell: entry.getValue()) {
-209        longs.put(CellUtil.cloneQualifier(cell),
-210            Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
-211      }
-212      results.put(entry.getKey(), longs);
-213    }
-214    return results;
-215  }
-216
-217  /**
-218   * @return String
-219   */
-220  @Override
-221  public String toString() {
-222    StringBuilder sb = new StringBuilder();
-223    sb.append("row=");
-224    sb.append(Bytes.toStringBinary(this.row));
-225    if(this.familyMap.isEmpty()) {
-226      sb.append(", no columns set to be incremented");
-227      return sb.toString();
-228    }
-229    sb.append(", families=");
-230    boolean moreThanOne = false;
-231    for(Map.Entry<byte [], List<Cell>> entry: this.familyMap.entrySet()) {
-232      if(moreThanOne) {
-233        sb.append("), ");
-234      } else {
-235        moreThanOne = true;
-236        sb.append("{");
-237      }
-238      sb.append("(family=");
-239
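
Read together with the client test above, the point of dropping the qualifier null-check in this hunk is that Increment now accepts a null qualifier just like Put, Get and Delete. A short sketch using only methods visible in the hunk; the row and family bytes are illustrative:

import java.io.IOException;
import java.util.Map;
import java.util.NavigableMap;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementSketch {
  public static void main(String[] args) throws IOException {
    // Null qualifier: the counter lives under the family's empty qualifier.
    Increment increment = new Increment(Bytes.toBytes("row"))
        .addColumn(Bytes.toBytes("f"), null, 1L)
        .setTimeRange(0L, Long.MAX_VALUE);   // setTimeRange is declared to throw IOException
    Map<byte[], NavigableMap<byte[], Long>> longs = increment.getFamilyMapOfLongs();
    System.out.println("families staged for increment: " + longs.size());
  }
}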
